repo_name: string (lengths 6-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
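Each record below pairs one repository with parallel per-file sequences: a commit hexsha, a file path, the file's source code, the library APIs it calls, and a per-library map of plausible version ranges. A minimal loading sketch, assuming the dump is serialized as JSON Lines under a hypothetical code_dataset.jsonl path (both the path and the serialization are assumptions, not something this dump specifies):

import json

# Iterate hypothetical JSON Lines records shaped like the rows below;
# hexsha/file_path/code/apis are parallel sequences, one entry per file.
with open('code_dataset.jsonl', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        for sha, path, code, apis in zip(record['hexsha'], record['file_path'],
                                         record['code'], record['apis']):
            print(record['repo_name'], sha[:8], path, len(apis))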
mkirby1995/PyISRU
[ "46338999e70308604692bbd3c20b4f770c27dc6d" ]
[ "periodic_table.py" ]
[ "import pandas as pd\nimport json\n\n\nclass Element:\n\n\n def __init__(self, name, abrivation, atomic_number,\n atomic_mass, period, group):\n\n self.name = name\n self.abrivation = abrivation\n self.atomic_number = atomic_number\n self.atomic_mass = atomic_mass\n self.period = period #row\n self.group = group #column\n self.protons = self.atomic_number\n self.neutrons = self.atomic_mass - self.protons\n\n\n def __repr__(self):\n return f\"{self.abrivation}\\n{self.atomic_number}\\n{self.atomic_mass}\"\n\n\ndf = pd.read_csv('elements.csv', header=None).dropna(axis = 0)\ndf[0] = df[0].str.strip(\"'\")\ndf[1] = df[1].str.strip(\"'\")\n\nperiodic = {}\n\nfor i in range(len(df)):\n element = Element(name = df[0][i],\n abrivation = df[1][i],\n atomic_number = df[2][i],\n atomic_mass = df[3][i],\n period = df[4][i],\n group = df[5][i])\n\n\n periodic[element.abrivation] = {'name': element.name,\n 'atomic_number': element.atomic_number,\n 'atomic_mass': element.atomic_mass,\n 'period':element.period,\n 'group': element.group}\n\n\nwith open('periodic.json', 'w', encoding='utf-8') as f:\n json.dump(periodic, f, ensure_ascii=False, indent=4)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
dan3dewey/Pythonista-bagatelles
[ "8c2e1d061d76184490605e2a5d52fb236931cba7" ]
[ "three_body_2d.py" ]
[ "# coding: utf-8\n# three_body_2d.py\n\"\"\" Use Docstrings like this so that help() can give useful output.\n These can be multi-line, that's nice ;-)\n Simple 3-body simulation constrained to 2D \"\"\"\n\n# run it at os command line:\n# osprompt> py three_body_2d.py\n\n# v8 - adjusted to run in pythonista\n# v7 - put previous comments at bottom; some tweaking for PEP8.\n# Include a 3D plot with t as the z axis (based on lines3d_demo.py)\n# (Test using git.)\n# Shared as a gist:https://gist.github.com/f5fa24c52bc6d6087e3dc6f3c62ced09\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\n# max time to simulate\ntmax = 6.0\n# time step size\ndt = 0.001\n\n# include a \"softening\" parameter in the Force calculation\nepsilon = 1.0e-4\n\n# Gravitational constant... (here for completeness)\ngrav = 0.8\n\n# variables and initial values for each body's x,y vx,vy and mass:\nb1x = 1.0\nb1y = 0.0\nb1vx = 0.0\nb1vy = 0.5\nb1m = 3.0\nb2x = -0.5\nb2y = 0.86\nb2vx = 0.5\nb2vy = 0.0\nb2m = 4.1\nb3x = -0.5\nb3y = -0.86\nb3vx = -0.5\nb3vy = 0.0\nb3m = 3.9\n\n\n# adjust the v's so that the CM is constant (total p = 0)\npx = b1vx * b1m + b2vx * b2m + b3vx * b3m\ndv = px / (b1m + b2m + b3m)\nb1vx -= dv\nb2vx -= dv\nb3vx -= dv\n# y axis\npy = b1vy * b1m + b2vy * b2m + b3vy * b3m\ndv = py / (b1m + b2m + b3m)\nb1vy -= dv\nb2vy -= dv\nb3vy -= dv\n\n##colors = np.random.rand(3)\ncolors = [0.07167369, 0.6313451, 0.98]\n\nprint()\nprint(\"(\", b1x, b1y, \") (\", b2x, b2y, \") (\", b3x, b3y, \")\")\nprint()\n# print(colors)\n\n# keep track of the locations, starting at\nxs = [b1x, b2x, b3x]\nys = [b1y, b2y, b3y]\n\n# coming into the Leapfrog loop they want \"a0\", so do this:\n# calc distances squared - these are useful\nr12sq = (b2x - b1x)**2 + (b2y - b1y)**2\nr13sq = (b3x - b1x)**2 + (b3y - b1y)**2\nr23sq = (b3x - b2x)**2 + (b3y - b2y)**2\n# calc the forces\nftemp = grav * (b1m * b2m / r12sq) / np.sqrt(r12sq + epsilon)\nf12x = ftemp * (b2x - b1x)\nf12y = ftemp * (b2y - b1y)\nftemp = grav * (b1m * b3m / r13sq) / np.sqrt(r13sq + epsilon)\nf13x = ftemp * (b3x - b1x)\nf13y = ftemp * (b3y - b1y)\nftemp = grav * (b2m * b3m / r23sq) / np.sqrt(r23sq + epsilon)\nf23x = ftemp * (b3x - b2x)\nf23y = ftemp * (b3y - b2y)\n# these forces can be used at step zero..\n#\n# do enough time steps to get to tmax\ntotalsteps = int(tmax / dt)\nfor thisstep in range(totalsteps):\n # create the x_n+1/2 values; they replace the current x's:\n b1x += 0.5 * dt * b1vx\n b1y += 0.5 * dt * b1vy\n b2x += 0.5 * dt * b2vx\n b2y += 0.5 * dt * b2vy\n b3x += 0.5 * dt * b3vx\n b3y += 0.5 * dt * b3vy\n # if it's the first time through add in acceleration:\n if (thisstep == 0):\n b1x += 0.25 * dt * dt * (f12x + f13x) / b1m\n b1y += 0.25 * dt * dt * (f12y + f13y) / b1m\n b2x += 0.25 * dt * dt * (-1.0 * f12x + f23x) / b2m\n b2y += 0.25 * dt * dt * (-1.0 * f12y + f23y) / b2m\n b3x += 0.25 * dt * dt * (-1.0 * f13x - f23x) / b3m\n b3y += 0.25 * dt * dt * (-1.0 * f13y - f23y) / b3m\n # do the force calculations for the x_n+1/2 values:\n # calc distances squared - these are useful\n r12sq = (b2x - b1x)**2 + (b2y - b1y)**2\n r13sq = (b3x - b1x)**2 + (b3y - b1y)**2\n r23sq = (b3x - b2x)**2 + (b3y - b2y)**2\n # calc the forces\n ftemp = grav * (b1m * b2m / r12sq) / np.sqrt(r12sq + epsilon)\n f12x = ftemp * (b2x - b1x)\n f12y = ftemp * (b2y - b1y)\n ftemp = grav * (b1m * b3m / r13sq) / np.sqrt(r13sq + epsilon)\n f13x = ftemp * (b3x - b1x)\n f13y = ftemp * (b3y - b1y)\n ftemp = grav * (b2m * 
b3m / r23sq) / np.sqrt(r23sq + epsilon)\n f23x = ftemp * (b3x - b2x)\n f23y = ftemp * (b3y - b2y)\n # update the velocities to v_n+1\n b1vx += dt * (f12x + f13x) / b1m\n b1vy += dt * (f12y + f13y) / b1m\n b2vx += dt * (-1.0 * f12x + f23x) / b2m\n b2vy += dt * (-1.0 * f12y + f23y) / b2m\n b3vx += dt * (-1.0 * f13x - f23x) / b3m\n b3vy += dt * (-1.0 * f13y - f23y) / b3m\n # update the positions to x_n+1\n b1x += 0.5 * dt * b1vx\n b1y += 0.5 * dt * b1vy\n b2x += 0.5 * dt * b2vx\n b2y += 0.5 * dt * b2vy\n b3x += 0.5 * dt * b3vx\n b3y += 0.5 * dt * b3vy\n # append them to the list\n xs += [b1x, b2x, b3x]\n ys += [b1y, b2y, b3y]\n\nprint(\"(\", b1x, b1y, \") (\", b2x, b2y, \") (\", b3x, b3y, \")\")\nprint()\n\n# Show paths on 2D plot\nfig2d = plt.figure(1)\nplt.scatter(xs, ys, c=(int(len(xs) / 3)) * colors,\n s=20, alpha=0.5, edgecolors='face')\nplt.show()\n\nif 1 == 1:\n # Show paths in 3D\n mpl.rcParams['legend.fontsize'] = 10\n\n fig = plt.figure(2)\n ax = fig.gca(projection='3d')\n\n # Make a z array - time!\n zs = dt * np.array(range(totalsteps))\n\n ax.plot(xs[0:3 * totalsteps:3], ys[0:3 * totalsteps:3], zs,\n label='Body 1')\n ax.plot(xs[1:3 * totalsteps:3], ys[1:3 * totalsteps:3], zs,\n label='Body 2')\n ax.plot(xs[2:3 * totalsteps:3], ys[2:3 * totalsteps:3], zs,\n label='Body 3')\n #ax.plot(x, y, z, label='parametric curve')\n #ax.plot(x, y, 0.5*z, label='squished curve')\n ax.legend()\n\n plt.show()\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n# Previous versions info:\n# v6 - all bodies moving, adjust parameters for fun... very sensitive.\n# v5 - Implemented: Leapfrog integration (remove the v4 A-B stuff)\n# https://msdn.microsoft.com/en-us/library/dn528554(v=vs.85).aspx\n# Check/compare this to the v3 results: * This is better, e.g., dt=0.05 not as bad. *\n# dt=0.05: ( 0.429118848385 1.71982512641 ) ( 0.393327671913 0.943054785555 ) ( -1.0 -0.86 )\n# dt=0.03: ( 0.449177825241 1.69442795657 ) ( 0.378174762019 0.960296617147 ) ( -0.9995 -0.86 )\n# dt=0.01: ( 0.487766620217 1.69262159742 ) ( 0.350414668134 0.962959806766 ) ( -1.0 -0.86 )\n# dt=0.002: ( 0.487766620217 1.69262159742 ) ( 0.350414668134 0.962959806766 ) ( -1.0 -0.86 )\n# dt=0.0001:( 0.490110100467 1.69129612836 ) ( 0.348699926487 0.963929662175 ) ( -1.0 -0.86 )\n# v4 - added epsilon; tried to improve the interations, e.g. by using:\n# Two-step Adams-Bashforth (on https://en.wikipedia.org/wiki/Linear_multistep_method )\n# y_n+2 = y_n+1 + 3/2 dt f_n+1(t,y) - 1/2 dt f_n(t,y)\n# This didn't work... 
because v' is not f(t,v) ?\n# v3 - set to two masses only by zeroing f13, f23; adjust v's for a wide orbit.\n# Check numerical accuracy by looking at t=10 point vs dt, for simple \"dv=a*dt, dx=v*dt\" method:\n# dt=0.05: ( 0.279169636664 1.62654526791 ) ( 0.503046607319 1.01130834056 ) ( -1.0 -0.86 )\n# eps.1e-4 ( 0.278340759037 1.62615345622 ) ( 0.503653103143 1.01159503203 ) ( -1.0 -0.86 )\n# dt=0.03: ( 0.361276244657 1.65494013081 ) ( 0.442492991714 0.989190148188 ) ( -0.9995 -0.86 )\n# dt=0.01: ( 0.461599078445 1.68496607947 ) ( 0.369561649918 0.968561405263 ) ( -1.0 -0.86 )\n# dt=0.005: ( 0.477366118664 1.68809842666 ) ( 0.358024791221 0.966269443908 ) ( -1.0 -0.86 )\n# dt=0.002: ( 0.485886985153 1.68993737755 ) ( 0.351790010864 0.964923870083 ) ( -1.0 -0.86 )\n# dt=0.001: ( 0.488567582574 1.69055396488 ) ( 0.349828598116 0.964472708628 ) ( -1.0 -0.86 )\n# dt=0.0001:( 0.490911201825 1.69111267825 ) ( 0.348113754762 0.964063893965 ) ( -1.0 -0.86 )\n# eps.1e-4 ( 0.489854101957 1.69123302781 ) ( 0.34888724247 0.963975833309 ) ( -1.0 -0.86 )\n# v2 - improve organization; use tmax; all 3 masses move.\n# v1 - just starting...\n" ]
[ [ "matplotlib.pyplot.show", "numpy.sqrt", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TPCD/LifelongReID
[ "cb33f9c29fe398e7546db345fab1c338dda8252f", "cb33f9c29fe398e7546db345fab1c338dda8252f" ]
[ "lreid/models/LwFnet.py", "lreid/methods/drop_grad.py" ]
[ "import torch.nn as nn\nimport torchvision\nimport copy\nimport torch\nimport numpy as np\nfrom .bnneck import BNClassifier, Classifier, Classifier_without_bias\nfrom torch.autograd import Variable\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('BatchNorm') != -1:\n if m.affine:\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight, std=0.001)\n if m.bias:\n nn.init.constant_(m.bias, 0.0)\n\n\nclass GlobalPoolFlat(nn.Module):\n def __init__(self, pool_mode='avg'):\n super(GlobalPoolFlat, self).__init__()\n if pool_mode == 'avg':\n self.pool = nn.AdaptiveAvgPool2d(1)\n else:\n self.pool = nn.AdaptiveMaxPool2d(1)\n\n def forward(self, x):\n x = self.pool(x)\n if len(x.size()) == 4:\n n, c = x.size(0), x.size(1)\n else:\n assert len(x.size()) == 4\n flatted = x.view(n, -1)\n assert flatted.size(1) == c\n return flatted\n\n\n\n\n\nclass LwFNet(nn.Module):\n def __init__(self, class_num_list, pretrained=True):\n super(LwFNet, self).__init__()\n\n self.class_num_list = class_num_list\n # backbone and optimize its architecture\n resnet = torchvision.models.resnet50(pretrained=pretrained)\n resnet.layer4[0].conv2.stride = (1, 1)\n resnet.layer4[0].downsample[0].stride = (1, 1)\n self.backbone = nn.Sequential(\n copy.deepcopy(resnet.conv1),\n copy.deepcopy(resnet.bn1),\n # copy.deepcopy(resnet.relu), # no relu\n copy.deepcopy(resnet.maxpool),\n copy.deepcopy(resnet.layer1),\n copy.deepcopy(resnet.layer2),\n copy.deepcopy(resnet.layer3[0])) # conv4_1\n # cnn backbone\n\n res_conv4 = nn.Sequential(*resnet.layer3[1:])\n res_conv5 = resnet.layer4\n self.feature_dim = resnet.fc.in_features\n self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4),\n copy.deepcopy(res_conv5),\n GlobalPoolFlat(pool_mode='avg'),\n )\n del resnet\n\n # classifier\n\n self.classifier_dict = nn.ModuleDict()\n for step, num in enumerate(self.class_num_list):\n self.classifier_dict[f'step:{step}'] = BNClassifier(self.feature_dim, num)\n\n def forward(self, x, current_step=0):\n if isinstance(current_step, list):\n feature_maps = self.backbone(x)\n cls_score_list = []\n features = self.encoder_feature(feature_maps)\n for c_s in current_step:\n bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features)\n cls_score_list.append(cls_score)\n if self.training:\n # cls_score = torch.cat(cls_score_list, dim=1)\n return features, cls_score_list, feature_maps\n else:\n return bned_features, feature_maps\n else:\n feature_maps = self.backbone(x)\n features = self.encoder_feature(feature_maps)\n bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features)\n if self.training:\n return features, cls_score, feature_maps\n else:\n return bned_features, feature_maps\n\n\n\n def classify_latent_codes(self, latent_codes, current_step):\n if isinstance(current_step, list):\n cls_score_list = []\n for c_s in current_step:\n bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](latent_codes)\n cls_score_list.append(cls_score)\n if self.training:\n # cls_score = torch.cat(cls_score_list, dim=1)\n return None, cls_score_list, None\n else:\n 
return bned_features, None\n else:\n bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes)\n if self.training:\n return None, cls_score, None\n else:\n return bned_features, None\n\n\nclass LwFNet_without_bn(nn.Module):\n def __init__(self, class_num_list, pretrained=True):\n super(LwFNet_without_bn, self).__init__()\n\n self.class_num_list = class_num_list\n # backbone and optimize its architecture\n resnet = torchvision.models.resnet50(pretrained=pretrained)\n resnet.layer4[0].conv2.stride = (1, 1)\n resnet.layer4[0].downsample[0].stride = (1, 1)\n self.backbone = nn.Sequential(\n copy.deepcopy(resnet.conv1),\n copy.deepcopy(resnet.bn1),\n # copy.deepcopy(resnet.relu), # no relu\n copy.deepcopy(resnet.maxpool),\n copy.deepcopy(resnet.layer1),\n copy.deepcopy(resnet.layer2),\n copy.deepcopy(resnet.layer3[0])) # conv4_1\n # cnn backbone\n\n res_conv4 = nn.Sequential(*resnet.layer3[1:])\n res_conv5 = resnet.layer4\n feature_dim = resnet.fc.in_features\n self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4),\n copy.deepcopy(res_conv5),\n GlobalPoolFlat(pool_mode='avg'),\n )\n del resnet\n\n # classifier\n\n self.classifier_dict = nn.ModuleDict()\n for step, num in enumerate(self.class_num_list):\n self.classifier_dict[f'step:{step}'] = Classifier(feature_dim, num)\n\n def forward(self, x, current_step):\n if isinstance(current_step, list):\n feature_maps = self.backbone(x)\n cls_score_list = []\n features = self.encoder_feature(feature_maps)\n for c_s in current_step:\n bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features)\n cls_score_list.append(cls_score)\n if self.training:\n # cls_score = torch.cat(cls_score_list, dim=1)\n return features, cls_score_list, feature_maps\n else:\n return bned_features, feature_maps\n else:\n feature_maps = self.backbone(x)\n features = self.encoder_feature(feature_maps)\n bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features)\n if self.training:\n return features, cls_score, feature_maps\n else:\n return bned_features, feature_maps\n\n def classify_featuremaps(self, featuremaps):\n features = self.encoder_feature(featuremaps)\n bned_features, cls_score = self.classifier(features)\n return cls_score\n\n def classify_latent_codes(self, latent_codes, current_step):\n bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes)\n return cls_score\n\n\nclass LwFNet_without_bn_bias(nn.Module):\n def __init__(self, class_num_list, pretrained=True):\n super(LwFNet_without_bn_bias, self).__init__()\n\n self.class_num_list = class_num_list\n # backbone and optimize its architecture\n resnet = torchvision.models.resnet50(pretrained=pretrained)\n resnet.layer4[0].conv2.stride = (1, 1)\n resnet.layer4[0].downsample[0].stride = (1, 1)\n self.backbone = nn.Sequential(\n copy.deepcopy(resnet.conv1),\n copy.deepcopy(resnet.bn1),\n # copy.deepcopy(resnet.relu), # no relu\n copy.deepcopy(resnet.maxpool),\n copy.deepcopy(resnet.layer1),\n copy.deepcopy(resnet.layer2),\n copy.deepcopy(resnet.layer3[0])) # conv4_1\n # cnn backbone\n\n res_conv4 = nn.Sequential(*resnet.layer3[1:])\n res_conv5 = resnet.layer4\n feature_dim = resnet.fc.in_features\n self.encoder_feature = nn.Sequential(copy.deepcopy(res_conv4),\n copy.deepcopy(res_conv5),\n GlobalPoolFlat(pool_mode='avg'),\n )\n del resnet\n\n # classifier\n\n self.classifier_dict = nn.ModuleDict()\n for step, num in enumerate(self.class_num_list):\n self.classifier_dict[f'step:{step}'] = 
Classifier_without_bias(feature_dim, num)\n\n def forward(self, x, current_step):\n if isinstance(current_step, list):\n feature_maps = self.backbone(x)\n cls_score_list = []\n features = self.encoder_feature(feature_maps)\n for c_s in current_step:\n bned_features, cls_score = self.classifier_dict[f'step:{c_s}'](features)\n cls_score_list.append(cls_score)\n if self.training:\n # cls_score = torch.cat(cls_score_list, dim=1)\n return features, cls_score_list, feature_maps\n else:\n return bned_features, feature_maps\n else:\n feature_maps = self.backbone(x)\n features = self.encoder_feature(feature_maps)\n bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](features)\n if self.training:\n return features, cls_score, feature_maps\n else:\n return bned_features, feature_maps\n\n def classify_featuremaps(self, featuremaps):\n features = self.encoder_feature(featuremaps)\n bned_features, cls_score = self.classifier(features)\n return cls_score\n\n def classify_latent_codes(self, latent_codes, current_step):\n bned_features, cls_score = self.classifier_dict[f'step:{current_step}'](latent_codes)\n return cls_score\n\n", "import numpy as np\nimport torch\n\nclass DropGrad(torch.nn.Module):\n def __init__(self, method='gaussian', rate=0.1, schedule='constant'):\n super(DropGrad, self).__init__()\n self.method = method\n self.rate = rate if self.method != 'gaussian' else np.sqrt(rate/(1 - rate))\n self.schedule = schedule\n\n def update_rate(self, epoch ,stop_epoch):\n if self.schedule == 'constant':\n self.cur_rate = self.rate\n elif self.schedule == 'linear':\n self.cur_rate = self.rate * epoch / (stop_epoch - 1)\n else:\n raise Exception('no such DropGrad schedule')\n\n def forward(self, input):\n if self.method == 'binary':\n output = input * (torch.gt(torch.rand_like(input), self.cur_rate).float() * (1 / (1 - self.cur_rate)))\n elif self.method == 'gaussian':\n output = input * torch.normal(mean=torch.ones_like(input), std=torch.ones_like(input)*self.cur_rate)\n elif self.method == 'none':\n output = input\n else:\n raise Exception('no such DropGrad method')\n return output\n" ]
[ [ "torch.nn.Sequential", "torch.nn.AdaptiveMaxPool2d", "torch.nn.init.constant_", "torch.nn.ModuleDict", "torch.nn.init.normal_", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.kaiming_normal_" ], [ "torch.ones_like", "numpy.sqrt", "torch.rand_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vlstyxz/Brain-Computer-Interface-with-Neurosky
[ "185d31a6e8d044fb766838947c37eec0af8f84f4" ]
[ "knn_train.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 17 20:43:41 2019\n\n@author: anilosmantur\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn import metrics\nfrom pylab import rcParams\nrcParams['figure.figsize'] = 10, 5\n\nn_samples = 30#91\n\ndataNameList = ['attention','meditation','rawValue','delta','theta','lowAlpha','highAlpha',\n 'lowBeta','highBeta','lowGamma','midGamma','poorSignal']\nfeatureList = ['attention','meditation','rawValue','delta','theta','lowAlpha','highAlpha',\n 'lowBeta','highBeta','lowGamma','midGamma']\n\nlabels = ['focus','relax', 'upWord', 'downWord', \n 'upColor', 'downColor', \n 'CyanUP','greenDOWN', 'yellowRIGHT', 'BlackLEFT']#,'blink']\n\nlabels = ['relax','upColor','CyanUP']\n\nn_label = len(labels)\n#label = labels[2]\n#count = 0\ntrainDataDict = dict()\nfor data in dataNameList:\n trainDataDict[data] = []\ntestDataDict = dict()\nfor data in dataNameList:\n testDataDict[data] = []\n\ndef load_data(dataDict, label, count): \n for data in dataNameList:\n dataDict[data].append(np.load('dataset/{}/{}/{}.npy'.format(label,count,data))[:100])\n\n#n_samples = 10\ntest_n_samples = int(n_samples/2)\ntest_size = n_label * int(n_samples/2)\ntrain_n_samples = round(n_samples/2)\ntrain_size = n_label * round(n_samples/2)\n#nums = np.arange(n_samples)*2\nnums = np.arange(n_samples)\ntrainNums = np.concatenate([nums[:5],nums[10:15],nums[20:25]])#,nums[31:41], nums[51:61],nums[71:81]])\n#trainNums = nums[:5]\nnp.random.shuffle(trainNums)\ntestNums = np.concatenate([nums[5:10],nums[15:20],nums[25:30]])#,nums[41:51], nums[61:71],nums[81:91]])\n#testNums = nums[5:10]\nnp.random.shuffle(testNums)\nfor label in labels:\n for i in trainNums:\n load_data(trainDataDict,label, i)\n\nfor label in labels:\n for i in testNums:\n load_data(testDataDict,label, i)\n\n\nfor data in dataNameList:\n trainDataDict[data] = np.array(trainDataDict[data])\nfor data in dataNameList:\n testDataDict[data] = np.array(testDataDict[data])\n\n#connect features\ntrainData = []\nfor data in featureList:\n trainData.append(trainDataDict[data])\ntrainData = np.array(trainData).transpose(1,0,2)\ntestData = []\nfor data in featureList:\n testData.append(testDataDict[data])\ntestData = np.array(testData).transpose(1,0,2)\n\ntrainData = trainData.astype('float32')\ntestData = testData.astype('float32')\n## normalization needed\nscaler = MinMaxScaler()\nprint(scaler.fit(trainData.reshape(-1, 1100)))\ntrainData = scaler.transform(trainData.reshape(-1, 1100))\ntestData = scaler.transform(testData.reshape(-1, 1100))\n\ntrainLabels = []\nfor i in range(n_label):\n trainLabels.append(np.ones(train_n_samples)*i )#,np.ones(15)*2])\ntrainLabels = np.concatenate(trainLabels)\n\ntestLabels = []\nfor i in range(n_label):\n testLabels.append(np.ones(test_n_samples)*i )#,np.ones(15)*2])\ntestLabels = np.concatenate(testLabels)\n\nfrom sklearn.model_selection import GridSearchCV\n\nnN = 7\nparam_grid = {\"n_neighbors\":np.arange(0,nN)*2 + 1}\n\nprint(trainData.reshape(train_size, -1).shape)\n\nknn = KNeighborsClassifier()\nknn_cv = GridSearchCV(knn,param_grid,cv=5)\nknn_cv.fit(trainData.reshape(train_size, -1), trainLabels)\nprint(knn_cv.best_score_)\nprint(knn_cv.best_params_)\n\npreds = np.array(knn_cv.predict(testData.reshape(test_size, -1)))\nprobs = np.array(knn_cv.predict_proba(testData.reshape(test_size, -1)))\nscores = metrics.accuracy_score(testLabels, 
preds)\nprint(' N class: ', n_label)\nprint('test %: {:6.2f}%'.format(scores*100))\n\n\"\"\"\ni = 0\n\nknn = KNeighborsClassifier()\nknn_cv = GridSearchCV(knn,param_grid,cv=5)\nknn_cv.fit(trainDataDict[dataNameList[i]], Labels)\nprint(knn_cv.best_score_)\nprint(knn_cv.best_params_)\n\n\"\"\"\nnN = 3\nneigh = KNeighborsClassifier(n_neighbors=nN)\nneigh.fit(trainData.reshape(train_size, -1), trainLabels) \n\npreds = np.array(neigh.predict(testData.reshape(test_size, -1)))\nprobs = np.array(neigh.predict_proba(testData.reshape(test_size, -1)))\nscores = metrics.accuracy_score(testLabels, preds)\nprint('N class: ', n_label,'\\nn neighbour: ', nN)\nprint('test %: {:6.2f}%'.format(scores*100))\n\npreds = np.array(neigh.predict(trainData.reshape(train_size, -1)))\nprobs = np.array(neigh.predict_proba(trainData.reshape(train_size, -1)))\nscores = metrics.accuracy_score(trainLabels, preds)\nprint('N class: ', n_label,'\\nn neighbour: ', nN)\nprint('train %: {:6.2f}%'.format(scores*100))\n\nimport pickle  # needed below; leaving this commented out made pickle.load a NameError\n#\n#knnPickle = open('models/knn_best.pkl', 'wb')\n#pickle.dump(neigh, knnPickle)\n\nneigh = pickle.load(open('models/knn_best.pkl', 'rb'))\npreds = np.array(neigh.predict(testData.reshape(test_size, -1)))\nprobs = np.array(neigh.predict_proba(testData.reshape(test_size, -1)))\nscores = metrics.accuracy_score(testLabels, preds)\nprint('N class: ', n_label,'\\nn neighbour: ', nN)\nprint('test %: {:6.2f}%'.format(scores*100))\n\npreds = np.array(neigh.predict(trainData.reshape(train_size, -1)))\nprobs = np.array(neigh.predict_proba(trainData.reshape(train_size, -1)))\nscores = metrics.accuracy_score(trainLabels, preds)\nprint('N class: ', n_label,'\\nn neighbour: ', nN)\nprint('train %: {:6.2f}%'.format(scores*100))" ]
[ [ "sklearn.model_selection.GridSearchCV", "numpy.arange", "numpy.random.shuffle", "sklearn.neighbors.KNeighborsClassifier", "numpy.concatenate", "numpy.ones", "numpy.array", "sklearn.preprocessing.MinMaxScaler", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stanfordmlgroup/CXR-RePaiR
[ "5142820c85f0098a233441710a6f8a0cec004644" ]
[ "retrieval_baseline_edit.py" ]
[ "from cgi import test\nimport torch\nfrom torchvision import transforms\nimport torchvision.models as models\nfrom torch.utils import data\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy.spatial.distance import cdist\n\nfrom PIL import Image\nimport pandas as pd\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport numpy as np\nimport h5py\nimport os\nimport time\n\nclass Identity(torch.nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n \n def forward(self, x):\n return x\n\n\ndef get_valid_indices(train_path, test_path):\n train_df = pd.read_csv(train_path, index_col=False)\n test_study_ids = pd.read_csv(test_path, index_col=False)['study_id']\n train_df['study_id'] = train_df['filename'].astype(str).str[1:-4].astype(int)\n train_study_ids = train_df['study_id']\n contained = train_study_ids.isin(test_study_ids)\n not_present_indices = list(contained[contained == False].index)\n return not_present_indices\n\nclass MIMICEncodingsDataset(data.Dataset):\n def __init__(self, encodings_path):\n encodings_file = h5py.File(encodings_path)\n self.encodings = encodings_file.get('encodings')\n self.reports = encodings_file.get('reports')\n self.indices = get_valid_indices('/deep/group/data/med-data/mimic_cxr_impressions.csv', '/deep/group/data/med-data/mimic-cxr-jpg-split/bootstrap_test/reports.csv')\n\n def __len__(self):\n return np.shape(self.encodings)[0]\n # return len(self.indices)\n\n def __getitem__(self, idx):\n return self.encodings[idx], self.reports[idx]\n # train_index = self.indices[idx]\n # return self.encodings[train_index], self.reports[train_index]\n\n\nclass MIMICDataset(data.Dataset):\n \"\"\"Represents an abstract HDF5 dataset.\n \"\"\"\n def __init__(self, img_path, txt_path, size=None, transform=None):\n super().__init__()\n self.img_dset = h5py.File(img_path, 'r')['cxr_unprocessed']\n self.txt_dset = pd.read_csv(txt_path)['report']\n self.transform = transform\n \n def __len__(self):\n return len(self.txt_dset)\n \n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n \n img = self.img_dset[idx] # np array, (320, 320)\n img = np.expand_dims(img, axis=0)\n img = np.repeat(img, 3, axis=0)\n txt = self.txt_dset[idx] # python str\n\n img = torch.from_numpy(img) # torch, (3, 320, 320)\n if self.transform:\n img = self.transform(img)\n sample = {'img': img, 'txt': txt }\n \n return sample\n\nclass MIMICTestDataset(torch.utils.data.Dataset):\n def __init__(self, img_path, transform=None):\n super().__init__()\n self.img_dset = h5py.File(img_path, 'r')['cxr']\n self.transform = transform\n\n def __len__(self):\n return len(self.img_dset)\n\n def __getitem__(self, idx):\n img = self.img_dset[idx] # np array, (320, 320)\n img = np.expand_dims(img, axis=0)\n img = np.repeat(img, 3, axis=0)\n\n img = torch.from_numpy(img) # torch, (3, 320, 320)\n if self.transform:\n img = self.transform(img)\n return img\n\nclass CheXpertDataset(data.Dataset):\n def __init__(self, img_path, transform=None):\n super().__init__()\n\n imgs_df = pd.read_csv(img_path)\n root_path = \"/deep/group/CheXpert/CodaLab/\"\n self.scale = 320\n self.transform = transform\n self.paths = []\n for _path in imgs_df[\"Path\"]:\n if \"view1\" not in _path: continue # TODO: check how to aggregate studies, consider them independent??\n _pth = _path.replace(\"CheXpert-v1.0\", \"\")\n _pth = Path(root_path+_pth)\n self.paths.append(_pth)\n def __len__(self):\n return len(self.paths)\n \n def 
__getitem__(self, idx):\n # img = Image.open(self.paths[idx]).resize((self.scale, self.scale)).convert('RGB')\n _np_img = np.asarray(Image.open(self.paths[idx]).resize((self.scale, self.scale), resample=Image.BICUBIC)) # these images all have diff sizes!\n _np_img = _np_img.astype('float32')\n img = np.expand_dims(_np_img, axis=0)\n img = np.repeat(img, 3, axis=0)\n img = torch.from_numpy(img)\n\n if self.transform:\n img = self.transform(img) # goes from H x W x C to C x H x W\n return img\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# SET VARIABLES\nsave_train_encodings = False\nretrieve_most_similar_train = True\nbatch_size = 256\nchexpert_filepath = \"/deep/group/CheXpert/CodaLab/test_image_paths.csv\"\ncxr_filepath = '/deep/group/data/med-data/cxr.h5'\ntxt_filepath = '/deep/group/data/med-data/mimic_cxr_impressions.csv'\nencodings_type = 'chexpert'\nout_path = '/deep/u/markendo/CXR-RePaiR/results/CheXpert/Retrieval-Baseline/generated_reports.csv'\nif encodings_type == 'chexpert':\n mimic_h5py_path = '/deep/group/report-clip/resnet_mimic_encodings/chexpert.h5'\n # out_path = '/deep/u/markendo/R2Gen/out/retrieval_baseline_mimic_generated_reports.csv'\n state_dict_path = '/deep/u/markendo/aihc-winter19-robustness/chexpert-model/classification_model_checkpoints/resnet18/1z6xfh2n/epoch=1-chexpert_competition_AUROC=0.88_v0.ckpt'\nelif encodings_type == 'moco-cxr':\n mimic_h5py_path = '/deep/group/report-clip/resnet_mimic_encodings/moco-cxr.h5'\n # out_path = '/deep/group/report-clip/aihc-win21-clip/notebooks/eval/out/moco_normalized/moco_normalized_generated_reports.csv'\n state_dict_path = '/deep/group/aihc-bootcamp-spring2020/cxr_fewer_samples/experiments/ntruongv/r8w1n416s_20201202_resnet18-chexpert-0.0001-pretrained_20201202-093038_SLURM1931462/checkpoint_0000.pth.tar'\n\ntime_before = time.time()\n\n# load resnet 18 model\nmodel = models.resnet18(pretrained=False)\nmodel.fc = Identity()\nstate_dict = torch.load(state_dict_path)\nstate_dict = state_dict['state_dict']\nif encodings_type == 'chexpert':\n for key in list(state_dict.keys()):\n new_key = key.replace('model.model.', '')\n if (new_key != \"fc.bias\" and new_key != \"fc.weight\"):\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\nelif encodings_type == 'moco-cxr':\n for key in list(state_dict.keys()):\n if 'encoder_q' in key: # discard encoder_k, only use query not key\n new_key = key.replace('module.encoder_q.', '')\n state_dict[new_key] = state_dict[key]\n del state_dict[key]\n del state_dict['fc.0.weight']\n del state_dict['fc.0.bias']\n del state_dict['fc.2.weight']\n del state_dict['fc.2.bias']\nmodel.load_state_dict(state_dict)\nmodel = torch.nn.DataParallel(model)\nmodel.to(device)\n\nif save_train_encodings:\n mimic_transform = transforms.Compose([\n # means computed from sample in `cxr_stats` notebook\n transforms.Normalize((101.48761, 101.48761, 101.48761), (83.43944, 83.43944, 83.43944)),\n ])\n mimic_dset = MIMICDataset(img_path=cxr_filepath,\n txt_path=txt_filepath, transform=mimic_transform)\n mimic_loader = data.DataLoader(mimic_dset, batch_size=batch_size)\n # get the images and impressions\n\n if not os.path.exists(Path(mimic_h5py_path).parent.absolute()):\n os.makedirs(Path(mimic_h5py_path).parent.absolute())\n\n dset_size = len(mimic_loader.dataset)\n with h5py.File(mimic_h5py_path, 'w') as f:\n with torch.no_grad():\n encodings_dset = f.create_dataset('encodings', shape=(dset_size, 512))\n reports_dset = f.create_dataset('reports', shape=(dset_size,), 
dtype=h5py.string_dtype())\n for i, pack in enumerate(tqdm(mimic_loader)):\n imgs = pack['img']\n resnet_encodings = model(imgs)\n encodings_arr = resnet_encodings.data.cpu().numpy()\n reports = pack['txt']\n\n start_index = i * batch_size\n if i == len(mimic_loader) - 1:\n encodings_dset[start_index:] = encodings_arr\n reports_dset[start_index:] = reports\n else:\n encodings_dset[start_index:start_index+batch_size] = encodings_arr\n reports_dset[start_index:start_index+batch_size] = reports\n saved_file = h5py.File(mimic_h5py_path)\n print(saved_file.get('encodings')[0])\n print(saved_file.get('reports')[0])\n\n\n# retrieve train examples that are most similar to train set encodings\nif retrieve_most_similar_train:\n # MIMIC-CXR test set\n # CXR_FILEPATH = '/deep/group/data/med-data/mimic_test_cxr.h5'\n # CXR_FILEPATH = '/deep/group/data/med-data/mimic-cxr-jpg-split/bootstrap_test/cxr.h5'\n # mimic_transform = transforms.Compose([\n # # means computed from sample in `cxr_stats` notebook\n # transforms.Normalize((101.48761, 101.48761, 101.48761), (83.43944, 83.43944, 83.43944)),\n # ])\n # # mimic_transform = transforms.Compose([\n # # transforms.Resize((224, 224)),\n # # transforms.ToTensor(),\n # # transforms.Normalize((0.485, 0.456, 0.406),\n # # (0.229, 0.224, 0.225))])\n # mimic_test_dset = MIMICTestDataset(CXR_FILEPATH, transform=mimic_transform)\n # loader = data.DataLoader(mimic_test_dset, shuffle=False, batch_size=batch_size)\n\n\n # CheXpert test set\n chexpert_transform = transforms.Compose([\n # transforms.ToTensor(),\n # transforms.Normalize((.5020, .5020, .5020),(.085585, .085585, .085585)),\n transforms.Normalize((129.4185, 129.4185, 129.4185), (73.3378, 73.3378, 73.3378))])\n torch_chexpert_dset = CheXpertDataset(img_path=chexpert_filepath, transform=chexpert_transform)\n chexpert_loader = data.DataLoader(torch_chexpert_dset, batch_size=batch_size)\n\n # data_mean = next(iter(chexpert_loader))\n # print(data_mean[0].mean(), data_mean[0].std())\n\n train_encodings_dset = MIMICEncodingsDataset(mimic_h5py_path)\n train_encodings_loader = data.DataLoader(train_encodings_dset, batch_size=256)\n\n output_reports = []\n\n with torch.no_grad():\n for i, images in enumerate(chexpert_loader):\n images = images.to(device)\n test_encodings_batch = model(images)\n batch_size = len(test_encodings_batch)\n highest_similarities = np.array([-1.] 
* batch_size)\n best_reports = [''] * batch_size\n for train_encodings in tqdm(train_encodings_loader):\n train_encodings_batch = train_encodings[0]\n\n # Using torch operations (for gpu)\n train_encodings_batch = train_encodings_batch.to(device)\n test_encodings_batch_norm = test_encodings_batch / test_encodings_batch.norm(dim=1)[:, None]\n train_encodings_batch_norm = train_encodings_batch / train_encodings_batch.norm(dim=1)[:, None]\n sim = torch.mm(test_encodings_batch_norm, train_encodings_batch_norm.transpose(0,1))\n maxes = torch.max(sim, dim=1)\n\n # Using np operations (for cpu)\n # sim = cosine_similarity(test_encodings_batch.cpu(), train_encodings_batch)\n # maxes = np.amax(sim, axis=1)\n\n for minibatch_index in range(batch_size):\n if maxes.values[minibatch_index] > highest_similarities[minibatch_index]:\n highest_similarities[minibatch_index] = maxes.values[minibatch_index]\n best_reports[minibatch_index] = train_encodings[1][maxes.indices[minibatch_index]].decode(\"utf-8\")\n # best_reports[minibatch_index] = train_encodings[1][np.argmax(sim[minibatch_index])].decode(\"utf-8\")\n output_reports.extend(best_reports)\n print(time.time() - time_before)\n _df = pd.DataFrame(output_reports)\n _df.columns = [\"report\"]\n _df.to_csv(out_path, index=False)" ]
[ [ "pandas.read_csv", "numpy.expand_dims", "torch.max", "torch.load", "torch.utils.data.DataLoader", "torch.is_tensor", "pandas.DataFrame", "numpy.repeat", "torch.from_numpy", "torch.no_grad", "numpy.shape", "torch.cuda.is_available", "torch.nn.DataParallel", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
Lemon-362/RGCSA-master
[ "4425f86e7e07de19a7256a57eee1642ebffb6199" ]
[ "Utils/averageAccuracy.py" ]
[ "import numpy as np\nfrom operator import truediv\n\n\ndef AA_andEachClassAccuracy(confusion_matrix):\n counter = confusion_matrix.shape[0]\n list_diag = np.diag(confusion_matrix) # 对角线\n list_raw_sum = np.sum(confusion_matrix, axis=1)\n each_acc = np.nan_to_num(truediv(list_diag, list_raw_sum))\n average_acc = np.mean(each_acc)\n return each_acc, average_acc\n" ]
[ [ "numpy.diag", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
havocesp/ta
[ "938d6a9854e2f87ebb4ca3d25c74f9bda863d65e" ]
[ "ta/trend.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: trend\n :synopsis: Trend Indicators.\n\n.. moduleauthor:: Dario Lopez Padial (Bukosabino)\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom .utils import *\n\n\ndef macd(close, n_fast=12, n_slow=26, fillna=False):\n \"\"\"Moving Average Convergence Divergence (MACD)\n\n Is a trend-following momentum indicator that shows the relationship between\n two moving averages of prices.\n\n https://en.wikipedia.org/wiki/MACD\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n n_fast(int): n period short-term.\n n_slow(int): n period long-term.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n emafast = ema(close, n_fast, fillna)\n emaslow = ema(close, n_slow, fillna)\n macd = emafast - emaslow\n if fillna:\n macd = macd.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(macd, name='MACD_%d_%d' % (n_fast, n_slow))\n\n\ndef macd_signal(close, n_fast=12, n_slow=26, n_sign=9, fillna=False):\n \"\"\"Moving Average Convergence Divergence (MACD Signal)\n\n Shows EMA of MACD.\n\n https://en.wikipedia.org/wiki/MACD\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n n_fast(int): n period short-term.\n n_slow(int): n period long-term.\n n_sign(int): n period to signal.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n emafast = ema(close, n_fast, fillna)\n emaslow = ema(close, n_slow, fillna)\n macd = emafast - emaslow\n macd_signal = ema(macd, n_sign, fillna)\n if fillna:\n macd_signal = macd_signal.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(macd_signal, name='MACD_sign')\n\n\ndef macd_diff(close, n_fast=12, n_slow=26, n_sign=9, fillna=False):\n \"\"\"Moving Average Convergence Divergence (MACD Diff)\n\n Shows the relationship between MACD and MACD Signal.\n\n https://en.wikipedia.org/wiki/MACD\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n n_fast(int): n period short-term.\n n_slow(int): n period long-term.\n n_sign(int): n period to signal.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n emafast = ema(close, n_fast, fillna)\n emaslow = ema(close, n_slow, fillna)\n macd = emafast - emaslow\n macdsign = ema(macd, n_sign, fillna)\n macd_diff = macd - macdsign\n if fillna:\n macd_diff = macd_diff.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(macd_diff, name='MACD_diff')\n\n\ndef ema_indicator(close, n=12, fillna=False):\n \"\"\"EMA\n\n Exponential Moving Average via Pandas\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n n_fast(int): n period short-term.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n ema_ = ema(close, n, fillna)\n return pd.Series(ema_, name='ema')\n\n\ndef adx(high, low, close, n=14, fillna=False):\n \"\"\"Average Directional Movement Index (ADX)\n\n The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)\n are derived from smoothed averages of these differences, and measure trend\n direction over time. 
These two indicators are often referred to\n collectively as the Directional Movement Indicator (DMI).\n\n The Average Directional Index (ADX) is in turn derived from the smoothed\n averages of the difference between +DI and -DI, and measures the strength\n of the trend (regardless of direction) over time.\n\n Using these three indicators together, chartists can determine both the\n direction and strength of the trend.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n cs = close.shift(1)\n pdm = high.combine(cs, lambda x1, x2: get_min_max(x1, x2, 'max'))\n pdn = low.combine(cs, lambda x1, x2: get_min_max(x1, x2, 'min'))\n tr = pdm - pdn\n\n trs_initial = np.zeros(n-1)\n trs = np.zeros(len(close) - (n - 1))\n trs[0] = tr.dropna()[0:n].sum()\n tr = tr.reset_index(drop=True)\n for i in range(1, len(trs)-1):\n trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]\n\n up = high - high.shift(1)\n dn = low.shift(1) - low\n pos = abs(((up > dn) & (up > 0)) * up)\n neg = abs(((dn > up) & (dn > 0)) * dn)\n\n dip_mio = np.zeros(len(close) - (n - 1))\n dip_mio[0] = pos.dropna()[0:n].sum()\n\n pos = pos.reset_index(drop=True)\n for i in range(1, len(dip_mio)-1):\n dip_mio[i] = dip_mio[i-1] - (dip_mio[i-1]/float(n)) + pos[n+i]\n\n din_mio = np.zeros(len(close) - (n - 1))\n din_mio[0] = neg.dropna()[0:n].sum()\n\n neg = neg.reset_index(drop=True)\n for i in range(1, len(din_mio)-1):\n din_mio[i] = din_mio[i-1] - (din_mio[i-1]/float(n)) + neg[n+i]\n\n dip = np.zeros(len(trs))\n for i in range(len(trs)):\n dip[i] = 100 * (dip_mio[i]/trs[i])\n\n din = np.zeros(len(trs))\n for i in range(len(trs)):\n din[i] = 100 * (din_mio[i]/trs[i])\n\n dx = 100 * np.abs((dip - din) / (dip + din))\n\n adx = np.zeros(len(trs))\n adx[n] = dx[0:n].mean()\n\n for i in range(n+1, len(adx)):\n adx[i] = ((adx[i-1] * (n - 1)) + dx[i-1]) / float(n)\n\n adx = np.concatenate((trs_initial, adx), axis=0)\n adx = pd.Series(data=adx, index=close.index)\n\n if fillna:\n adx = adx.replace([np.inf, -np.inf], np.nan).fillna(20)\n return pd.Series(adx, name='adx')\n\n\ndef adx_pos(high, low, close, n=14, fillna=False):\n \"\"\"Average Directional Movement Index Positive (ADX)\n\n The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)\n are derived from smoothed averages of these differences, and measure trend\n direction over time. 
These two indicators are often referred to\n collectively as the Directional Movement Indicator (DMI).\n\n The Average Directional Index (ADX) is in turn derived from the smoothed\n averages of the difference between +DI and -DI, and measures the strength\n of the trend (regardless of direction) over time.\n\n Using these three indicators together, chartists can determine both the\n direction and strength of the trend.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n cs = close.shift(1)\n pdm = high.combine(cs, lambda x1, x2: get_min_max(x1, x2, 'max'))\n pdn = low.combine(cs, lambda x1, x2: get_min_max(x1, x2, 'min'))\n tr = pdm - pdn\n\n trs_initial = np.zeros(n-1)\n trs = np.zeros(len(close) - (n - 1))\n trs[0] = tr.dropna()[0:n].sum()\n tr = tr.reset_index(drop=True)\n for i in range(1, len(trs)-1):\n trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]\n\n up = high - high.shift(1)\n dn = low.shift(1) - low\n pos = abs(((up > dn) & (up > 0)) * up)\n neg = abs(((dn > up) & (dn > 0)) * dn)\n\n dip_mio = np.zeros(len(close) - (n - 1))\n dip_mio[0] = pos.dropna()[0:n].sum()\n\n pos = pos.reset_index(drop=True)\n for i in range(1, len(dip_mio)-1):\n dip_mio[i] = dip_mio[i-1] - (dip_mio[i-1]/float(n)) + pos[n+i]\n\n dip = np.zeros(len(close))\n for i in range(1, len(trs)-1):\n dip[i+n] = 100 * (dip_mio[i]/trs[i])\n\n dip = pd.Series(data=dip, index=close.index)\n\n if fillna:\n dip = dip.replace([np.inf, -np.inf], np.nan).fillna(20)\n return pd.Series(dip, name='adx_pos')\n\n\ndef adx_neg(high, low, close, n=14, fillna=False):\n \"\"\"Average Directional Movement Index Negative (ADX)\n\n The Plus Directional Indicator (+DI) and Minus Directional Indicator (-DI)\n are derived from smoothed averages of these differences, and measure trend\n direction over time. 
These two indicators are often referred to\n collectively as the Directional Movement Indicator (DMI).\n\n The Average Directional Index (ADX) is in turn derived from the smoothed\n averages of the difference between +DI and -DI, and measures the strength\n of the trend (regardless of direction) over time.\n\n Using these three indicators together, chartists can determine both the\n direction and strength of the trend.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:average_directional_index_adx\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n cs = close.shift(1)\n pdm = high.combine(cs, lambda x1, x2: get_min_max(x1, x2, 'max'))\n pdn = low.combine(cs, lambda x1, x2: get_min_max(x1, x2, 'min'))\n tr = pdm - pdn\n\n trs_initial = np.zeros(n-1)\n trs = np.zeros(len(close) - (n - 1))\n trs[0] = tr.dropna()[0:n].sum()\n tr = tr.reset_index(drop=True)\n for i in range(1, len(trs)-1):\n trs[i] = trs[i-1] - (trs[i-1]/float(n)) + tr[n+i]\n\n up = high - high.shift(1)\n dn = low.shift(1) - low\n pos = abs(((up > dn) & (up > 0)) * up)\n neg = abs(((dn > up) & (dn > 0)) * dn)\n\n din_mio = np.zeros(len(close) - (n - 1))\n din_mio[0] = neg.dropna()[0:n].sum()\n\n neg = neg.reset_index(drop=True)\n for i in range(1, len(din_mio)-1):\n din_mio[i] = din_mio[i-1] - (din_mio[i-1]/float(n)) + neg[n+i]\n\n din = np.zeros(len(close))\n for i in range(1, len(trs)-1):\n din[i+n] = 100 * (din_mio[i]/float(trs[i]))\n\n din = pd.Series(data=din, index=close.index)\n\n if fillna:\n din = din.replace([np.inf, -np.inf], np.nan).fillna(20)\n return pd.Series(din, name='adx_neg')\n\n\ndef vortex_indicator_pos(high, low, close, n=14, fillna=False):\n \"\"\"Vortex Indicator (VI)\n\n It consists of two oscillators that capture positive and negative trend\n movement. A bullish signal triggers when the positive trend indicator\n crosses above the negative trend indicator or a key level.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:vortex_indicator\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n tr = (high.combine(close.shift(1, fill_value=close.mean()), max)\n - low.combine(close.shift(1, fill_value=close.mean()), min))\n trn = tr.rolling(n).sum()\n\n vmp = np.abs(high - low.shift(1, fill_value=low.mean()))\n vmm = np.abs(low - high.shift(1, fill_value=high.mean()))\n\n vip = vmp.rolling(n, min_periods=0).sum() / trn\n if fillna:\n vip = vip.replace([np.inf, -np.inf], np.nan).fillna(1)\n return pd.Series(vip, name='vip')\n\n\ndef vortex_indicator_neg(high, low, close, n=14, fillna=False):\n \"\"\"Vortex Indicator (VI)\n\n It consists of two oscillators that capture positive and negative trend\n movement. 
A bearish signal triggers when the negative trend indicator\n crosses above the positive trend indicator or a key level.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:vortex_indicator\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n tr = high.combine(close.shift(1), max) - low.combine(close.shift(1), min)\n trn = tr.rolling(n).sum()\n\n vmp = np.abs(high - low.shift(1))\n vmm = np.abs(low - high.shift(1))\n\n vin = vmm.rolling(n).sum() / trn\n if fillna:\n vin = vin.replace([np.inf, -np.inf], np.nan).fillna(1)\n return pd.Series(vin, name='vin')\n\n\ndef trix(close, n=15, fillna=False):\n \"\"\"Trix (TRIX)\n\n Shows the percent rate of change of a triple exponentially smoothed moving\n average.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n ema1 = ema(close, n, fillna)\n ema2 = ema(ema1, n, fillna)\n ema3 = ema(ema2, n, fillna)\n trix = (ema3 - ema3.shift(1, fill_value=ema3.mean())) / ema3.shift(1, fill_value=ema3.mean())\n trix *= 100\n if fillna:\n trix = trix.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(trix, name='trix_'+str(n))\n\n\ndef mass_index(high, low, n=9, n2=25, fillna=False):\n \"\"\"Mass Index (MI)\n\n It uses the high-low range to identify trend reversals based on range\n expansions. It identifies range bulges that can foreshadow a reversal of\n the current trend.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:mass_index\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n n(int): n low period.\n n2(int): n high period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n\n \"\"\"\n amplitude = high - low\n ema1 = ema(amplitude, n, fillna)\n ema2 = ema(ema1, n, fillna)\n mass = ema1 / ema2\n mass = mass.rolling(n2, min_periods=0).sum()\n if fillna:\n mass = mass.replace([np.inf, -np.inf], np.nan).fillna(n2)\n return pd.Series(mass, name='mass_index_'+str(n))\n\n\ndef cci(high, low, close, n=20, c=0.015, fillna=False):\n \"\"\"Commodity Channel Index (CCI)\n\n CCI measures the difference between a security's price change and its\n average price change. High positive readings indicate that prices are well\n above their average, which is a show of strength. 
Low negative readings\n indicate that prices are well below their average, which is a show of\n weakness.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:commodity_channel_index_cci\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n c(int): constant.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n\n \"\"\"\n pp = (high + low + close) / 3.0\n cci = (pp - pp.rolling(n, min_periods=0).mean()) / (c * pp.rolling(n, min_periods=0).std())\n if fillna:\n cci = cci.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(cci, name='cci')\n\n\ndef dpo(close, n=20, fillna=False):\n \"\"\"Detrended Price Oscillator (DPO)\n\n Is an indicator designed to remove trend from price and make it easier to\n identify cycles.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:detrended_price_osci\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n dpo = close.shift(int((0.5 * n) + 1), fill_value=close.mean()) - close.rolling(n, min_periods=0).mean()\n if fillna:\n dpo = dpo.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(dpo, name='dpo_'+str(n))\n\n\ndef kst(close, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, fillna=False):\n \"\"\"KST Oscillator (KST)\n\n It is useful to identify major stock market cycle junctures because its\n formula is weighed to be more greatly influenced by the longer and more\n dominant time spans, in order to better reflect the primary swings of stock\n market cycle.\n\n https://en.wikipedia.org/wiki/KST_oscillator\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n r1(int): r1 period.\n r2(int): r2 period.\n r3(int): r3 period.\n r4(int): r4 period.\n n1(int): n1 smoothed period.\n n2(int): n2 smoothed period.\n n3(int): n3 smoothed period.\n n4(int): n4 smoothed period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n rocma1 = ((close - close.shift(r1, fill_value=close.mean()))\n / close.shift(r1, fill_value=close.mean())).rolling(n1, min_periods=0).mean()\n rocma2 = ((close - close.shift(r2, fill_value=close.mean()))\n / close.shift(r2, fill_value=close.mean())).rolling(n2, min_periods=0).mean()\n rocma3 = ((close - close.shift(r3, fill_value=close.mean()))\n / close.shift(r3, fill_value=close.mean())).rolling(n3, min_periods=0).mean()\n rocma4 = ((close - close.shift(r4, fill_value=close.mean()))\n / close.shift(r4, fill_value=close.mean())).rolling(n4, min_periods=0).mean()\n kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)\n if fillna:\n kst = kst.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(kst, name='kst')\n\n\ndef kst_sig(close, r1=10, r2=15, r3=20, r4=30, n1=10, n2=10, n3=10, n4=15, nsig=9, fillna=False):\n \"\"\"KST Oscillator (KST Signal)\n\n It is useful to identify major stock market cycle junctures because its\n formula is weighed to be more greatly influenced by the longer and more\n dominant time spans, in order to better reflect the primary swings of stock\n market cycle.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:know_sure_thing_kst\n\n Args:\n close(pandas.Series): dataset 'Close' column.\n r1(int): r1 period.\n r2(int): r2 period.\n r3(int): r3 
period.\n r4(int): r4 period.\n n1(int): n1 smoothed period.\n n2(int): n2 smoothed period.\n n3(int): n3 smoothed period.\n n4(int): n4 smoothed period.\n nsig(int): n period to signal.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n rocma1 = ((close - close.shift(r1, fill_value=close.mean()))\n / close.shift(r1, fill_value=close.mean())).rolling(n1, min_periods=0).mean()\n rocma2 = ((close - close.shift(r2, fill_value=close.mean()))\n / close.shift(r2, fill_value=close.mean())).rolling(n2, min_periods=0).mean()\n rocma3 = ((close - close.shift(r3, fill_value=close.mean()))\n / close.shift(r3, fill_value=close.mean())).rolling(n3, min_periods=0).mean()\n rocma4 = ((close - close.shift(r4, fill_value=close.mean()))\n / close.shift(r4, fill_value=close.mean())).rolling(n4, min_periods=0).mean()\n kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)\n kst_sig = kst.rolling(nsig, min_periods=0).mean()\n if fillna:\n kst_sig = kst_sig.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(kst_sig, name='kst_sig')\n\n\ndef ichimoku_a(high, low, n1=9, n2=26, visual=False, fillna=False):\n \"\"\"Ichimoku Kinkō Hyō (Ichimoku)\n\n It identifies the trend and looks for potential signals within that trend.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n n1(int): n1 low period.\n n2(int): n2 medium period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n conv = 0.5 * (high.rolling(n1, min_periods=0).max() + low.rolling(n1, min_periods=0).min())\n base = 0.5 * (high.rolling(n2, min_periods=0).max() + low.rolling(n2, min_periods=0).min())\n\n spana = 0.5 * (conv + base)\n\n if visual:\n spana = spana.shift(n2, fill_value=spana.mean())\n\n if fillna:\n spana = spana.replace([np.inf, -np.inf], np.nan).fillna(method='backfill')\n\n return pd.Series(spana, name='ichimoku_a_'+str(n2))\n\n\ndef ichimoku_b(high, low, n2=26, n3=52, visual=False, fillna=False):\n \"\"\"Ichimoku Kinkō Hyō (Ichimoku)\n\n It identifies the trend and looks for potential signals within that trend.\n\n http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud\n\n Args:\n high(pandas.Series): dataset 'High' column.\n low(pandas.Series): dataset 'Low' column.\n n2(int): n2 medium period.\n n3(int): n3 high period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n spanb = 0.5 * (high.rolling(n3, min_periods=0).max() + low.rolling(n3, min_periods=0).min())\n\n if visual:\n spanb = spanb.shift(n2, fill_value=spanb.mean())\n\n if fillna:\n spanb = spanb.replace([np.inf, -np.inf], np.nan).fillna(method='backfill')\n\n return pd.Series(spanb, name='ichimoku_b_'+str(n2))\n\n\ndef aroon_up(close, n=25, fillna=False):\n \"\"\"Aroon Indicator (AI)\n\n Identify when trends are likely to change direction (uptrend).\n\n Aroon Up = ((N - Days Since N-day High) / N) x 100\n\n https://www.investopedia.com/terms/a/aroon.asp\n Args:\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n\n \"\"\"\n aroon_up = close.rolling(n, min_periods=0).apply(lambda x: float(np.argmax(x) + 1) / n * 100, raw=True)\n if fillna:\n aroon_up = aroon_up.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(aroon_up, name='aroon_up'+str(n))\n\n\ndef aroon_down(close, n=25, fillna=False):\n \"\"\"Aroon Indicator (AI)\n\n Identify when trends are likely to change direction (downtrend).\n\n Aroon Down = ((N - Days Since N-day Low) / N) x 100\n\n https://www.investopedia.com/terms/a/aroon.asp\n Args:\n close(pandas.Series): dataset 'Close' column.\n n(int): n period.\n fillna(bool): if True, fill nan values.\n\n Returns:\n pandas.Series: New feature generated.\n \"\"\"\n aroon_down = close.rolling(n, min_periods=0).apply(lambda x: float(np.argmin(x) + 1) / n * 100, raw=True)\n if fillna:\n aroon_down = aroon_down.replace([np.inf, -np.inf], np.nan).fillna(0)\n return pd.Series(aroon_down, name='aroon_down'+str(n))\n" ]
[ [ "numpy.abs", "pandas.Series", "numpy.concatenate", "numpy.argmax", "numpy.argmin", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Stifael/uloganalysis
[ "70c1ae9bf0a39bc96d388cc80b90464f2edd16fa" ]
[ "examples/attitude.py" ]
[ "\"\"\"Create dataframe with messages required to run attitude tests.\n\nStore topics required for attitude tests.\nAdd missing messages to the dataframe which are required for attitude tests.\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport os\nimport pyulog\nfrom pyulgresample import ulogconv as conv\nfrom pyulgresample import mathpandas as mpd\nfrom pyulgresample import loginfo\nfrom pyulgresample.ulogdataframe import DfUlg, TopicMsgs\n\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nparser = argparse.ArgumentParser(description=\"Script to process attitude\")\nparser.add_argument(\"filename\", metavar=\"file.ulg\", help=\"ulog file\")\n\n\ndef add_roll_pitch_yaw(df):\n \"\"\"Compute roll, pitch and yaw angle and add them to the dataframe.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n\n \"\"\"\n roll, pitch, yaw = mpd.series_quat2euler(\n df[\"T_vehicle_attitude_0__F_q_0\"],\n df[\"T_vehicle_attitude_0__F_q_1\"],\n df[\"T_vehicle_attitude_0__F_q_2\"],\n df[\"T_vehicle_attitude_0__F_q_3\"],\n )\n df[\"T_vehicle_attitude_0__NF_roll\"] = roll.values\n df[\"T_vehicle_attitude_0__NF_pitch\"] = pitch.values\n df[\"T_vehicle_attitude_0__NF_yaw\"] = yaw.values\n\n\ndef add_euler_error(df):\n \"\"\"Compute orientation error as euler angles and add them to the dataframe.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n\n \"\"\"\n df[\"T_vehicle_attitude_setpoint_0__NF_e_roll\"] = mpd.angle_wrap(\n df[\"T_vehicle_attitude_setpoint_0__F_roll_body\"]\n - df[\"T_vehicle_attitude_0__NF_roll\"]\n )\n df[\"T_vehicle_attitude_setpoint_0__NF_e_pitch\"] = mpd.angle_wrap(\n df[\"T_vehicle_attitude_setpoint_0__F_pitch_body\"]\n - df[\"T_vehicle_attitude_0__NF_pitch\"]\n )\n df[\"T_vehicle_attitude_setpoint_0__NF_e_yaw\"] = mpd.angle_wrap(\n df[\"T_vehicle_attitude_setpoint_0__F_yaw_body\"]\n - df[\"T_vehicle_attitude_0__NF_yaw\"]\n )\n\n\ndef add_vehicle_z_axis(df):\n \"\"\"Compute the body z axis in world coordinate system and add it to the dataframe.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n\n \"\"\"\n x = pd.Series(\n np.zeros(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_0__NF_body_z_axis_x\",\n )\n y = pd.Series(\n np.zeros(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_0__NF_body_z_axis_y\",\n )\n z = pd.Series(\n np.ones(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_0__NF_body_z_axis_z\",\n )\n x, y, z = mpd.series_quatrot(\n x,\n y,\n z,\n df[\"T_vehicle_attitude_0__F_q_0\"],\n df[\"T_vehicle_attitude_0__F_q_1\"],\n df[\"T_vehicle_attitude_0__F_q_2\"],\n df[\"T_vehicle_attitude_0__F_q_3\"],\n )\n\n df[x.name] = x.values\n df[y.name] = y.values\n df[z.name] = z.values\n\n\ndef add_desired_tilt(df):\n \"\"\"Compute desired tilt angle and add it to the dataframe.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n\n \"\"\"\n if \"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_x\" not in df:\n add_desired_z_axis(df)\n\n x = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"x\")\n y = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"y\")\n z = pd.Series(np.ones(df.shape[0]), index=df[\"timestamp\"], name=\"z\")\n\n tilt = mpd.series_dot(\n x,\n y,\n z,\n df[\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_x\"],\n 
df[\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_y\"],\n df[\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_z\"],\n )\n tilt.where(\n tilt < 1, 1, inplace=True\n ) # ensure that angle 1 is never exceeded\n df[\"T_vehicle_attitude_setpoint_0__NF_tilt_desired\"] = tilt.values\n df[\"T_vehicle_attitude_setpoint_0__NF_tilt_desired\"] = df[\n \"T_vehicle_attitude_setpoint_0__NF_tilt_desired\"\n ].apply(np.arccos)\n\n\ndef add_tilt(df):\n \"\"\"Compute tilt angle and add it to the dataframe.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n\n \"\"\"\n if \"T_vehicle_attitude_0__NF_body_z_axis_x\" not in df:\n add_vehicle_z_axis(df)\n\n x = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"x\")\n y = pd.Series(np.zeros(df.shape[0]), index=df[\"timestamp\"], name=\"y\")\n z = pd.Series(np.ones(df.shape[0]), index=df[\"timestamp\"], name=\"z\")\n\n tilt = mpd.series_dot(\n x,\n y,\n z,\n df[\"T_vehicle_attitude_0__NF_body_z_axis_x\"],\n df[\"T_vehicle_attitude_0__NF_body_z_axis_y\"],\n df[\"T_vehicle_attitude_0__NF_body_z_axis_z\"],\n )\n tilt.where(\n tilt < 1, 1, inplace=True\n ) # ensure that angle 1 is never exceeded\n df[\"T_vehicle_attitude_0__NF_tilt\"] = tilt.values\n df[\"T_vehicle_attitude_0__NF_tilt\"] = df[\n \"T_vehicle_attitude_0__NF_tilt\"\n ].apply(np.arccos)\n\n\ndef add_vehicle_inverted(df):\n \"\"\"Check if the vehicle is tilted more than 90 degrees and add that information to the dataframe.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n\n \"\"\"\n if \"T_vehicle_attitude_0__NF_body_z_axis_z\" not in df:\n add_vehicle_z_axis(df)\n\n df[\n \"T_vehicle_attitude_0__NF_tilt_more_90\"\n ] = df.T_vehicle_attitude_0__NF_body_z_axis_z.values\n df[df[[\"T_vehicle_attitude_0__NF_tilt_more_90\"]] >= 0] = 0\n df[df[[\"T_vehicle_attitude_0__NF_tilt_more_90\"]] < 0] = 1\n\n\ndef add_desired_z_axis(df):\n \"\"\"Compute the desired body z axis in world coordinate system and add it to the dataframe.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n\n \"\"\"\n x = pd.Series(\n np.zeros(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_x\",\n )\n y = pd.Series(\n np.zeros(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_y\",\n )\n z = pd.Series(\n np.ones(df.shape[0]),\n index=df[\"timestamp\"],\n name=\"T_vehicle_attitude_setpoint_0__NF_body_z_axis_sp_z\",\n )\n\n x, y, z = mpd.series_quatrot(\n x,\n y,\n z,\n df[\"T_vehicle_attitude_setpoint_0__F_q_d_0\"],\n df[\"T_vehicle_attitude_setpoint_0__F_q_d_1\"],\n df[\"T_vehicle_attitude_setpoint_0__F_q_d_2\"],\n df[\"T_vehicle_attitude_setpoint_0__F_q_d_3\"],\n )\n df[x.name] = x.values\n df[y.name] = y.values\n df[z.name] = z.values\n\n\ndef plot_time_series(df, plt):\n \"\"\"Plot a time series.\n\n Arguments:\n df -- dataframe containing messages from the required topics\n plt -- plot\n\n \"\"\"\n # Remove the plot frame lines\n delta = (df[\"timestamp\"].max() - df[\"timestamp\"].min()) / 10\n plt.xticks(\n np.arange(\n df[\"timestamp\"].min(),\n df[\"timestamp\"].max(),\n step=np.around(delta, decimals=1),\n )\n )\n plt.grid()\n\n\ndef main():\n \"\"\"Call methods and create pdf with plots showing relevant data.\"\"\"\n args = parser.parse_args()\n # create dataframe-ulog class for Attitude/Attiutde-setpoint topic\n att = DfUlg.create(\n args.filename, topics=[\"vehicle_attitude\", \"vehicle_attitude_setpoint\"]\n )\n\n with 
PdfPages(\"attitude.pdf\") as pdf:\n\n # roll pitch and yaw error\n add_roll_pitch_yaw(att.df)\n add_euler_error(att.df)\n\n plt.figure(0, figsize=(20, 13))\n df_tmp = att.df[\n [\n \"timestamp\",\n \"T_vehicle_attitude_setpoint_0__NF_e_roll\",\n \"T_vehicle_attitude_setpoint_0__NF_e_pitch\",\n \"T_vehicle_attitude_setpoint_0__NF_e_yaw\",\n ]\n ].copy()\n df_tmp.plot(x=\"timestamp\", linewidth=0.8)\n plot_time_series(df_tmp, plt)\n plt.title(\"Roll-Pitch-Yaw-Error\")\n plt.ylabel(\"rad\")\n pdf.savefig()\n plt.close(0)\n\n # inverted\n add_vehicle_z_axis(att.df)\n add_vehicle_inverted(att.df)\n plt.figure(1, figsize=(20, 13))\n df_tmp = att.df[\n [\"timestamp\", \"T_vehicle_attitude_0__NF_tilt_more_90\"]\n ].copy()\n df_tmp.plot(x=\"timestamp\", linewidth=0.8)\n plot_time_series(df_tmp, plt)\n plt.title(\"Inverted\")\n plt.ylabel(\"boolean\")\n pdf.savefig()\n plt.close(1)\n\n # tilt and desired tilt\n add_desired_z_axis(att.df)\n add_desired_tilt(att.df)\n add_tilt(att.df)\n\n pos_tilt = loginfo.get_param(att.ulog, \"MPC_TILTMAX_AIR\", 0)\n man_tilt = loginfo.get_param(att.ulog, \"MPC_MAN_TILT_MAX\", 0)\n plt.figure(2, figsize=(20, 13))\n df_tmp = att.df[\n [\n \"timestamp\",\n \"T_vehicle_attitude_0__NF_tilt\",\n \"T_vehicle_attitude_setpoint_0__NF_tilt_desired\",\n ]\n ].copy()\n df_tmp[\"MPC_TILTMAX_AIR\"] = pos_tilt * np.pi / 180\n df_tmp[\"MPC_MAN_TILT_MAX\"] = man_tilt * np.pi / 180\n df_tmp.plot(x=\"timestamp\", linewidth=0.8, style=[\"-\", \"-\", \"--\", \"--\"])\n\n plot_time_series(df_tmp, plt)\n plt.title(\"Tilt / Desired Tilt\")\n plt.ylabel(\"rad\")\n pdf.savefig()\n plt.close(2)\n\n print(\"attitude.pdf was created\")\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.title", "matplotlib.use", "numpy.around", "numpy.ones", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.close", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anmatako/PatchmatchNet
[ "82206d8b603ec925b6e4b1990618e0ad769347de", "38b3e1a2b898cfd28208a7dca22a0ee948212c37" ]
[ "models/net.py", "datasets/mvs.py" ]
[ "from typing import Dict, List, Tuple\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom .module import ConvBnReLU, depth_regression\r\nfrom .patchmatch import PatchMatch\r\n\r\n\r\nclass FeatureNet(nn.Module):\r\n \"\"\"Feature Extraction Network: to extract features of original images from each view\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize different layers in the network\"\"\"\r\n\r\n super(FeatureNet, self).__init__()\r\n\r\n self.conv0 = ConvBnReLU(3, 8, 3, 1, 1)\r\n # [B,8,H,W]\r\n self.conv1 = ConvBnReLU(8, 8, 3, 1, 1)\r\n # [B,16,H/2,W/2]\r\n self.conv2 = ConvBnReLU(8, 16, 5, 2, 2)\r\n self.conv3 = ConvBnReLU(16, 16, 3, 1, 1)\r\n self.conv4 = ConvBnReLU(16, 16, 3, 1, 1)\r\n # [B,32,H/4,W/4]\r\n self.conv5 = ConvBnReLU(16, 32, 5, 2, 2)\r\n self.conv6 = ConvBnReLU(32, 32, 3, 1, 1)\r\n self.conv7 = ConvBnReLU(32, 32, 3, 1, 1)\r\n # [B,64,H/8,W/8]\r\n self.conv8 = ConvBnReLU(32, 64, 5, 2, 2)\r\n self.conv9 = ConvBnReLU(64, 64, 3, 1, 1)\r\n self.conv10 = ConvBnReLU(64, 64, 3, 1, 1)\r\n\r\n self.output1 = nn.Conv2d(64, 64, 1, bias=False)\r\n self.inner1 = nn.Conv2d(32, 64, 1, bias=True)\r\n self.inner2 = nn.Conv2d(16, 64, 1, bias=True)\r\n self.output2 = nn.Conv2d(64, 32, 1, bias=False)\r\n self.output3 = nn.Conv2d(64, 16, 1, bias=False)\r\n\r\n def forward(self, x: torch.Tensor) -> Dict[int, torch.Tensor]:\r\n \"\"\"Forward method\r\n\r\n Args:\r\n x: images from a single view, in the shape of [B, C, H, W]. Generally, C=3\r\n\r\n Returns:\r\n output_feature: a python dictionary contains extracted features from stage 1 to stage 3\r\n keys are 1, 2, and 3\r\n \"\"\"\r\n output_feature: Dict[int, torch.Tensor] = {}\r\n\r\n conv1 = self.conv1(self.conv0(x))\r\n conv4 = self.conv4(self.conv3(self.conv2(conv1)))\r\n\r\n conv7 = self.conv7(self.conv6(self.conv5(conv4)))\r\n conv10 = self.conv10(self.conv9(self.conv8(conv7)))\r\n\r\n output_feature[3] = self.output1(conv10)\r\n intra_feat = F.interpolate(conv10, scale_factor=2.0, mode=\"bilinear\", align_corners=False) + self.inner1(conv7)\r\n del conv7\r\n del conv10\r\n\r\n output_feature[2] = self.output2(intra_feat)\r\n intra_feat = F.interpolate(\r\n intra_feat, scale_factor=2.0, mode=\"bilinear\", align_corners=False) + self.inner2(conv4)\r\n del conv4\r\n\r\n output_feature[1] = self.output3(intra_feat)\r\n del intra_feat\r\n\r\n return output_feature\r\n\r\n\r\nclass Refinement(nn.Module):\r\n \"\"\"Depth map refinement network\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize\"\"\"\r\n\r\n super(Refinement, self).__init__()\r\n\r\n # img: [B,3,H,W]\r\n self.conv0 = ConvBnReLU(in_channels=3, out_channels=8)\r\n # depth map:[B,1,H/2,W/2]\r\n self.conv1 = ConvBnReLU(in_channels=1, out_channels=8)\r\n self.conv2 = ConvBnReLU(in_channels=8, out_channels=8)\r\n self.deconv = nn.ConvTranspose2d(\r\n in_channels=8, out_channels=8, kernel_size=3, padding=1, output_padding=1, stride=2, bias=False\r\n )\r\n\r\n self.bn = nn.BatchNorm2d(8)\r\n self.conv3 = ConvBnReLU(in_channels=16, out_channels=8)\r\n self.res = nn.Conv2d(in_channels=8, out_channels=1, kernel_size=3, padding=1, bias=False)\r\n\r\n def forward(\r\n self, img: torch.Tensor, depth_0: torch.Tensor, depth_min: torch.Tensor, depth_max: torch.Tensor\r\n ) -> torch.Tensor:\r\n \"\"\"Forward method\r\n\r\n Args:\r\n img: input reference images (B, 3, H, W)\r\n depth_0: current depth map (B, 1, H//2, W//2)\r\n depth_min: pre-defined minimum depth (B, )\r\n depth_max: pre-defined maximum depth (B, )\r\n\r\n Returns:\r\n depth: refined 
depth map (B, 1, H, W)\r\n \"\"\"\r\n\r\n batch_size = depth_min.size()[0]\r\n # pre-scale the depth map into [0,1]\r\n depth = (depth_0 - depth_min.view(batch_size, 1, 1, 1)) / (depth_max - depth_min).view(batch_size, 1, 1, 1)\r\n\r\n conv0 = self.conv0(img)\r\n deconv = F.relu(self.bn(self.deconv(self.conv2(self.conv1(depth)))), inplace=True)\r\n # depth residual\r\n res = self.res(self.conv3(torch.cat((deconv, conv0), dim=1)))\r\n del conv0\r\n del deconv\r\n\r\n depth = F.interpolate(depth, scale_factor=2.0, mode=\"nearest\") + res\r\n # convert the normalized depth back\r\n return depth * (depth_max - depth_min).view(batch_size, 1, 1, 1) + depth_min.view(batch_size, 1, 1, 1)\r\n\r\n\r\nclass PatchmatchNet(nn.Module):\r\n \"\"\" Implementation of complete structure of PatchmatchNet\"\"\"\r\n\r\n def __init__(\r\n self,\r\n patchmatch_interval_scale: List[float],\r\n propagation_range: List[int],\r\n patchmatch_iteration: List[int],\r\n patchmatch_num_sample: List[int],\r\n propagate_neighbors: List[int],\r\n evaluate_neighbors: List[int],\r\n ) -> None:\r\n \"\"\"Initialize modules in PatchmatchNet\r\n\r\n Args:\r\n patchmatch_interval_scale: depth interval scale in patchmatch module\r\n propagation_range: propagation range\r\n patchmatch_iteration: patchmatch iteration number\r\n patchmatch_num_sample: patchmatch number of samples\r\n propagate_neighbors: number of propagation neighbors\r\n evaluate_neighbors: number of propagation neighbors for evaluation\r\n \"\"\"\r\n super(PatchmatchNet, self).__init__()\r\n\r\n self.stages = 4\r\n self.feature = FeatureNet()\r\n self.patchmatch_num_sample = patchmatch_num_sample\r\n\r\n num_features = [16, 32, 64]\r\n\r\n self.propagate_neighbors = propagate_neighbors\r\n self.evaluate_neighbors = evaluate_neighbors\r\n # number of groups for group-wise correlation\r\n self.G = [4, 8, 8]\r\n\r\n for i in range(self.stages - 1):\r\n patchmatch = PatchMatch(\r\n propagation_out_range=propagation_range[i],\r\n patchmatch_iteration=patchmatch_iteration[i],\r\n patchmatch_num_sample=patchmatch_num_sample[i],\r\n patchmatch_interval_scale=patchmatch_interval_scale[i],\r\n num_feature=num_features[i],\r\n G=self.G[i],\r\n propagate_neighbors=self.propagate_neighbors[i],\r\n evaluate_neighbors=evaluate_neighbors[i],\r\n stage=i + 1,\r\n )\r\n setattr(self, f\"patchmatch_{i+1}\", patchmatch)\r\n\r\n self.upsample_net = Refinement()\r\n\r\n def forward(\r\n self,\r\n images: List[torch.Tensor],\r\n intrinsics: torch.Tensor,\r\n extrinsics: torch.Tensor,\r\n depth_min: torch.Tensor,\r\n depth_max: torch.Tensor,\r\n ) -> Tuple[torch.Tensor, torch.Tensor, Dict[int, List[torch.Tensor]]]:\r\n \"\"\"Forward method for PatchMatchNet\r\n\r\n Args:\r\n images: N images (B, 3, H, W) stored in list\r\n intrinsics: intrinsic 3x3 matrices for all images (B, N, 3, 3)\r\n extrinsics: extrinsic 4x4 matrices for all images (B, N, 4, 4)\r\n depth_min: minimum virtual depth (B, 1)\r\n depth_max: maximum virtual depth (B, 1)\r\n\r\n Returns:\r\n output tuple of PatchMatchNet, containing refined depthmap, depth patchmatch, and photometric confidence.\r\n \"\"\"\r\n assert len(images) == intrinsics.size()[1], \"Different number of images and intrinsic matrices\"\r\n assert len(images) == extrinsics.size()[1], 'Different number of images and extrinsic matrices'\r\n images, intrinsics, orig_height, orig_width = adjust_image_dims(images, intrinsics)\r\n ref_image = images[0]\r\n _, _, ref_height, ref_width = ref_image.size()\r\n\r\n # step 1. 
Multi-scale feature extraction\r\n features: List[Dict[int, torch.Tensor]] = []\r\n for img in images:\r\n output_feature = self.feature(img)\r\n features.append(output_feature)\r\n del images\r\n ref_feature, src_features = features[0], features[1:]\r\n\r\n depth_min = depth_min.float()\r\n depth_max = depth_max.float()\r\n\r\n # step 2. Learning-based patchmatch\r\n device = intrinsics.device\r\n depth = torch.empty(0, device=device)\r\n depths: List[torch.Tensor] = []\r\n score = torch.empty(0, device=device)\r\n view_weights = torch.empty(0, device=device)\r\n depth_patchmatch: Dict[int, List[torch.Tensor]] = {}\r\n\r\n scale = 0.125\r\n for stage in range(self.stages - 1, 0, -1):\r\n src_features_l = [src_fea[stage] for src_fea in src_features]\r\n\r\n # Create projection matrix for specific stage\r\n intrinsics_l = intrinsics.clone()\r\n intrinsics_l[:, :, :2] *= scale\r\n proj = extrinsics.clone()\r\n proj[:, :, :3, :4] = torch.matmul(intrinsics_l, extrinsics[:, :, :3, :4])\r\n proj_l = torch.unbind(proj, 1)\r\n ref_proj, src_proj = proj_l[0], proj_l[1:]\r\n scale *= 2.0\r\n\r\n # Need conditional since TorchScript only allows \"getattr\" access with string literals\r\n if stage == 3:\r\n depths, score, view_weights = self.patchmatch_3(\r\n ref_feature=ref_feature[stage],\r\n src_features=src_features_l,\r\n ref_proj=ref_proj,\r\n src_projs=src_proj,\r\n depth_min=depth_min,\r\n depth_max=depth_max,\r\n depth=depth,\r\n view_weights=view_weights,\r\n )\r\n elif stage == 2:\r\n depths, score, view_weights = self.patchmatch_2(\r\n ref_feature=ref_feature[stage],\r\n src_features=src_features_l,\r\n ref_proj=ref_proj,\r\n src_projs=src_proj,\r\n depth_min=depth_min,\r\n depth_max=depth_max,\r\n depth=depth,\r\n view_weights=view_weights,\r\n )\r\n elif stage == 1:\r\n depths, score, view_weights = self.patchmatch_1(\r\n ref_feature=ref_feature[stage],\r\n src_features=src_features_l,\r\n ref_proj=ref_proj,\r\n src_projs=src_proj,\r\n depth_min=depth_min,\r\n depth_max=depth_max,\r\n depth=depth,\r\n view_weights=view_weights,\r\n )\r\n\r\n depth_patchmatch[stage] = depths\r\n depth = depths[-1].detach()\r\n\r\n if stage > 1:\r\n # upsampling the depth map and pixel-wise view weight for next stage\r\n depth = F.interpolate(depth, scale_factor=2.0, mode=\"nearest\")\r\n view_weights = F.interpolate(view_weights, scale_factor=2.0, mode=\"nearest\")\r\n\r\n del ref_feature\r\n del src_features\r\n\r\n # step 3. 
Refinement\r\n depth = self.upsample_net(ref_image, depth, depth_min, depth_max)\r\n if ref_width != orig_width or ref_height != orig_height:\r\n depth = F.interpolate(depth, size=[orig_height, orig_width], mode='bilinear', align_corners=False)\r\n depth_patchmatch[0] = [depth]\r\n\r\n if self.training:\r\n return depth, torch.empty(0, device=device), depth_patchmatch\r\n else:\r\n num_depth = self.patchmatch_num_sample[0]\r\n score_sum4 = 4 * F.avg_pool3d(\r\n F.pad(score.unsqueeze(1), pad=(0, 0, 0, 0, 1, 2)), (4, 1, 1), stride=1, padding=0\r\n ).squeeze(1)\r\n # [B, 1, H, W]\r\n depth_index = depth_regression(\r\n score, depth_values=torch.arange(num_depth, device=score.device, dtype=torch.float)\r\n ).long().clamp(0, num_depth - 1)\r\n photometric_confidence = torch.gather(score_sum4, 1, depth_index)\r\n photometric_confidence = F.interpolate(\r\n photometric_confidence, size=[orig_height, orig_width], mode=\"nearest\").squeeze(1)\r\n\r\n return depth, photometric_confidence, depth_patchmatch\r\n\r\n\r\ndef adjust_image_dims(\r\n images: List[torch.Tensor], intrinsics: torch.Tensor) -> Tuple[List[torch.Tensor], torch.Tensor, int, int]:\r\n # stretch or compress image slightly to ensure width and height are multiples of 8\r\n _, _, ref_height, ref_width = images[0].size()\r\n for i in range(len(images)):\r\n _, _, height, width = images[i].size()\r\n new_height = int(round(height / 8)) * 8\r\n new_width = int(round(width / 8)) * 8\r\n if new_width != width or new_height != height:\r\n intrinsics[:, i, 0] *= new_width / width\r\n intrinsics[:, i, 1] *= new_height / height\r\n images[i] = nn.functional.interpolate(\r\n images[i], size=[new_height, new_width], mode='bilinear', align_corners=False)\r\n\r\n return images, intrinsics, ref_height, ref_width\r\n\r\n\r\ndef patchmatchnet_loss(\r\n depth_patchmatch: Dict[int, List[torch.Tensor]],\r\n depth_gt: List[torch.Tensor],\r\n mask: List[torch.Tensor],\r\n) -> torch.Tensor:\r\n \"\"\"Patchmatch Net loss function\r\n\r\n Args:\r\n depth_patchmatch: depth map predicted by patchmatch net\r\n depth_gt: ground truth depth map\r\n mask: mask for filter valid points\r\n\r\n Returns:\r\n loss: result loss value\r\n \"\"\"\r\n loss = 0\r\n for i in range(0, 4):\r\n gt_depth = depth_gt[i][mask[i]]\r\n for depth in depth_patchmatch[i]:\r\n loss = loss + F.smooth_l1_loss(depth[mask[i]], gt_depth, reduction=\"mean\")\r\n\r\n return loss\r\n", "import numpy as np\nimport os\nimport random\nfrom datasets.data_io import read_cam_file, read_image, read_map, read_pair_file\nfrom torch.utils.data import Dataset\nfrom typing import List, Tuple\n\n\nclass MVSDataset(Dataset):\n def __init__(\n self,\n data_path: str,\n num_views: int = 10,\n max_dim: int = -1,\n scan_list: str = '',\n num_light_idx: int = -1,\n cam_folder: str = \"cams\",\n pair_path: str = \"pair.txt\",\n image_folder: str = \"images\",\n depth_folder: str = \"depth_gt\",\n image_extension: str = \".jpg\",\n robust_train: bool = False\n ) -> None:\n super(MVSDataset, self).__init__()\n\n self.data_path = data_path\n self.num_views = num_views\n self.max_dim = max_dim\n self.robust_train = robust_train\n self.cam_folder = cam_folder\n self.depth_folder = depth_folder\n self.image_folder = image_folder\n self.image_extension = image_extension\n self.metas: List[Tuple[str, str, int, List[int]]] = []\n\n if os.path.isfile(scan_list):\n with open(scan_list) as f:\n scans = [line.rstrip() for line in f.readlines()]\n else:\n scans = ['']\n\n if num_light_idx > 0:\n light_indexes = [str(idx) for idx 
in range(num_light_idx)]\n else:\n light_indexes = ['']\n\n for scan in scans:\n pair_data = read_pair_file(os.path.join(self.data_path, scan, pair_path))\n for light_idx in light_indexes:\n self.metas += [(scan, light_idx, ref, src) for ref, src in pair_data]\n\n def __len__(self):\n return len(self.metas)\n\n def __getitem__(self, idx):\n scan, light_idx, ref_view, src_views = self.metas[idx]\n # use only the reference view and first num_views source views\n num_src_views = min(len(src_views), self.num_views)\n if self.robust_train:\n index = random.sample(range(len(src_views)), num_src_views)\n view_ids = [ref_view] + [src_views[i] for i in index]\n else:\n view_ids = [ref_view] + src_views[:num_src_views]\n\n images = []\n intrinsics = []\n extrinsics = []\n depth_min: float = -1.0\n depth_max: float = -1.0\n depth_gt = np.empty(0)\n mask = np.empty(0)\n\n for view_index, view_id in enumerate(view_ids):\n img_filename = os.path.join(\n self.data_path, scan, self.image_folder, light_idx, \"{:0>8}{}\".format(view_id, self.image_extension))\n\n image, original_h, original_w = read_image(img_filename, self.max_dim)\n images.append(image.transpose([2, 0, 1]))\n\n cam_filename = os.path.join(self.data_path, scan, self.cam_folder, \"{:0>8}_cam.txt\".format(view_id))\n intrinsic, extrinsic, depth_params = read_cam_file(cam_filename)\n\n intrinsic[0] *= image.shape[1] / original_w\n intrinsic[1] *= image.shape[0] / original_h\n intrinsics.append(intrinsic)\n extrinsics.append(extrinsic)\n\n if view_index == 0: # reference view\n depth_min = depth_params[0]\n depth_max = depth_params[1]\n depth_gt_filename = os.path.join(self.data_path, scan, self.depth_folder, \"{:0>8}.pfm\".format(view_id))\n\n if os.path.isfile(depth_gt_filename):\n # Using `copy()` here to avoid the negative stride resulting from the transpose\n depth_gt = read_map(depth_gt_filename, self.max_dim).transpose([2, 0, 1]).copy()\n # Create mask from GT depth map\n mask = depth_gt >= depth_min\n\n intrinsics = np.stack(intrinsics)\n extrinsics = np.stack(extrinsics)\n\n return {\n \"images\": images, # List[Tensor]: [N][3,Hi,Wi], N is number of images\n \"intrinsics\": intrinsics, # Tensor: [N,3,3]\n \"extrinsics\": extrinsics, # Tensor: [N,4,4]\n \"depth_min\": depth_min, # Tensor: [1]\n \"depth_max\": depth_max, # Tensor: [1]\n \"depth_gt\": depth_gt, # Tensor: [1,H0,W0] if exists\n \"mask\": mask, # Tensor: [1,H0,W0] if exists\n \"filename\": os.path.join(scan, \"{}\", \"{:0>8}\".format(view_ids[0]) + \"{}\")\n }\n" ]
[ [ "torch.empty", "torch.nn.ConvTranspose2d", "torch.cat", "torch.unbind", "torch.nn.Conv2d", "torch.arange", "torch.matmul", "torch.nn.functional.interpolate", "torch.nn.BatchNorm2d", "torch.gather", "torch.nn.functional.smooth_l1_loss" ], [ "numpy.stack", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xygu/recourse
[ "7f41843056218ee5cca1921db80948a4c9848a36" ]
[ "loadData.py" ]
[ "import os\nimport sys\nimport copy\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom pprint import pprint\nfrom sklearn.model_selection import train_test_split\n\nimport utils\nfrom debug import ipsh\n\nsys.path.insert(0, '_data_main')\n\ntry:\n from _data_main.fair_adult_data import *\nexcept:\n print('[ENV WARNING] fair_adult_data not available')\n\ntry:\n from _data_main.fair_compas_data import *\nexcept:\n print('[ENV WARNING] fair_compas_data not available')\n\ntry:\n from _data_main.process_credit_data import *\nexcept:\n print('[ENV WARNING] process_credit_data not available')\n\ntry:\n from _data_main.process_german_data import *\nexcept:\n print('[ENV WARNING] process_german_data not available')\n\ntry:\n from _data_main.process_synthetic_data import *\nexcept:\n print('[ENV WARNING] process_synthetic_data not available')\n\ntry:\n from _data_main.process_mortgage_data import *\nexcept:\n print('[ENV WARNING] process_mortgage_data not available')\n\ntry:\n from _data_main.process_twomoon_data import *\nexcept:\n print('[ENV WARNING] process_twomoon_data not available')\n\ntry:\n from _data_main.process_test_data import *\nexcept:\n print('[ENV WARNING] process_test_data not available')\n\nVALID_ATTRIBUTE_DATA_TYPES = { \\\n 'numeric-int', \\\n 'numeric-real', \\\n 'binary', \\\n 'categorical', \\\n 'sub-categorical', \\\n 'ordinal', \\\n 'sub-ordinal'}\nVALID_ATTRIBUTE_NODE_TYPES = { \\\n 'meta', \\\n 'input', \\\n 'output'}\nVALID_ACTIONABILITY_TYPES = { \\\n 'none', \\\n 'any', \\\n 'same-or-increase', \\\n 'same-or-decrease'}\nVALID_MUTABILITY_TYPES = { \\\n True, \\\n False}\n\nfrom random import seed\nRANDOM_SEED = 54321\nseed(RANDOM_SEED) # set the random seed so that the random permutations can be reproduced again\nnp.random.seed(RANDOM_SEED)\n\n\nclass Dataset(object):\n\n # TODO: getOneHotEquivalent can be a class method, and this object can store\n # both one-hot and non-hot versions!\n\n def __init__(self, data_frame, attributes, is_one_hot, dataset_name):\n\n self.dataset_name = dataset_name\n\n self.is_one_hot = is_one_hot\n\n attributes_long = attributes\n data_frame_long = data_frame\n self.data_frame_long = data_frame_long # i.e., data_frame is indexed by attr_name_long\n self.attributes_long = attributes_long # i.e., attributes is indexed by attr_name_long\n\n attributes_kurz = dict((attributes[key].attr_name_kurz, value) for (key, value) in attributes_long.items())\n data_frame_kurz = copy.deepcopy(data_frame_long)\n data_frame_kurz.columns = self.getAllAttributeNames('kurz')\n self.data_frame_kurz = data_frame_kurz # i.e., data_frame is indexed by attr_name_kurz\n self.attributes_kurz = attributes_kurz # i.e., attributes is indexed by attr_name_kurz\n\n # assert that data_frame and attributes match on variable names (long)\n assert len(np.setdiff1d(\n data_frame.columns.values,\n np.array(self.getAllAttributeNames('long'))\n )) == 0\n\n # assert attribute type matches what is in the data frame\n for attr_name in np.setdiff1d(\n self.getInputAttributeNames('long'),\n self.getRealBasedAttributeNames('long'),\n ):\n unique_values = np.unique(data_frame_long[attr_name].to_numpy())\n # all non-numerical-real values should be integer or {0,1}\n for value in unique_values:\n assert value == np.floor(value)\n if is_one_hot and attributes_long[attr_name].attr_type != 'numeric-int': # binary, sub-categorical, sub-ordinal\n try:\n assert \\\n np.array_equal(unique_values, [0,1]) or \\\n np.array_equal(unique_values, [1,2]) or \\\n 
np.array_equal(unique_values, [1]) # the first sub-ordinal attribute is always 1\n # race (binary) in compass is encoded as {1,2}\n except:\n ipsh()\n\n # # assert attributes and is_one_hot agree on one-hot-ness (i.e., if is_one_hot,\n # # then at least one attribute should be encoded as one-hot (w/ parent reference))\n # tmp_is_one_hot = False\n # for attr_name in attributes.keys():\n # attr_obj = attributes[attr_name]\n # # this simply checks to make sure that at least one elem is one-hot encoded\n # if attr_obj.parent_name_long != -1 or attr_obj.parent_name_kurz != -1:\n # tmp_is_one_hot = True\n # # TODO: assert only if there is a cat/ord variable!\n # assert is_one_hot == tmp_is_one_hot, \"Dataset object and actual attributes don't agree on one-hot\"\n\n self.assertSiblingsShareAttributes('long')\n self.assertSiblingsShareAttributes('kurz')\n\n def getAttributeNames(self, allowed_node_types, long_or_kurz = 'kurz'):\n names = []\n # We must loop through all attributes and check attr_name\n for attr_name in self.attributes_long.keys():\n attr_obj = self.attributes_long[attr_name]\n if attr_obj.node_type not in allowed_node_types:\n continue\n if long_or_kurz == 'long':\n names.append(attr_obj.attr_name_long)\n elif long_or_kurz == 'kurz':\n names.append(attr_obj.attr_name_kurz)\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n return np.array(names)\n\n def getAllAttributeNames(self, long_or_kurz = 'kurz'):\n return self.getAttributeNames({'meta', 'input', 'output'}, long_or_kurz)\n\n def getInputOutputAttributeNames(self, long_or_kurz = 'kurz'):\n return self.getAttributeNames({'input', 'output'}, long_or_kurz)\n\n def getMetaInputAttributeNames(self, long_or_kurz = 'kurz'):\n return self.getAttributeNames({'meta', 'input'}, long_or_kurz)\n\n def getMetaAttributeNames(self, long_or_kurz = 'kurz'):\n return self.getAttributeNames({'meta'}, long_or_kurz)\n\n def getInputAttributeNames(self, long_or_kurz = 'kurz'):\n return self.getAttributeNames({'input'}, long_or_kurz)\n\n def getOutputAttributeNames(self, long_or_kurz = 'kurz'):\n return self.getAttributeNames({'output'}, long_or_kurz)\n\n def getBinaryAttributeNames(self, long_or_kurz = 'kurz'):\n names = []\n # We must loop through all attributes and check binary\n for attr_name_long in self.getInputAttributeNames('long'):\n attr_obj = self.attributes_long[attr_name_long]\n if attr_obj.node_type == 'input' and attr_obj.attr_type == 'binary':\n if long_or_kurz == 'long':\n names.append(attr_obj.attr_name_long)\n elif long_or_kurz == 'kurz':\n names.append(attr_obj.attr_name_kurz)\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n return np.array(names)\n\n def getActionableAttributeNames(self, long_or_kurz = 'kurz'):\n names = []\n # We must loop through all attributes and check actionability\n for attr_name_long in self.getInputAttributeNames('long'):\n attr_obj = self.attributes_long[attr_name_long]\n if attr_obj.node_type == 'input' and attr_obj.actionability != 'none':\n if long_or_kurz == 'long':\n names.append(attr_obj.attr_name_long)\n elif long_or_kurz == 'kurz':\n names.append(attr_obj.attr_name_kurz)\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n return np.array(names)\n\n def getNonActionableAttributeNames(self, long_or_kurz = 'kurz'):\n a = self.getInputAttributeNames(long_or_kurz)\n b = self.getActionableAttributeNames(long_or_kurz)\n return np.setdiff1d(a,b)\n\n def getMutableAttributeNames(self, 
long_or_kurz = 'kurz'):\n names = []\n # We must loop through all attributes and check mutability\n for attr_name_long in self.getInputAttributeNames('long'):\n attr_obj = self.attributes_long[attr_name_long]\n if attr_obj.node_type == 'input' and attr_obj.mutability != False:\n if long_or_kurz == 'long':\n names.append(attr_obj.attr_name_long)\n elif long_or_kurz == 'kurz':\n names.append(attr_obj.attr_name_kurz)\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n return np.array(names)\n\n def getNonMutableAttributeNames(self, long_or_kurz = 'kurz'):\n a = self.getInputAttributeNames(long_or_kurz)\n b = self.getMutableAttributeNames(long_or_kurz)\n return np.setdiff1d(a,b)\n\n def getIntegerBasedAttributeNames(self, long_or_kurz = 'kurz'):\n names = []\n # We must loop through all attributes and check attr_type\n for attr_name_long in self.getInputAttributeNames('long'):\n attr_obj = self.attributes_long[attr_name_long]\n if attr_obj.attr_type == 'numeric-int':\n if long_or_kurz == 'long':\n names.append(attr_obj.attr_name_long)\n elif long_or_kurz == 'kurz':\n names.append(attr_obj.attr_name_kurz)\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n return np.array(names)\n\n def getRealBasedAttributeNames(self, long_or_kurz = 'kurz'):\n names = []\n # We must loop through all attributes and check attr_type\n for attr_name_long in self.getInputAttributeNames('long'):\n attr_obj = self.attributes_long[attr_name_long]\n if attr_obj.attr_type == 'numeric-real':\n if long_or_kurz == 'long':\n names.append(attr_obj.attr_name_long)\n elif long_or_kurz == 'kurz':\n names.append(attr_obj.attr_name_kurz)\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n return np.array(names)\n\n def assertSiblingsShareAttributes(self, long_or_kurz = 'kurz'):\n # assert elems of dictOfSiblings share attr_type, node_type, parent, actionability, and mutability\n dict_of_siblings = self.getDictOfSiblings(long_or_kurz)\n for parent_name in dict_of_siblings['cat'].keys():\n siblings = dict_of_siblings['cat'][parent_name]\n assert len(siblings) > 1\n for sibling in siblings:\n if long_or_kurz == 'long':\n self.attributes_long[sibling].attr_type = self.attributes_long[siblings[0]].attr_type\n self.attributes_long[sibling].node_type = self.attributes_long[siblings[0]].node_type\n self.attributes_long[sibling].actionability = self.attributes_long[siblings[0]].actionability\n self.attributes_long[sibling].mutability = self.attributes_long[siblings[0]].mutability\n self.attributes_long[sibling].parent_name_long = self.attributes_long[siblings[0]].parent_name_long\n self.attributes_long[sibling].parent_name_kurz = self.attributes_long[siblings[0]].parent_name_kurz\n elif long_or_kurz == 'kurz':\n self.attributes_kurz[sibling].attr_type = self.attributes_kurz[siblings[0]].attr_type\n self.attributes_kurz[sibling].node_type = self.attributes_kurz[siblings[0]].node_type\n self.attributes_kurz[sibling].actionability = self.attributes_kurz[siblings[0]].actionability\n self.attributes_kurz[sibling].mutability = self.attributes_kurz[siblings[0]].mutability\n self.attributes_kurz[sibling].parent_name_long = self.attributes_kurz[siblings[0]].parent_name_long\n self.attributes_kurz[sibling].parent_name_kurz = self.attributes_kurz[siblings[0]].parent_name_kurz\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n\n def getSiblingsFor(self, attr_name_long_or_kurz):\n # If attr_name_long is 
given, we will return siblings_long (the same length)\n # but not siblings_kurz. Same for the opposite direction.\n assert \\\n 'cat' in attr_name_long_or_kurz or 'ord' in attr_name_long_or_kurz, \\\n 'attr_name must include either `cat` or `ord`.'\n if attr_name_long_or_kurz in self.getInputOutputAttributeNames('long'):\n attr_name_long = attr_name_long_or_kurz\n dict_of_siblings_long = self.getDictOfSiblings('long')\n for parent_name_long in dict_of_siblings_long['cat']:\n siblings_long = dict_of_siblings_long['cat'][parent_name_long]\n if attr_name_long_or_kurz in siblings_long:\n return siblings_long\n for parent_name_long in dict_of_siblings_long['ord']:\n siblings_long = dict_of_siblings_long['ord'][parent_name_long]\n if attr_name_long_or_kurz in siblings_long:\n return siblings_long\n elif attr_name_long_or_kurz in self.getInputOutputAttributeNames('kurz'):\n attr_name_kurz = attr_name_long_or_kurz\n dict_of_siblings_kurz = self.getDictOfSiblings('kurz')\n for parent_name_kurz in dict_of_siblings_kurz['cat']:\n siblings_kurz = dict_of_siblings_kurz['cat'][parent_name_kurz]\n if attr_name_long_or_kurz in siblings_kurz:\n return siblings_kurz\n for parent_name_kurz in dict_of_siblings_kurz['ord']:\n siblings_kurz = dict_of_siblings_kurz['ord'][parent_name_kurz]\n if attr_name_long_or_kurz in siblings_kurz:\n return siblings_kurz\n else:\n raise Exception(f'{attr_name_long_or_kurz} not recognized as a valid `attr_name_long_or_kurz`.')\n\n def getDictOfSiblings(self, long_or_kurz = 'kurz'):\n if long_or_kurz == 'long':\n\n dict_of_siblings_long = {}\n dict_of_siblings_long['cat'] = {}\n dict_of_siblings_long['ord'] = {}\n\n for attr_name_long in self.getInputAttributeNames('long'):\n attr_obj = self.attributes_long[attr_name_long]\n if attr_obj.attr_type == 'sub-categorical':\n if attr_obj.parent_name_long not in dict_of_siblings_long['cat'].keys():\n dict_of_siblings_long['cat'][attr_obj.parent_name_long] = [] # initiate key-value pair\n dict_of_siblings_long['cat'][attr_obj.parent_name_long].append(attr_obj.attr_name_long)\n elif attr_obj.attr_type == 'sub-ordinal':\n if attr_obj.parent_name_long not in dict_of_siblings_long['ord'].keys():\n dict_of_siblings_long['ord'][attr_obj.parent_name_long] = [] # initiate key-value pair\n dict_of_siblings_long['ord'][attr_obj.parent_name_long].append(attr_obj.attr_name_long)\n\n # sort sub-arrays\n for key in dict_of_siblings_long['cat'].keys():\n dict_of_siblings_long['cat'][key] = sorted(dict_of_siblings_long['cat'][key], key = lambda x : int(x.split('_')[-1]))\n\n for key in dict_of_siblings_long['ord'].keys():\n dict_of_siblings_long['ord'][key] = sorted(dict_of_siblings_long['ord'][key], key = lambda x : int(x.split('_')[-1]))\n\n return dict_of_siblings_long\n\n elif long_or_kurz == 'kurz':\n\n dict_of_siblings_kurz = {}\n dict_of_siblings_kurz['cat'] = {}\n dict_of_siblings_kurz['ord'] = {}\n\n for attr_name_kurz in self.getInputAttributeNames('kurz'):\n attr_obj = self.attributes_kurz[attr_name_kurz]\n if attr_obj.attr_type == 'sub-categorical':\n if attr_obj.parent_name_kurz not in dict_of_siblings_kurz['cat'].keys():\n dict_of_siblings_kurz['cat'][attr_obj.parent_name_kurz] = [] # initiate key-value pair\n dict_of_siblings_kurz['cat'][attr_obj.parent_name_kurz].append(attr_obj.attr_name_kurz)\n elif attr_obj.attr_type == 'sub-ordinal':\n if attr_obj.parent_name_kurz not in dict_of_siblings_kurz['ord'].keys():\n dict_of_siblings_kurz['ord'][attr_obj.parent_name_kurz] = [] # initiate key-value pair\n 
dict_of_siblings_kurz['ord'][attr_obj.parent_name_kurz].append(attr_obj.attr_name_kurz)\n\n # sort sub-arrays\n for key in dict_of_siblings_kurz['cat'].keys():\n dict_of_siblings_kurz['cat'][key] = sorted(dict_of_siblings_kurz['cat'][key], key = lambda x : int(x.split('_')[-1]))\n\n for key in dict_of_siblings_kurz['ord'].keys():\n dict_of_siblings_kurz['ord'][key] = sorted(dict_of_siblings_kurz['ord'][key], key = lambda x : int(x.split('_')[-1]))\n\n return dict_of_siblings_kurz\n\n else:\n\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n\n def getOneHotAttributesNames(self, long_or_kurz = 'kurz'):\n tmp = self.getDictOfSiblings(long_or_kurz)\n names = []\n for key1 in tmp.keys():\n for key2 in tmp[key1].keys():\n names.extend(tmp[key1][key2])\n return np.array(names)\n\n def getNonHotAttributesNames(self, long_or_kurz = 'kurz'):\n a = self.getInputAttributeNames(long_or_kurz)\n b = self.getOneHotAttributesNames(long_or_kurz)\n return np.setdiff1d(a,b)\n\n def getVariableRanges(self):\n return dict(zip(\n self.getInputAttributeNames('kurz'),\n [\n self.attributes_kurz[attr_name_kurz].upper_bound -\n self.attributes_kurz[attr_name_kurz].lower_bound\n for attr_name_kurz in self.getInputAttributeNames('kurz')\n ],\n ))\n\n def printDataset(self, long_or_kurz = 'kurz'):\n if long_or_kurz == 'long':\n for attr_name_long in self.attributes_long:\n print(self.attributes_long[attr_name_long].__dict__)\n elif long_or_kurz == 'kurz':\n for attr_name_kurz in self.attributes_kurz:\n print(self.attributes_kurz[attr_name_kurz].__dict__)\n else:\n raise Exception(f'{long_or_kurz} not recognized as a valid `long_or_kurz`.')\n\n # (2020.04.15) perhaps we need a memoize here... but I tried calling this function\n # multiple times in a row from another file and it always returned the same slice\n # of data... weird.\n def getTrainTestSplit(self, preprocessing = None, with_meta = False, balanced = True):\n\n # When working only with normalized data in [0, 1], data ranges must change to [0, 1] as well\n # otherwise, in computing normalized distance we will normalize with intial ranges again!\n # pseudonym (2020.05.17) does this work with cat/ord and sub-cat/sub-ord data???\n def setBoundsToZeroOne():\n for attr_name_kurz in self.getNonHotAttributesNames('kurz'):\n attr_obj = self.attributes_kurz[attr_name_kurz]\n attr_obj.lower_bound = 0.0\n attr_obj.upper_bound = 1.0\n\n attr_obj = self.attributes_long[attr_obj.attr_name_long]\n attr_obj.lower_bound = 0.0\n attr_obj.upper_bound = 1.0\n\n # Normalize data: bring everything to [0, 1] - implemented for when feeding the model to DiCE\n def normalizeData(X_train, X_test):\n for attr_name_kurz in self.getNonHotAttributesNames('kurz'):\n attr_obj = self.attributes_kurz[attr_name_kurz]\n lower_bound = attr_obj.lower_bound\n upper_bound =attr_obj.upper_bound\n X_train[attr_name_kurz] = (X_train[attr_name_kurz] - lower_bound) / (upper_bound - lower_bound)\n X_test[attr_name_kurz] = (X_test[attr_name_kurz] - lower_bound) / (upper_bound - lower_bound)\n\n setBoundsToZeroOne()\n\n return X_train, X_test\n\n # TODO: This should be used with caution... 
it messes things up in MACE as ranges\n # will differ between factual and counterfactual domains\n def standardizeData(X_train, X_test):\n x_mean = X_train.mean()\n x_std = X_train.std()\n for index in x_std.index:\n if '_ord_' in index or '_cat_' in index:\n x_mean[index] = 0\n x_std[index] = 1\n X_train = (X_train - x_mean) / x_std\n X_test = (X_test - x_mean) / x_std\n return X_train, X_test\n\n def getBalancedDataFrame(data_frame, output_col):\n # assert only two classes in label (maybe relax later??)\n unique_labels = np.unique(data_frame[output_col])\n assert \\\n np.array_equal(\n unique_labels,\n np.array([0, 1]) # only allowing {0, 1} labels,\n ) or \\\n np.array_equal(\n unique_labels,\n np.array([-1, 1]) # only allowing {-1, 1} labels,\n ), \\\n f'expected unique labels to be [0, 1], but got {unique_labels}'\n\n # get balanced dataframe (take minimum of the count, then round down to nearest 250)\n unique_values_and_count = data_frame[output_col].value_counts()\n number_of_subsamples_in_each_class = unique_values_and_count.min() // 250 * 250\n data_frame = pd.concat([\n data_frame[data_frame.loc[:,output_col] == unique_labels[0]].sample(number_of_subsamples_in_each_class, random_state = RANDOM_SEED),\n data_frame[data_frame.loc[:,output_col] == unique_labels[1]].sample(number_of_subsamples_in_each_class, random_state = RANDOM_SEED),\n ]).sample(frac = 1, random_state = RANDOM_SEED)\n # data_frame = pd.concat([\n # data_frame[data_frame.loc[:,output_col] == 0],\n # data_frame[data_frame.loc[:,output_col] == 1],\n # ]).sample(frac = 1, random_state = RANDOM_SEED)\n\n return data_frame\n\n meta_cols = self.getMetaAttributeNames()\n input_cols = self.getInputAttributeNames()\n output_col = self.getOutputAttributeNames()[0]\n\n data_frame = copy.deepcopy(self.data_frame_kurz)\n if balanced:\n data_frame = getBalancedDataFrame(data_frame, self.getOutputAttributeNames()[0])\n\n if with_meta:\n all_data = data_frame.loc[:,np.array((input_cols, meta_cols)).flatten()]\n all_true_labels = data_frame.loc[:,output_col]\n if preprocessing is not None:\n assert with_meta == False, 'This feature is not built yet...'\n\n X_train, X_test, y_train, y_test = train_test_split(\n all_data,\n all_true_labels,\n train_size=.7,\n random_state = RANDOM_SEED)\n\n # ordering of next two lines matters (shouldn't overwrite input_cols); silly code... 
:|\n U_train = X_train[self.getMetaAttributeNames()]\n U_test = X_test[self.getMetaAttributeNames()]\n X_train = X_train[self.getInputAttributeNames()]\n X_test = X_test[self.getInputAttributeNames()]\n y_train = y_train # noop\n y_test = y_test # noop\n\n return X_train, X_test, U_train, U_test, y_train, y_test\n else:\n all_data = data_frame.loc[:,input_cols]\n all_true_labels = data_frame.loc[:,output_col]\n\n X_train, X_test, y_train, y_test = train_test_split(\n all_data,\n all_true_labels,\n train_size=.7,\n random_state = RANDOM_SEED)\n\n # TODO (2020.05.18): this should be updated so as NOT to update meta variables\n if preprocessing == 'standardize':\n X_train, X_test = standardizeData(X_train, X_test)\n elif preprocessing == 'normalize':\n X_train, X_test = normalizeData(X_train, X_test)\n\n return X_train, X_test, y_train, y_test\n\n def getOriginalDataFrame(self, num_samples, with_meta = False, with_label = False, balanced = True, data_split = 'train_and_test'):\n\n if with_meta:\n X_train, X_test, U_train, U_test, y_train, y_test = self.getTrainTestSplit(with_meta = True, balanced = balanced)\n else:\n X_train, X_test, y_train, y_test = self.getTrainTestSplit(with_meta = False, balanced = balanced)\n\n # order of if/elif is important\n if with_meta and with_label:\n data_train = pd.concat([X_train, U_train, y_train], axis = 1)\n data_test = pd.concat([X_test, U_test, y_test], axis = 1)\n elif with_meta:\n data_train = pd.concat([X_train, U_train], axis = 1)\n data_test = pd.concat([X_test, U_test], axis = 1)\n elif with_label:\n data_train = pd.concat([X_train, y_train], axis = 1)\n data_test = pd.concat([X_test, y_test], axis = 1)\n else:\n data_train = X_train\n data_test = X_test\n\n if data_split == 'train_and_test':\n data_all = pd.concat([data_train, data_test], axis = 0)\n elif data_split == 'train_only':\n data_all = data_train\n elif data_split == 'test_only':\n data_all = data_test\n else:\n raise NotImplementedError\n\n return data_all[:num_samples]\n\n\nclass DatasetAttribute(object):\n\n def __init__(\n self,\n attr_name_long,\n attr_name_kurz,\n attr_type,\n node_type,\n actionability,\n mutability,\n parent_name_long,\n parent_name_kurz,\n lower_bound,\n upper_bound):\n\n if attr_type not in VALID_ATTRIBUTE_DATA_TYPES:\n raise Exception(\"`attr_type` must be one of %r.\" % VALID_ATTRIBUTE_DATA_TYPES)\n\n if node_type not in VALID_ATTRIBUTE_NODE_TYPES:\n raise Exception(\"`node_type` must be one of %r.\" % VALID_ATTRIBUTE_NODE_TYPES)\n\n if actionability not in VALID_ACTIONABILITY_TYPES:\n raise Exception(\"`actionability` must be one of %r.\" % VALID_ACTIONABILITY_TYPES)\n\n if mutability not in VALID_MUTABILITY_TYPES:\n raise Exception(\"`mutability` must be one of %r.\" % VALID_MUTABILITY_TYPES)\n\n if lower_bound > upper_bound:\n raise Exception(\"`lower_bound` must be <= `upper_bound`\")\n\n if attr_type in {'sub-categorical', 'sub-ordinal'}:\n assert parent_name_long != -1, 'Parent ID set for non-hot attribute.'\n assert parent_name_kurz != -1, 'Parent ID set for non-hot attribute.'\n if attr_type == 'sub-categorical':\n assert lower_bound == 0\n assert upper_bound == 1\n if attr_type == 'sub-ordinal':\n # the first elem in thermometer is always on, but the rest may be on or off\n assert lower_bound == 0 or lower_bound == 1\n assert upper_bound == 1\n else:\n assert parent_name_long == -1, 'Parent ID set for non-hot attribute.'\n assert parent_name_kurz == -1, 'Parent ID set for non-hot attribute.'\n\n if attr_type in {'categorical', 'ordinal'}:\n assert 
lower_bound == 1 # setOneHotValue & setThermoValue assume this in their logic\n\n if attr_type in {'binary', 'categorical', 'sub-categorical'}: # not 'ordinal' or 'sub-ordinal'\n # IMPORTANT: surprisingly, it is OK if all sub-ordinal variables share actionability\n # think about it, if each sub- variable is same-or-increase, along with\n # the constraints that x0_ord_1 >= x0_ord_2, all variables can only stay\n # the same or increase. It works :)\n assert actionability in {'none', 'any'}, f\"{attr_type}'s actionability can only be in {'none', 'any'}, not `{actionability}`.\"\n\n if node_type != 'input':\n assert actionability == 'none', f'{node_type} attribute is not actionable.'\n assert mutability == False, f'{node_type} attribute is not mutable.'\n\n # We have introduced 3 types of variables: (actionable and mutable, non-actionable but mutable, immutable and non-actionable)\n if actionability != 'none':\n assert mutability == True\n # TODO: above/below seem contradictory... (2020.04.14)\n if mutability == False:\n assert actionability == 'none'\n\n if parent_name_long == -1 or parent_name_kurz == -1:\n assert parent_name_long == parent_name_kurz == -1\n\n self.attr_name_long = attr_name_long\n self.attr_name_kurz = attr_name_kurz\n self.attr_type = attr_type\n self.node_type = node_type\n self.actionability = actionability\n self.mutability = mutability\n self.parent_name_long = parent_name_long\n self.parent_name_kurz = parent_name_kurz\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n\ndef loadDataset(dataset_name, return_one_hot, load_from_cache = False, debug_flag = True, index_offset = 0, meta_param = None):\n\n def getInputOutputColumns(data_frame):\n all_data_frame_cols = data_frame.columns.values\n input_cols = [x for x in all_data_frame_cols if 'label' not in x.lower()]\n output_cols = [x for x in all_data_frame_cols if 'label' in x.lower()]\n assert len(output_cols) == 1\n return input_cols, output_cols[0]\n\n one_hot_string = 'one_hot' if return_one_hot else 'non_hot'\n\n save_file_path = os.path.join(\n os.path.dirname(__file__),\n f'_data_main/_cached/{dataset_name}_{one_hot_string}'\n )\n\n if load_from_cache:\n if debug_flag: print(f'[INFO] Attempting to load saved dataset (`{dataset_name}`) from cache...\\t', end = '')\n try:\n tmp = pickle.load(open(save_file_path, 'rb'))\n if debug_flag: print('done.')\n return tmp\n except:\n if debug_flag: print('failed. 
Re-creating dataset...')\n\n if dataset_name == 'adult':\n\n data_frame_non_hot = load_adult_data_new()\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n if col_name == 'Sex':\n attr_type = 'binary'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'Age':\n attr_type = 'binary' # 'numeric-int'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'NativeCountry': #~ RACE\n attr_type = 'binary'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'WorkClass':\n attr_type = 'categorical'\n actionability = 'any'\n mutability = True\n # elif col_name == 'EducationNumber':\n # attr_type = 'numeric-int'\n # actionability = 'any'\n # mutability = True\n elif col_name == 'EducationLevel':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'MaritalStatus':\n attr_type = 'categorical'\n actionability = 'any'\n mutability = True\n elif col_name == 'Occupation':\n attr_type = 'categorical'\n actionability = 'any'\n mutability = True\n # elif col_name == 'Relationship':\n # attr_type = 'categorical'\n # actionability = 'any'\n # mutability = True\n # elif col_name == 'CapitalGain':\n # attr_type = 'numeric-real'\n # actionability = 'any'\n # mutability = True\n # elif col_name == 'CapitalLoss':\n # attr_type = 'numeric-real'\n # actionability = 'any'\n # mutability = True\n elif col_name == 'HoursPerWeek':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = f'x{col_idx + index_offset}',\n attr_type = attr_type,\n node_type = 'input',\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n elif dataset_name == 'german':\n\n data_frame_non_hot = load_german_data()\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n if col_name == 'Sex': # TODO: make sex and race immutable in all datasets!\n attr_type = 'binary'\n actionability = 'any'\n mutability = True\n elif col_name == 'Age':\n attr_type = 'numeric-int' # 'numeric-real'\n actionability = 'same-or-increase'\n mutability = True\n elif col_name == 'Credit':\n attr_type = 'numeric-real'\n actionability = 'any'\n mutability = True\n elif col_name == 'LoanDuration':\n attr_type = 'numeric-int'\n actionability 
= 'none'\n mutability = True\n # elif col_name == 'CheckingAccountBalance':\n # attr_type = 'ordinal' # 'numeric-real'\n # actionability = 'any'\n # mutability = True\n # elif col_name == 'SavingsAccountBalance':\n # attr_type = 'ordinal'\n # actionability = 'any'\n # mutability = True\n # elif col_name == 'HousingStatus':\n # attr_type = 'ordinal'\n # actionability = 'any'\n # mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = f'x{col_idx + index_offset}',\n attr_type = attr_type,\n node_type = 'input',\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n elif dataset_name == 'credit':\n\n data_frame_non_hot = load_credit_data()\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n if col_name == 'isMale':\n attr_type = 'binary'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'isMarried':\n attr_type = 'binary'\n actionability = 'any'\n mutability = True\n elif col_name == 'AgeGroup':\n attr_type = 'ordinal'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'EducationLevel':\n attr_type = 'ordinal'\n actionability = 'any'\n mutability = True\n elif col_name == 'MaxBillAmountOverLast6Months':\n attr_type = 'numeric-real'\n actionability = 'any'\n mutability = True\n elif col_name == 'MaxPaymentAmountOverLast6Months':\n attr_type = 'numeric-real'\n actionability = 'any'\n mutability = True\n elif col_name == 'MonthsWithZeroBalanceOverLast6Months':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'MonthsWithLowSpendingOverLast6Months':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'MonthsWithHighSpendingOverLast6Months':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'MostRecentBillAmount':\n attr_type = 'numeric-real'\n actionability = 'any'\n mutability = True\n elif col_name == 'MostRecentPaymentAmount':\n attr_type = 'numeric-real'\n actionability = 'any'\n mutability = True\n elif col_name == 'TotalOverdueCounts':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'TotalMonthsOverdue':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'HasHistoryOfOverduePayments':\n attr_type = 'binary'\n actionability = 'any'\n mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = f'x{col_idx + index_offset}',\n attr_type = attr_type,\n node_type = 'input',\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n elif dataset_name == 'compass':\n\n 
data_frame_non_hot = load_compas_data_new()\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n if col_name == 'AgeGroup':\n attr_type = 'ordinal'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'Race':\n attr_type = 'binary'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'Sex':\n attr_type = 'binary'\n actionability = 'any' # 'none'\n mutability = True\n elif col_name == 'PriorsCount':\n attr_type = 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'ChargeDegree':\n attr_type = 'binary'\n actionability = 'any'\n mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = f'x{col_idx + index_offset}',\n attr_type = attr_type,\n node_type = 'input',\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n elif dataset_name == 'synthetic':\n\n variable_type = 'real'\n # variable_type = 'integer'\n\n scm_class = meta_param\n\n data_frame_non_hot = load_synthetic_data(scm_class, variable_type)\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n # ordering of next two lines matters (shouldn't overwrite input_cols); silly code... :|\n meta_cols = [col_name for col_name in input_cols if 'u' in col_name]\n input_cols = [col_name for col_name in input_cols if 'x' in col_name] # endogenous variables must start with `x`\n\n if 'fair' in scm_class:\n # fair experiments (other than adult) rely on labels being in {-1/+1}\n # TODO (lowpri): can we change this?? 
can sklearn svm and lr predict 0,1 instead of -1/+1??\n data_frame_non_hot[output_col] = data_frame_non_hot[output_col] * 2 - 1\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'\n node_type = 'input'\n actionability = 'any'\n mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = col_name,\n attr_type = attr_type,\n node_type = node_type,\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(meta_cols):\n\n attr_type = 'numeric-real'\n node_type = 'meta'\n actionability = 'none'\n mutability = False\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = col_name,\n attr_type = attr_type,\n node_type = node_type,\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n elif dataset_name == 'mortgage':\n\n data_frame_non_hot = load_mortgage_data()\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n if col_name == 'x0':\n attr_type = 'numeric-real'\n actionability = 'any'\n mutability = True\n elif col_name == 'x1':\n attr_type = 'numeric-real'\n actionability = 'any'\n mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = f'x{col_idx + index_offset}',\n attr_type = attr_type,\n node_type = 'input',\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n elif dataset_name == 'twomoon':\n\n variable_type = 'real'\n # variable_type = 'integer'\n\n data_frame_non_hot = load_twomoon_data(variable_type)\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = 
data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n if col_name == 'x0':\n attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'\n actionability = 'any'\n mutability = True\n elif col_name == 'x1':\n attr_type = 'numeric-real' if variable_type == 'real' else 'numeric-int'\n actionability = 'any'\n mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = f'x{col_idx + index_offset}',\n attr_type = attr_type,\n node_type = 'input',\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n elif dataset_name == 'test':\n\n data_frame_non_hot = load_test_data()\n data_frame_non_hot = data_frame_non_hot.reset_index(drop=True)\n attributes_non_hot = {}\n\n input_cols, output_col = getInputOutputColumns(data_frame_non_hot)\n\n col_name = output_col\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = 'y',\n attr_type = 'binary',\n node_type = 'output',\n actionability = 'none',\n mutability = False,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n for col_idx, col_name in enumerate(input_cols):\n\n if col_name == 'x0':\n attr_type = 'categorical'\n actionability = 'any'\n mutability = True\n\n attributes_non_hot[col_name] = DatasetAttribute(\n attr_name_long = col_name,\n attr_name_kurz = f'x{col_idx + index_offset}',\n attr_type = attr_type,\n node_type = 'input',\n actionability = actionability,\n mutability = mutability,\n parent_name_long = -1,\n parent_name_kurz = -1,\n lower_bound = data_frame_non_hot[col_name].min(),\n upper_bound = data_frame_non_hot[col_name].max())\n\n else:\n\n raise Exception(f'{dataset_name} not recognized as a valid dataset.')\n\n if return_one_hot:\n data_frame, attributes = getOneHotEquivalent(data_frame_non_hot, attributes_non_hot)\n else:\n data_frame, attributes = data_frame_non_hot, attributes_non_hot\n\n # save then return\n dataset_obj = Dataset(data_frame, attributes, return_one_hot, dataset_name)\n # if not loading from cache, we always overwrite the cache\n pickle.dump(dataset_obj, open(save_file_path, 'wb'))\n return dataset_obj\n\n\n# TODO: consider moving into Dataset class with getOneHot and getNonHot methods\ndef getOneHotEquivalent(data_frame_non_hot, attributes_non_hot):\n\n # TODO: see how we can switch between feature_names = col names for kurz and long (also maybe ordered)\n\n data_frame = copy.deepcopy(data_frame_non_hot)\n attributes = copy.deepcopy(attributes_non_hot)\n\n def setOneHotValue(val):\n return np.append(np.append(\n np.zeros(val - 1),\n np.ones(1)),\n np.zeros(num_unique_values - val)\n )\n\n def setThermoValue(val):\n return np.append(\n np.ones(val),\n np.zeros(num_unique_values - val)\n )\n\n for col_name in data_frame.columns.values:\n\n if attributes[col_name].attr_type not in {'categorical', 'ordinal'}:\n continue\n\n old_col_name_long = col_name\n new_col_names_long = []\n new_col_names_kurz = []\n\n old_attr_name_long = attributes[old_col_name_long].attr_name_long\n old_attr_name_kurz = attributes[old_col_name_long].attr_name_kurz\n old_attr_type = attributes[old_col_name_long].attr_type\n old_node_type = attributes[old_col_name_long].node_type\n old_actionability = 
attributes[old_col_name_long].actionability\n old_mutability = attributes[old_col_name_long].mutability\n old_lower_bound = attributes[old_col_name_long].lower_bound\n old_upper_bound = attributes[old_col_name_long].upper_bound\n\n num_unique_values = int(old_upper_bound - old_lower_bound + 1)\n\n assert old_col_name_long == old_attr_name_long\n\n new_attr_type = 'sub-' + old_attr_type\n new_node_type = old_node_type\n new_actionability = old_actionability\n new_mutability = old_mutability\n new_parent_name_long = old_attr_name_long\n new_parent_name_kurz = old_attr_name_kurz\n\n\n if attributes[col_name].attr_type == 'categorical': # do not do this for 'binary'!\n\n new_col_names_long = [f'{old_attr_name_long}_cat_{i}' for i in range(num_unique_values)]\n new_col_names_kurz = [f'{old_attr_name_kurz}_cat_{i}' for i in range(num_unique_values)]\n print(f'Replacing column {col_name} with {{{\", \".join(new_col_names_long)}}}')\n tmp = np.array(list(map(setOneHotValue, list(data_frame[col_name].astype(int).values))))\n data_frame_dummies = pd.DataFrame(data=tmp, columns=new_col_names_long)\n\n elif attributes[col_name].attr_type == 'ordinal':\n\n new_col_names_long = [f'{old_attr_name_long}_ord_{i}' for i in range(num_unique_values)]\n new_col_names_kurz = [f'{old_attr_name_kurz}_ord_{i}' for i in range(num_unique_values)]\n print(f'Replacing column {col_name} with {{{\", \".join(new_col_names_long)}}}')\n tmp = np.array(list(map(setThermoValue, list(data_frame[col_name].astype(int).values))))\n data_frame_dummies = pd.DataFrame(data=tmp, columns=new_col_names_long)\n\n # Update data_frame\n data_frame = pd.concat([data_frame.drop(columns = old_col_name_long), data_frame_dummies], axis=1)\n\n # Update attributes\n del attributes[old_col_name_long]\n for col_idx in range(len(new_col_names_long)):\n new_col_name_long = new_col_names_long[col_idx]\n new_col_name_kurz = new_col_names_kurz[col_idx]\n attributes[new_col_name_long] = DatasetAttribute(\n attr_name_long = new_col_name_long,\n attr_name_kurz = new_col_name_kurz,\n attr_type = new_attr_type,\n node_type = new_node_type,\n actionability = new_actionability,\n mutability = new_mutability,\n parent_name_long = new_parent_name_long,\n parent_name_kurz = new_parent_name_kurz,\n lower_bound = data_frame[new_col_name_long].min(),\n upper_bound = data_frame[new_col_name_long].max())\n\n return data_frame, attributes\n\n" ]
[ [ "pandas.concat", "numpy.random.seed", "numpy.unique", "numpy.array_equal", "sklearn.model_selection.train_test_split", "numpy.setdiff1d", "numpy.ones", "pandas.DataFrame", "numpy.floor", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
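A minimal, self-contained sketch of the two encodings implemented by setOneHotValue and setThermoValue in the getOneHotEquivalent function of the record above. It assumes, as the record's lower/upper bounds imply, that category values are the integers 1..num_unique_values; the helper names one_hot and thermometer are illustrative, not from the repo.

import numpy as np

def one_hot(val, num_unique_values):
    # categorical coding: val=2, n=4 -> [0, 1, 0, 0]
    out = np.zeros(num_unique_values)
    out[val - 1] = 1.0
    return out

def thermometer(val, num_unique_values):
    # ordinal (cumulative) coding: val=2, n=4 -> [1, 1, 0, 0]
    return np.append(np.ones(val), np.zeros(num_unique_values - val))

print(one_hot(2, 4))      # [0. 1. 0. 0.]
print(thermometer(2, 4))  # [1. 1. 0. 0.]

The thermometer form preserves the ordering of an ordinal attribute, which is why the record routes 'ordinal' columns through setThermoValue and only 'categorical' columns through setOneHotValue.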
mrosemeier/compmech
[ "f18f6d0471c72b26a3b014d2df41df3463505eae", "5387daab1ec656b065f7a9be820e2c234e53764e", "f18f6d0471c72b26a3b014d2df41df3463505eae", "5387daab1ec656b065f7a9be820e2c234e53764e" ]
[ "compmech/panel/tests/test_panel_field_outputs.py", "theory/stiffpanelbay/tstiff2d_clt_donnell_bardell/print_expressions_python.py", "compmech/composite/lamina.py", "theory/stiffpanelbay/bladestiff1d_clt_donnell_bardell/print_expressions_python.py" ]
[ "import numpy as np\n\nfrom compmech.panel import Panel\nfrom compmech.analysis import Analysis\nfrom compmech.sparse import solve\n\ndef test_panel_field_outputs():\n m = 7\n n = 6\n #TODO implement for conical panels\n strain_field = dict(exx=None, eyy=None, gxy=None, kxx=None, kyy=None, kxy=None)\n stress_field = dict(Nxx=None, Nyy=None, Nxy=None, Mxx=None, Myy=None, Mxy=None)\n for model in ['plate_clt_donnell_bardell',\n 'cpanel_clt_donnell_bardell']:\n p = Panel()\n p.model = model\n p.u1tx = 1\n p.u1ty = 1\n p.u2ty = 1\n p.v1tx = 0\n p.v2tx = 0\n p.v1ty = 0\n p.v2ty = 0\n\n p.a = 2.\n p.b = 1.\n p.r = 1.e5\n p.stack = [0, -45, +45, 90, +45, -45, 0, 0]\n p.plyt = 1e-3*0.125\n p.laminaprop = (142.5e9, 8.7e9, 0.28, 5.1e9, 5.1e9, 5.1e9)\n p.nx = m\n p.ny = n\n p.m = m\n p.n = n\n\n P = 1000.\n npts = 100\n p.forces_inc = []\n for y in np.linspace(0, p.b, npts):\n p.forces_inc.append([0., y, P/(npts-1.), 0, 0])\n p.forces_inc[0][2] /= 2.\n p.forces_inc[-1][2] /= 2.\n\n p.static()\n c = p.analysis.cs[0]\n Ns = p.stress(c, gridx=50, gridy=50)\n es = p.strain(c, gridx=50, gridy=50)\n for k, v in strain_field.items():\n if v is None:\n strain_field[k] = es.get(k).min()\n else:\n assert np.isclose(strain_field[k], es.get(k).min(), rtol=0.05)\n p.plot(c, vec=k, filename='tmp_test_panel_strain_field_%s.png' % k)\n for k, v in stress_field.items():\n if v is None:\n stress_field[k] = Ns.get(k).min()\n else:\n assert np.isclose(stress_field[k], Ns.get(k).min(), rtol=0.05)\n p.plot(c, vec=k, filename='tmp_test_panel_stress_field_%s.png' % k)\n", "import os\nimport glob\nfrom ast import literal_eval\n\nimport numpy as np\nimport sympy\nfrom sympy import pi, sin, cos, var\n\nfrom compmech.conecyl.sympytools import mprint_as_sparse\n\nvar('fAu, gAu, fAv, gAv, fAw, fAwxi, gAw, gAweta')\nvar('fBu, gBu, fBv, gBv, fBw, fBwxi, gBw, gBweta')\n\nvar('pAu, qAu, pAv, qAv, pAw, pAwxi, qAw, qAweta')\nvar('pBu, qBu, pBv, qBv, pBw, pBwxi, qBw, qBweta')\n\nvar('rAu, sAu, rAv, sAv, rAw, sAw, sAweta')\nvar('rBu, sBu, rBv, sBv, rBw, sBw, sBweta')\n\nvar('kt, kr, a, b, bb, bf, c1, dpb')\n\nsubs = {\n }\n\ndef List(*e):\n return list(e)\n\nfor i, filepath in enumerate(\n glob.glob(r'./output_expressions_mathematica/fortran_*.txt')):\n print(filepath)\n with open(filepath) as f:\n filename = os.path.basename(filepath)\n names = filename[:-4].split('_')\n lines = [line.strip() for line in f.readlines()]\n string = ''.join(lines)\n string = string.replace('\\\\','')\n tmp = eval(string)\n matrix = sympy.Matrix(np.atleast_2d(tmp))\n printstr = ''\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n if matrix[i,j] == 0:\n continue\n else:\n printstr += '%s[row+%d, col+%d] = %s\\n' % (names[1], i, j, str(matrix[i, j]))\n printstr = mprint_as_sparse(matrix, names[1], \"11\",\n print_file=False, collect_for=None,\n subs=subs)\n\n with open('.\\\\output_expressions_python\\\\' + filename, 'w') as f:\n f.write(printstr)\n", "\"\"\"\nComposite Lamina Module (:mod:`compmech.composite.lamina`)\n==========================================================\n\n.. 
currentmodule:: compmech.composite.lamina\n\n\"\"\"\nfrom __future__ import division, absolute_import\n\nimport numpy as np\nfrom numpy import cos, sin\nfrom numpy.linalg import inv\n\nfrom compmech.constants import DOUBLE\nfrom .matlamina import MatLamina\n\n\nclass Lamina(object):\n \"\"\"\n ========= ===========================================================\n attribute description\n ========= ===========================================================\n plyid id of the composite lamina\n matobj a pointer to a MatLamina object\n t ply thickness\n theta ply angle in degrees\n L transformation matrix for displacements to laminate csys\n R transformation matrix for stresses to laminate csys\n T transformation matrix for stresses to lamina csys\n QL constitutive matrix for plane-stress in laminate csys\n laminates laminates that contain this lamina\n ========= ===========================================================\n\n References:\n -----------\n .. [1] Reddy, J. N., Mechanics of Laminated Composite Plates and\n Shells - Theory and Analysys. Second Edition. CRC PRESS, 2004.\n\n \"\"\"\n\n def __init__(self):\n self.plyid = None\n self.matobj = None\n self.t = None\n self.theta = None\n self.L = None\n self.R = None\n self.T = None\n self.QL = None\n self.laminates = []\n\n def rebuild(self):\n thetarad = np.deg2rad(self.theta)\n cost = cos(thetarad)\n sint = sin(thetarad)\n sin2t = sin(2 * thetarad)\n #\n cos2 = cost**2\n cos3 = cost**3\n cos4 = cost**4\n sin2 = sint**2\n sin3 = sint**3\n sin4 = sint**4\n sincos = sint * cost\n self.L = np.array([[cost, sint, 0],\n [-sint, cost, 0],\n [0, 0, 1]], dtype=DOUBLE)\n # STRESS\n # to lamina Reddy Eq. 2.3.10\n self.R = np.array(\n [[cos2, sin2, 0, 0, 0, sin2t],\n [sin2, cos2, 0, 0, 0, -sin2t],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, cost, -sint, 0],\n [0, 0, 0, sint, cost, 0],\n [-sincos, sincos, 0, 0, 0, cos2 - sin2]], dtype=DOUBLE)\n # to laminate (VDI 2014 eq. 35, 36) # Reddy Eq. 2.3.8\n self.T = np.array(\n [[cos2, sin2, 0, 0, 0, -sin2t],\n [sin2, cos2, 0, 0, 0, sin2t],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, cost, sint, 0],\n [0, 0, 0, -sint, cost, 0],\n [sincos, -sincos, 0, 0, 0, cos2 - sin2]], dtype=DOUBLE)\n # STRAINS\n self.Te = np.array(\n [[cos2, sin2, 0, 0, 0, -2 * sin2t],\n [sin2, cos2, 0, 0, 0, 2 * sin2t],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, cost, sint, 0],\n [0, 0, 0, -sint, cost, 0],\n [2 * sincos, -2 * sincos, 0, 0, 0, cos2 - sin2]], dtype=DOUBLE)\n\n # different from stress due to:\n # 2*e12 = e6 2*e13 = e5 2*e23 = e4\n # to laminate\n # self.Rstrain = np.transpose(self.Tstress)\n # to lamina\n # self.Tstrain = np.transpose(self.Rstress)\n\n if isinstance(self.matobj, MatLamina):\n e1 = self.matobj.e1\n e2 = self.matobj.e2\n nu12 = self.matobj.nu12\n nu21 = self.matobj.nu21\n g12 = self.matobj.g12\n g13 = self.matobj.g13\n g23 = self.matobj.g23\n else:\n e1 = self.matobj.e\n e2 = self.matobj.e\n nu12 = self.matobj.nu\n nu21 = self.matobj.nu\n g12 = self.matobj.g\n g = self.matobj.g\n\n # plane stress\n q11 = e1 / (1 - nu12 * nu21)\n q12 = nu12 * e2 / (1 - nu12 * nu21)\n q22 = e2 / (1 - nu12 * nu21)\n q44 = g23\n q55 = g13\n q16 = 0\n q26 = 0\n q66 = g12\n\n self.Q = np.array([[q11, q12, q16, 0, 0],\n [q12, q22, q26, 0, 0],\n [q16, q26, q66, 0, 0],\n [0, 0, 0, q44, 0],\n [0, 0, 0, 0, q55]], dtype=DOUBLE)\n # Reddy Eq. 
2.4.8\n q11L = q11 * cos4 + 2 * (q12 + 2 * q66) * sin2 * cos2 + q22 * sin4\n q12L = (q11 + q22 - 4 * q66) * sin2 * cos2 + q12 * (sin4 + cos4)\n q22L = q11 * sin4 + 2 * (q12 + 2 * q66) * sin2 * cos2 + q22 * cos4\n q16L = (q11 - q12 - 2 * q66) * sint * cos3 + \\\n (q12 - q22 + 2 * q66) * sin3 * cost\n q26L = (q11 - q12 - 2 * q66) * sin3 * cost + \\\n (q12 - q22 + 2 * q66) * sint * cos3\n q66L = (q11 + q22 - 2 * q12 - 2 * q66) * \\\n sin2 * cos2 + q66 * (sin4 + cos4)\n q44L = q44 * cos2 + q55 * sin2\n q45L = (q55 - q44) * sincos\n q55L = q55 * cos2 + q44 * sin2\n\n self.QL = np.array([[q11L, q12L, q16L, 0, 0],\n [q12L, q22L, q26L, 0, 0],\n [q16L, q26L, q66L, 0, 0],\n [0, 0, 0, q44L, q45L],\n [0, 0, 0, q45L, q55L]], dtype=DOUBLE)\n\n # Reddy Eq. 2.3.17\n C = self.matobj.c\n self.CL = np.dot(np.dot(self.T, C), np.transpose(\n self.T))\n\n # Bogetti Eq. 28\n self.delta_CL45 = self.CL[3, 3] * \\\n self.CL[4, 4] - self.CL[3, 4] * self.CL[4, 3]\n\n a1 = self.matobj.a1\n a2 = self.matobj.a2\n a3 = self.matobj.a3\n\n if not a1:\n a1 = 0.\n if not a2:\n a2 = 0.\n if not a3:\n a3 = 0.\n\n self.A = np.array([a1, a2, 0, 0, 0], dtype=DOUBLE)\n\n self.A3D = np.array([a1, a2, a3, 0, 0, 0], dtype=DOUBLE)\n\n # Reddy Eq 2.3.23\n a11L = a1 * cos2 + a2 * sin2\n a22L = a1 * sin2 + a2 * cos2\n a12L = (a1 - a2) * sint * cost\n a13L = 0.\n a23L = 0.\n a33L = a3\n\n self.AL = np.array([a11L, a22L, a12L, 0, 0], dtype=DOUBLE)\n\n self.AL3D = np.array(\n [a11L, a22L, a33L, a23L, a13L, a12L], dtype=DOUBLE)\n\n def calc_loading(self, eps_laminate, dT):\n ''' laminate strain needs to come in the following notation\n TODO: extend model to handle 3D stresses\n [eps_x, eps_y, eps_z, gamma_yz, gamma_xz, gamma_xy]\n and output is:\n [sigma_1, sigma_2, sigma_3, tau_23, tau_13, tau_12]\n '''\n # transform from engineering strain\n # 2*e12 = e6 2*e13 = e5 2*e23 = e4\n # self.rebuild()\n # self.theta\n '''\n # calculate thermal loads\n _eps_therm = self.AL * dT\n eps_therm = np.zeros_like(eps_laminate)\n eps_therm[0] = _eps_therm[0]\n eps_therm[1] = _eps_therm[1]\n eps_therm[5] = _eps_therm[2]\n '''\n T = self.Te\n\n # transform strain to lamina coordinate sysytem\n\n # Rstrain = np.transpose(T) # np.transpose(T)\n Rstrain = inv(T)\n\n #Rstrain = T\n\n eps = np.dot(Rstrain, eps_laminate)\n\n # recover stress\n # reorder vector to plane\n # [eps_1, eps_2, gamma_21, gamma_23, gamma_13]\n eps_plane = np.zeros(len(eps) - 1)\n eps_plane[0] = eps[0]\n eps_plane[1] = eps[1]\n eps_plane[2] = eps[5]\n eps_plane[3] = eps[3]\n eps_plane[4] = eps[4]\n sig_plane = np.dot(self.Q, (eps_plane - self.AL * dT))\n # reorder back to 3D COS\n # [sigma_1, sigma_2, sigma_3, tau_23, tau_13, tau_12]\n sig = np.zeros_like(eps)\n sig[0] = sig_plane[0]\n sig[1] = sig_plane[1]\n sig[2] = 0. 
# sigma_3\n sig[3] = sig_plane[3]\n sig[4] = sig_plane[4]\n sig[5] = sig_plane[2]\n\n return eps, sig\n", "import os\nimport glob\nfrom ast import literal_eval\n\nimport numpy as np\nimport sympy\nfrom sympy import pi, sin, cos, var\n\nfrom compmech.conecyl.sympytools import mprint_as_sparse\n\nvar('fAuxi, fBuxi, gAu, gBu, fAu, fBu, gAueta, gBueta')\nvar('fAvxi, fBvxi, fAv, fBv, gAv, gBv, gAveta, gBveta')\nvar('fAwxixi, fBwxixi, gAw, gBw, fAwxi, fBwxi, gAweta, gBweta')\nvar('fAw, fBw, gAwetaeta, gBwetaeta')\n\nvar('df, bf, a, b, h, hb, hf, mu')\nvar('E1, S1, F1, Jxx, Fx, aeromu')\n\nsubs = {\n }\n\ndef List(*e):\n return list(e)\n\nfor i, filepath in enumerate(\n glob.glob(r'./output_expressions_mathematica/fortran_*.txt')):\n print(filepath)\n with open(filepath) as f:\n filename = os.path.basename(filepath)\n names = filename[:-4].split('_')\n lines = [line.strip() for line in f.readlines()]\n string = ''.join(lines)\n string = string.replace('\\\\','')\n tmp = eval(string)\n matrix = sympy.Matrix(np.atleast_2d(tmp))\n printstr = ''\n for i in range(matrix.shape[0]):\n for j in range(matrix.shape[1]):\n if matrix[i,j] == 0:\n continue\n else:\n printstr += '%s[row+%d, col+%d] = %s\\n' % (names[1], i, j, str(matrix[i, j]))\n printstr = mprint_as_sparse(matrix, names[1], \"11\",\n print_file=False, collect_for=None,\n subs=subs)\n\n with open('.\\\\output_expressions_python\\\\' + filename, 'w') as f:\n f.write(printstr)\n" ]
[ [ "numpy.linspace" ], [ "numpy.atleast_2d" ], [ "numpy.dot", "numpy.linalg.inv", "numpy.cos", "numpy.sin", "numpy.deg2rad", "numpy.zeros_like", "numpy.transpose", "numpy.array" ], [ "numpy.atleast_2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
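A runnable sketch of the in-plane stiffness rotation performed in Lamina.rebuild() above (the transformed reduced stiffnesses of Reddy Eq. 2.4.8, transcribed from the record). The helper name, the nu21 reciprocity relation, and the example material values (taken from the laminaprop tuple in the record's panel test) are illustrative assumptions, not a definitive compmech API.

import numpy as np

def rotated_plane_stress_Q(e1, e2, nu12, g12, theta_deg):
    # reciprocity assumption: nu21 = nu12 * e2 / e1
    nu21 = nu12 * e2 / e1
    den = 1 - nu12 * nu21
    q11, q12, q22, q66 = e1 / den, nu12 * e2 / den, e2 / den, g12
    t = np.deg2rad(theta_deg)
    c, s = np.cos(t), np.sin(t)
    # Reddy Eq. 2.4.8, as in Lamina.rebuild()
    q11L = q11 * c**4 + 2 * (q12 + 2 * q66) * s**2 * c**2 + q22 * s**4
    q12L = (q11 + q22 - 4 * q66) * s**2 * c**2 + q12 * (s**4 + c**4)
    q22L = q11 * s**4 + 2 * (q12 + 2 * q66) * s**2 * c**2 + q22 * c**4
    q16L = (q11 - q12 - 2 * q66) * s * c**3 + (q12 - q22 + 2 * q66) * s**3 * c
    q26L = (q11 - q12 - 2 * q66) * s**3 * c + (q12 - q22 + 2 * q66) * s * c**3
    q66L = (q11 + q22 - 2 * q12 - 2 * q66) * s**2 * c**2 + q66 * (s**4 + c**4)
    return np.array([[q11L, q12L, q16L],
                     [q12L, q22L, q26L],
                     [q16L, q26L, q66L]])

# illustrative lamina at 45 degrees, properties from the record's test file
print(rotated_plane_stress_Q(142.5e9, 8.7e9, 0.28, 5.1e9, 45.0))

At theta = 0 the coupling terms q16L and q26L vanish, which is a quick sanity check on the transcription.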
kunalk3/Machine_Learning_using_Python
[ "6a70e66151dc8358c429be71ebbf3fbc19479847" ]
[ "Hypothesis_testing/HypothesisTesting1.py" ]
[ "#---------------------------------------------------------------------\n# File Name : HypothesisTesting1.py\n# Author : Kunal K.\n# Description : Implementing hypothesis test methods\n# Date : 9 Nov. 2020\n# Version : V1.0\n# Ref No : DS_Code_P_K07\n#---------------------------------------------------------------------\n\n#importing the packages which are required \nimport pandas as pd\nimport numpy as np\nimport scipy \nfrom scipy import stats\nimport statsmodels.api as sm\n\n#install plotly package \nimport plotly.plotly as py\nimport plotly.graph_objs as go\nfrom plotly.tools import FigureFactory as FF\n\n#Mann-Whitney test \ndata=pd.read_csv(\"Hypothesis testing/with and without additive.csv\")\n\n#doing Normality test for Mann-Whitney\n#without additive Normality test\nwithoutAdditive_data=stats.shapiro(data.Without_additive)\nwithoutAdditive_pValue=withoutAdditive_data[1]\nprint(\"p-value is: \"+str(withoutAdditive_pValue))\n\n#Additive normality test\nAdditive=stats.shapiro(data.With_Additive)\nAdditive_pValue=Additive[1]\nprint(\"p-value is: \"+str(Additive_pValue))\n\n#Doing Mann-Whitney test\nfrom scipy.stats import mannwhitneyu\nmannwhitneyu(data.Without_additive, data.With_Additive)\n\n#############################End of Mann-Whitney test#####################################\n\n#2-Sample T-Test\n#Creditcard Promotion data set \npromotion=pd.read_csv(\"Hypothesis testing/Promotion.csv\")\n#Ho: Avg of purchases made by FIW <= Avg purchases made by SC =>default/ current/ no action\n#Ha: Avg of purchases made by FIW > Avg purchases made by SC =>take action \n#Doing Normality test \n#We consider Ho: Data are normal\n#We consider Ha: Data are not normal\n\nPromotion=stats.shapiro(promotion.InterestRateWaiver)\nPromotion_pValue=Promotion[1]\nprint(\"p-value is: \"+str(Promotion_pValue))\n\nSDPromotion=stats.shapiro(promotion.StandardPromotion)\nSDPromotion_pValue=SDPromotion[1]\nprint(\"p-value is: \"+str(SDPromotion_pValue))\n#we can proceed with the model \n#Variance test \nscipy.stats.levene(promotion.InterestRateWaiver, promotion.StandardPromotion)\n\n#2 Sample T test \nscipy.stats.ttest_ind(promotion.InterestRateWaiver,promotion.StandardPromotion)\n\nscipy.stats.ttest_ind(promotion.InterestRateWaiver,promotion.StandardPromotion,equal_var = True)\n###########################End of 2-Sample T-Test############################################\n\n#One way Anova\n#Importing the data set of contractrenewal \nfrom statsmodels.formula.api import ols\ncof=pd.read_csv(\"ContractRenewal_Data(unstacked).csv\")\ncof.columns=\"SupplierA\",\"SupplierB\",\"SupplierC\"\n\n#Normality test \nSupA=stats.shapiro(cof.SupplierA) #Shapiro Test\nSupA_pValue=SupA[1]\nprint(\"p-value is: \"+str(SupA_pValue))\n\nSupB=stats.shapiro(cof.SupplierB)\nSupB_pValue=SupB[1]\nprint(\"p-value is: \"+str(SupB_pValue))\n\nSupC=stats.shapiro(cof.SupplierC)\nSupC_pValue=SupC[1]\nprint(\"p-value is: \"+str(SupC_pValue))\n\n#Variance Test \nscipy.stats.levene(cof.SupplierA, cof.SupplierB)\nscipy.stats.levene(cof.SupplierB, cof.SupplierC)\nscipy.stats.levene(cof.SupplierC, cof.SupplierA)\n\n#One-Way Anova\n\nmod=ols('SupplierA~SupplierB+SupplierC',data=cof).fit()\naov_table=sm.stats.anova_lm(mod,typ=2)\nprint(aov_table)\n###########################End of One-Way Anova###################################################\n\n#Chi-Square test \n#Importing the data set of bahaman \nBahaman=pd.read_csv(\"Hypothesis testing/Bahaman.csv\")\ncount=pd.crosstab(Bahaman[\"Defective\"],Bahaman[\"Country\"])\ncount\n\nChisquares_results=scipy.stats.chi2_contingency(count)\nChi_pValue=Chisquares_results[1]\nprint(\"p-value is: \"+str(Chi_pValue))\n\n##########################End of chi-square test################################################\n\n#1 Sample Sign Test \nimport statsmodels.stats.descriptivestats as sd\n#importing the data set of signtest.csv\ndata=pd.read_csv(\"C:/Users/suri/Desktop/practice programs/Hypothesis testing/Signtest.csv\")\n#normality test \ndata_scores=stats.shapiro(data.Scores)\ndata_pValue=data_scores[1]\nprint(\"p-value is: \"+str(data_pValue))\n\n#1 Sample Sign Test \nsd.sign_test(data.Scores,mu0=0)\n############################End of 1 Sample Sign test###########################################\n\n#2-Proportion Test \ntwo_prop_test=pd.read_csv(\"Hypothesis testing/JohnyTalkers.csv\")\n#importing packages to do 2 proportion test\nfrom statsmodels.stats.proportion import proportions_ztest\n#we do the cross table and see how many adults or children are purchasing\ntab = two_prop_test.groupby(['Person', 'Icecream']).size()\ncount = np.array([58, 152]) #How many adults and children are purchasing\nnobs = np.array([480, 740]) #Total number of adults and children there are\n\nstat, pval = proportions_ztest(count, nobs,alternative='two-sided') \n#The alternative hypothesis can be either two-sided or one of the one-sided tests\n#smaller means that the alternative hypothesis is prop < value\n#larger means prop > value.\nprint('{0:0.3f}'.format(pval))\n# two-sided -> means checking for equal proportions of Adults and Children who purchased\n# p-value = 6.261e-05 < 0.05 so reject the null hypothesis i.e.\n# Unequal proportions \n\nstat, pval = proportions_ztest(count, nobs,alternative='larger')\nprint('{0:0.3f}'.format(pval))\n# Ha -> Proportion of Adults > Proportion of Children\n# Ho -> Proportion of Adults <= Proportion of Children\n# p-value = 0.999 > 0.05 so fail to reject the null hypothesis\n# so proportion of Children > proportion of Adults\n# Do not launch the ice cream shop\n\n###################################End of Two proportion test####################################" ]
[ [ "pandas.crosstab", "pandas.read_csv", "scipy.stats.chi2_contingency", "scipy.stats.mannwhitneyu", "scipy.stats.shapiro", "scipy.stats.levene", "numpy.array", "scipy.stats.ttest_ind" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
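A self-contained sketch of the two-proportion z-test pattern used at the end of HypothesisTesting1.py above, reusing the record's counts. With alternative='larger', the alternative hypothesis is that the first group's proportion exceeds the second's; here 58/480 is about 0.12 versus 152/740 about 0.21, so the one-sided p-value is large, matching the record's comments.

import numpy as np
from statsmodels.stats.proportion import proportions_ztest

count = np.array([58, 152])   # purchasers per group (adults, children)
nobs = np.array([480, 740])   # group sizes

stat, pval = proportions_ztest(count, nobs, alternative='two-sided')
print('two-sided p = {0:0.3f}'.format(pval))   # small p -> unequal proportions

stat, pval = proportions_ztest(count, nobs, alternative='larger')
print('one-sided p = {0:0.3f}'.format(pval))   # Ha: adult proportion is larger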
cofibit/bitmexbot
[ "765ec4efaf710b334b0c9f20a094d003261c297f" ]
[ "indicators.py" ]
[ "\"\"\"\r\nThis file contains a collection of common indicators, which are based on third party or custom libraries\r\n\r\n\"\"\"\r\nfrom numpy.core.records import ndarray\r\nfrom pandas import Series, DataFrame\r\nimport pandas as pd\r\nimport numpy as np\r\n# from math import log\r\n\r\n\r\ndef heikinashi(bars):\r\n bars = bars.copy()\r\n bars['ha_close'] = (bars['open'] + bars['high'] +\r\n bars['low'] + bars['close']) / 4\r\n\r\n bars['ha_open'] = (bars['open'].shift(1) + bars['close'].shift(1)) / 2\r\n bars.loc[:1, 'ha_open'] = bars['open'].values[0]\r\n for x in range(2):\r\n bars.loc[1:, 'ha_open'] = (\r\n (bars['ha_open'].shift(1) + bars['ha_close'].shift(1)) / 2)[1:]\r\n\r\n bars['ha_high'] = bars.loc[:, ['high', 'ha_open', 'ha_close']].max(axis=1)\r\n bars['ha_low'] = bars.loc[:, ['low', 'ha_open', 'ha_close']].min(axis=1)\r\n\r\n return pd.DataFrame(\r\n index=bars.index,\r\n data={\r\n 'open': bars['ha_open'],\r\n 'high': bars['ha_high'],\r\n 'low': bars['ha_low'],\r\n 'close': bars['ha_close']})\r\n\r\n\r\ndef crossed(series1, series2, direction=None):\r\n if isinstance(series1, np.ndarray):\r\n series1 = Series(series1)\r\n\r\n if isinstance(series2, int) or isinstance(series2, float) or isinstance(series2, np.ndarray):\r\n series2 = Series(index=series1.index, data=series2)\r\n\r\n if direction is None or direction == \"above\":\r\n above = Series((series1 > series2) & (\r\n series1.shift(1) <= series2.shift(1)))\r\n\r\n if direction is None or direction == \"below\":\r\n below = Series((series1 < series2) & (\r\n series1.shift(1) >= series2.shift(1)))\r\n\r\n if direction is None:\r\n return above or below\r\n\r\n return above if direction is \"above\" else below\r\n\r\n\r\ndef crossed_above(series1, series2):\r\n return crossed(series1, series2, \"above\")\r\n\r\n\r\ndef crossed_below(series1, series2):\r\n return crossed(series1, series2, \"below\")\r\n\r\n\r\ndef aroon(dataframe, period=25, field='close', colum_prefix=\"aroon\") -> DataFrame:\r\n from pyti.aroon import aroon_up as up\r\n from pyti.aroon import aroon_down as down\r\n dataframe[\"{}_up\".format(colum_prefix)] = up(dataframe[field], period)\r\n dataframe[\"{}_down\".format(colum_prefix)] = down(dataframe[field], period)\r\n return dataframe\r\n\r\n\r\ndef atr(dataframe, period, field='close') -> ndarray:\r\n from pyti.average_true_range import average_true_range\r\n return average_true_range(dataframe[field], period)\r\n\r\n\r\ndef atr_percent(dataframe, period, field='close') -> ndarray:\r\n from pyti.average_true_range_percent import average_true_range_percent\r\n return average_true_range_percent(dataframe[field], period)\r\n\r\n\r\ndef bollinger_bands(dataframe, period=21, stdv=2, field='close', colum_prefix=\"bb\") -> DataFrame:\r\n from pyti.bollinger_bands import lower_bollinger_band, middle_bollinger_band, upper_bollinger_band\r\n dataframe[\"{}_lower\".format(colum_prefix)] = lower_bollinger_band(dataframe[field], period, stdv)\r\n dataframe[\"{}_middle\".format(colum_prefix)] = middle_bollinger_band(dataframe[field], period, stdv)\r\n dataframe[\"{}_upper\".format(colum_prefix)] = upper_bollinger_band(dataframe[field], period, stdv)\r\n\r\n return dataframe\r\n\r\n\r\ndef cmf(dataframe, period=14) -> ndarray:\r\n from pyti.chaikin_money_flow import chaikin_money_flow\r\n\r\n return chaikin_money_flow(dataframe['close'], dataframe['high'], dataframe['low'], dataframe['volume'], period)\r\n\r\n\r\ndef accumulation_distribution(dataframe) -> ndarray:\r\n from pyti.accumulation_distribution import 
accumulation_distribution as acd\r\n\r\n return acd(dataframe['close'], dataframe['high'], dataframe['low'], dataframe['volume'])\r\n\r\n\r\ndef osc(dataframe, periods=14) -> ndarray:\r\n \"\"\"\r\n 1. Calculating DM (i).\r\n If HIGH (i) > HIGH (i - 1), DM (i) = HIGH (i) - HIGH (i - 1), otherwise DM (i) = 0.\r\n 2. Calculating DMn (i).\r\n If LOW (i) < LOW (i - 1), DMn (i) = LOW (i - 1) - LOW (i), otherwise DMn (i) = 0.\r\n 3. Calculating value of OSC:\r\n OSC (i) = SMA (DM, N) / (SMA (DM, N) + SMA (DMn, N)).\r\n\r\n :param dataframe:\r\n :param periods:\r\n :return:\r\n \"\"\"\r\n df = dataframe\r\n df['DM'] = (df['high'] - df['high'].shift()).apply(lambda x: max(x, 0))\r\n df['DMn'] = (df['low'].shift() - df['low']).apply(lambda x: max(x, 0))\r\n return Series.rolling_mean(df.DM, periods) / (\r\n Series.rolling_mean(df.DM, periods) + Series.rolling_mean(df.DMn, periods))\r\n\r\n\r\ndef cmo(dataframe, period, field='close') -> ndarray:\r\n from pyti.chande_momentum_oscillator import chande_momentum_oscillator\r\n return chande_momentum_oscillator(dataframe[field], period)\r\n\r\n\r\ndef hull_moving_average(dataframe, period, field='close') -> ndarray:\r\n from pyti.hull_moving_average import hull_moving_average as hma\r\n return hma(dataframe[field], period)\r\n\r\n\r\ndef cci(dataframe, period) -> ndarray:\r\n from pyti.commodity_channel_index import commodity_channel_index\r\n\r\n return commodity_channel_index(dataframe['close'], dataframe['high'], dataframe['low'], period)\r\n\r\n\r\ndef vfi(dataframe, length=130, coef=0.2, vcoef=2.5, signalLength=5, smoothVFI=False):\r\n \"\"\"\r\n Volume Flow Indicator conversion\r\n\r\n Author: creslinux, June 2018 - Python\r\n Original Author: Chris Moody, TradingView - Pinescript\r\n To return vfi, vfima and histogram\r\n\r\n A simplified interpretation of the VFI is:\r\n * Values above zero indicate a bullish state and the crossing of the zero line is the trigger or buy signal.\r\n * The strongest signal with all money flow indicators is of course divergence.\r\n * A crossover of vfi > vfima is uptrend\r\n * A crossunder of vfima > vfi is downtrend\r\n * smoothVFI can be set to smooth for a cleaner plot to ease false signals\r\n * histogram can be used against self -1 to check if upward or downward momentum\r\n\r\n\r\n Call from strategy to populate vfi, vfima, vfi_hist into dataframe\r\n\r\n Example how to call:\r\n # Volume Flow Index: Add VFI, VFIMA, Histogram to DF\r\n dataframe['vfi'], dataframe['vfima'], dataframe['vfi_hist'] = \\\r\n vfi(dataframe, length=130, coef=0.2, vcoef=2.5, signalLength=5, smoothVFI=False)\r\n\r\n :param dataframe:\r\n :param length: - VFI Length - 130 default\r\n :param coef: - price coef - 0.2 default\r\n :param vcoef: - volume coef - 2.5 default\r\n :param signalLength: - 5 default\r\n :param smoothVFI: bool - False detault\r\n :return: vfi, vfima, vfi_hist\r\n \"\"\"\r\n\r\n \"\"\"\"\r\n Original Pinescript \r\n From: https://www.tradingview.com/script/MhlDpfdS-Volume-Flow-Indicator-LazyBear/\r\n\r\n length = input(130, title=\"VFI length\")\r\n coef = input(0.2)\r\n vcoef = input(2.5, title=\"Max. vol. cutoff\")\r\n signalLength=input(5)\r\n smoothVFI=input(false, type=bool)\r\n\r\n #### Conversion summary to python \r\n - ma(x,y) => smoothVFI ? 
sma(x,y) : x // Added as smoothVFI test on vfi\r\n\r\n - typical = hlc3 // Added to DF as HLC\r\n - inter = log(typical) - log(typical[1]) // Added to DF as inter\r\n - vinter = stdev(inter, 30) // Added to DF as vinter\r\n - cutoff = coef * vinter * close // Added to DF as cutoff\r\n - vave = sma(volume, length)[1] // Added to DF as vave\r\n - vmax = vave * vcoef // Added to Df as vmax\r\n - vc = iff(volume < vmax, volume, vmax) // Added np.where test, result in DF as vc\r\n - mf = typical - typical[1] // Added into DF as mf - typical is hlc3\r\n - vcp = iff(mf > cutoff, vc, iff(mf < -cutoff, -vc, 0)) // added in def vcp, in DF as vcp\r\n\r\n - vfi = ma(sum(vcp, length) / vave, 3) // Added as DF vfi. Will sma vfi 3 if smoothVFI flag set\r\n - vfima = ema(vfi, signalLength) // added to DF as vfima\r\n - d = vfi-vfima // Added to df as histogram\r\n\r\n ### Pinscript plotout - nothing to do here for freqtrade.\r\n plot(0, color=gray, style=3)\r\n showHisto=input(false, type=bool)\r\n plot(showHisto ? d : na, style=histogram, color=gray, linewidth=3, transp=50)\r\n plot( vfima , title=\"EMA of vfi\", color=orange)\r\n plot( vfi, title=\"vfi\", color=green,linewidth=2)\r\n \"\"\"\r\n import talib as ta\r\n from math import log\r\n from pyti.simple_moving_average import simple_moving_average as sma\r\n from numpy import where\r\n\r\n length = length\r\n coef = coef\r\n vcoef = vcoef\r\n signalLength = signalLength\r\n smoothVFI = smoothVFI\r\n df = dataframe\r\n # Add hlc3 and populate inter to the dataframe\r\n df['hlc'] = ((df['high'] + df['low'] + df['close']) / 3).astype(float)\r\n df['inter'] = df['hlc'].map(log) - df['hlc'].shift(+1).map(log)\r\n df['vinter'] = df['inter'].rolling(30).std(ddof=0)\r\n df['cutoff'] = (coef * df['vinter'] * df['close'])\r\n # Vave is to be calculated on volume of the past bar\r\n df['vave'] = sma(df['volume'].shift(+1), length)\r\n df['vmax'] = df['vave'] * vcoef\r\n df['vc'] = where((df['volume'] < df['vmax']), df['volume'], df['vmax'])\r\n df['mf'] = df['hlc'] - df['hlc'].shift(+1)\r\n\r\n # more logic for vcp, so create a def and df.apply it\r\n def vcp(x):\r\n if x['mf'] > x['cutoff']:\r\n return x['vc']\r\n elif x['mf'] < -(x['cutoff']):\r\n return -(x['vc'])\r\n else:\r\n return 0\r\n\r\n df['vcp'] = df.apply(vcp, axis=1)\r\n # vfi has a smooth option passed over def call, sma if set\r\n df['vfi'] = (df['vcp'].rolling(length).sum()) / df['vave']\r\n if smoothVFI == True:\r\n df['vfi'] = sma(df['vfi'], 3)\r\n df['vfima'] = ta.EMA(df['vfi'], signalLength)\r\n df['vfi_hist'] = df['vfi'] - df['vfima']\r\n\r\n # clean up columns used vfi calculation but not needed for strat\r\n df.drop('hlc', axis=1, inplace=True)\r\n df.drop('inter', axis=1, inplace=True)\r\n df.drop('vinter', axis=1, inplace=True)\r\n df.drop('cutoff', axis=1, inplace=True)\r\n df.drop('vave', axis=1, inplace=True)\r\n df.drop('vmax', axis=1, inplace=True)\r\n df.drop('vc', axis=1, inplace=True)\r\n df.drop('mf', axis=1, inplace=True)\r\n df.drop('vcp', axis=1, inplace=True)\r\n\r\n return df['vfi'], df['vfima'], df['vfi_hist']\r\n\r\n\r\ndef mmar(dataframe, matype=\"EMA\", src=\"close\", debug=False):\r\n \"\"\"\r\n Madrid Moving Average Ribbon\r\n\r\n Returns: MMAR\r\n \"\"\"\r\n \"\"\"\r\n Author(Freqtrade): Creslinux\r\n Original Author(TrdingView): \"Madrid\"\r\n\r\n Pinescript from TV Source Code and Description \r\n //\r\n // Madrid : 17/OCT/2014 22:51M: Moving Average Ribbon : 2.0 : MMAR\r\n // http://madridjourneyonws.blogspot.com/\r\n //\r\n // This plots a moving average 
ribbon, either exponential or standard.\r\n // This study is best viewed with a dark background. It provides an easy\r\n // and fast way to determine the trend direction and possible reversals.\r\n //\r\n // Lime : Uptrend. Long trading\r\n // Green : Reentry (buy the dip) or downtrend reversal warning\r\n // Red : Downtrend. Short trading\r\n // Maroon : Short Reentry (sell the peak) or uptrend reversal warning\r\n //\r\n // To best determine if this is a reentry point or a trend reversal\r\n // the MMARB (Madrid Moving Average Ribbon Bar) study is used.\r\n // This is the bar located at the bottom. This bar signals when a\r\n // current trend reentry is found (partially filled with opposite dark color)\r\n // or when a trend reversal is ahead (completely filled with opposite dark color).\r\n //\r\n\r\n study(title=\"Madrid Moving Average Ribbon\", shorttitle=\"MMAR\", overlay=true)\r\n exponential = input(true, title=\"Exponential MA\")\r\n\r\n src = close\r\n\r\n ma05 = exponential ? ema(src, 05) : sma(src, 05)\r\n ma10 = exponential ? ema(src, 10) : sma(src, 10)\r\n ma15 = exponential ? ema(src, 15) : sma(src, 15)\r\n ma20 = exponential ? ema(src, 20) : sma(src, 20)\r\n ma25 = exponential ? ema(src, 25) : sma(src, 25)\r\n ma30 = exponential ? ema(src, 30) : sma(src, 30)\r\n ma35 = exponential ? ema(src, 35) : sma(src, 35)\r\n ma40 = exponential ? ema(src, 40) : sma(src, 40)\r\n ma45 = exponential ? ema(src, 45) : sma(src, 45)\r\n ma50 = exponential ? ema(src, 50) : sma(src, 50)\r\n ma55 = exponential ? ema(src, 55) : sma(src, 55)\r\n ma60 = exponential ? ema(src, 60) : sma(src, 60)\r\n ma65 = exponential ? ema(src, 65) : sma(src, 65)\r\n ma70 = exponential ? ema(src, 70) : sma(src, 70)\r\n ma75 = exponential ? ema(src, 75) : sma(src, 75)\r\n ma80 = exponential ? ema(src, 80) : sma(src, 80)\r\n ma85 = exponential ? ema(src, 85) : sma(src, 85)\r\n ma90 = exponential ? ema(src, 90) : sma(src, 90)\r\n ma100 = exponential ? ema(src, 100) : sma(src, 100)\r\n\r\n leadMAColor = change(ma05)>=0 and ma05>ma100 ? lime\r\n : change(ma05)<0 and ma05>ma100 ? maroon\r\n : change(ma05)<=0 and ma05<ma100 ? red\r\n : change(ma05)>=0 and ma05<ma100 ? green\r\n : gray\r\n maColor(ma, maRef) =>\r\n change(ma)>=0 and ma05>maRef ? lime\r\n : change(ma)<0 and ma05>maRef ? maroon\r\n : change(ma)<=0 and ma05<maRef ? red\r\n : change(ma)>=0 and ma05<maRef ? 
green\r\n : gray\r\n\r\n plot( ma05, color=leadMAColor, style=line, title=\"MMA05\", linewidth=3)\r\n plot( ma10, color=maColor(ma10,ma100), style=line, title=\"MMA10\", linewidth=1)\r\n plot( ma15, color=maColor(ma15,ma100), style=line, title=\"MMA15\", linewidth=1)\r\n plot( ma20, color=maColor(ma20,ma100), style=line, title=\"MMA20\", linewidth=1)\r\n plot( ma25, color=maColor(ma25,ma100), style=line, title=\"MMA25\", linewidth=1)\r\n plot( ma30, color=maColor(ma30,ma100), style=line, title=\"MMA30\", linewidth=1)\r\n plot( ma35, color=maColor(ma35,ma100), style=line, title=\"MMA35\", linewidth=1)\r\n plot( ma40, color=maColor(ma40,ma100), style=line, title=\"MMA40\", linewidth=1)\r\n plot( ma45, color=maColor(ma45,ma100), style=line, title=\"MMA45\", linewidth=1)\r\n plot( ma50, color=maColor(ma50,ma100), style=line, title=\"MMA50\", linewidth=1)\r\n plot( ma55, color=maColor(ma55,ma100), style=line, title=\"MMA55\", linewidth=1)\r\n plot( ma60, color=maColor(ma60,ma100), style=line, title=\"MMA60\", linewidth=1)\r\n plot( ma65, color=maColor(ma65,ma100), style=line, title=\"MMA65\", linewidth=1)\r\n plot( ma70, color=maColor(ma70,ma100), style=line, title=\"MMA70\", linewidth=1)\r\n plot( ma75, color=maColor(ma75,ma100), style=line, title=\"MMA75\", linewidth=1)\r\n plot( ma80, color=maColor(ma80,ma100), style=line, title=\"MMA80\", linewidth=1)\r\n plot( ma85, color=maColor(ma85,ma100), style=line, title=\"MMA85\", linewidth=1)\r\n plot( ma90, color=maColor(ma90,ma100), style=line, title=\"MMA90\", linewidth=3)\r\n :return:\r\n \"\"\"\r\n import talib as ta\r\n\r\n matype = matype\r\n src = src\r\n df = dataframe\r\n debug = debug\r\n\r\n # Default to EMA, allow SMA if passed to def.\r\n if matype == \"EMA\" or matype == \"ema\":\r\n ma = ta.EMA\r\n elif matype == \"SMA\" or matype == \"sma\":\r\n ma = ta.SMA\r\n else:\r\n ma = ta.EMA\r\n\r\n # Get MAs, also last MA in own column to pass to def later\r\n df[\"ma05\"] = ma(df[src], 5)\r\n df['ma05l'] = df['ma05'].shift(+1)\r\n df[\"ma10\"] = ma(df[src], 10)\r\n df['ma10l'] = df['ma10'].shift(+1)\r\n df[\"ma20\"] = ma(df[src], 20)\r\n df['ma20l'] = df['ma20'].shift(+1)\r\n df[\"ma30\"] = ma(df[src], 30)\r\n df['ma30l'] = df['ma30'].shift(+1)\r\n df[\"ma40\"] = ma(df[src], 40)\r\n df['ma40l'] = df['ma40'].shift(+1)\r\n df[\"ma50\"] = ma(df[src], 50)\r\n df['ma50l'] = df['ma50'].shift(+1)\r\n df[\"ma60\"] = ma(df[src], 60)\r\n df['ma60l'] = df['ma60'].shift(+1)\r\n df[\"ma70\"] = ma(df[src], 70)\r\n df['ma70l'] = df['ma70'].shift(+1)\r\n df[\"ma80\"] = ma(df[src], 80)\r\n df['ma80ll'] = df['ma80'].shift(+1)\r\n df[\"ma90\"] = ma(df[src], 90)\r\n df['ma90l'] = df['ma90'].shift(+1)\r\n df[\"ma100\"] = ma(df[src], 100)\r\n df['ma100l'] = df['ma100'].shift(+1)\r\n\r\n \"\"\" logic for LeadMA\r\n : change(ma05)>=0 and ma05>ma100 ? lime +2\r\n : change(ma05)<0 and ma05>ma100 ? maroon -1\r\n : change(ma05)<=0 and ma05<ma100 ? red -2\r\n : change(ma05)>=0 and ma05<ma100 ? green +1\r\n : gray\r\n \"\"\"\r\n\r\n def leadMAc(x):\r\n if (x['ma05'] - x['ma05l']) >= 0 and (x['ma05'] > x['ma100']):\r\n # Lime: Uptrend.Long trading\r\n x[\"leadMA\"] = \"lime\"\r\n return x[\"leadMA\"]\r\n elif (x['ma05'] - x['ma05l']) < 0 and (x['ma05'] > x['ma100']):\r\n # Maroon : Short Reentry (sell the peak) or uptrend reversal warning\r\n x[\"leadMA\"] = \"maroon\"\r\n return x[\"leadMA\"]\r\n elif (x['ma05'] - x['ma05l']) <= 0 and (x['ma05'] < x['ma100']):\r\n # Red : Downtrend. 
Short trading\r\n x[\"leadMA\"] = \"red\"\r\n return x[\"leadMA\"]\r\n elif (x['ma05'] - x['ma05l']) >= 0 and (x['ma05'] < x['ma100']):\r\n # Green: Reentry(buy the dip) or downtrend reversal warning\r\n x[\"leadMA\"] = \"green\"\r\n return x[\"leadMA\"]\r\n else:\r\n # If its great it means not enough ticker data for lookback\r\n x[\"leadMA\"] = \"grey\"\r\n return x[\"leadMA\"]\r\n\r\n df['leadMA'] = df.apply(leadMAc, axis=1)\r\n\r\n \"\"\" Logic for MAs \r\n : change(ma)>=0 and ma05>ma100 ? lime\r\n : change(ma)<0 and ma05>ma100 ? maroon\r\n : change(ma)<=0 and ma05<ma100 ? red\r\n : change(ma)>=0 and ma05<ma100 ? green\r\n : gray\r\n \"\"\"\r\n\r\n def maColor(x, ma):\r\n col_label = '_'.join([ma, \"c\"])\r\n col_lable_l = ''.join([ma, \"l\"])\r\n\r\n if (x[ma] - x[col_lable_l]) >= 0 and (x[ma] > x['ma100']):\r\n # Lime: Uptrend.Long trading\r\n x[col_label] = \"lime\"\r\n return x[col_label]\r\n elif (x[ma] - x[col_lable_l]) < 0 and (x[ma] > x['ma100']):\r\n # Maroon : Short Reentry (sell the peak) or uptrend reversal warning\r\n x[col_label] = \"maroon\"\r\n return x[col_label]\r\n\r\n elif (x[ma] - x[col_lable_l]) <= 0 and (x[ma] < x['ma100']):\r\n # Red : Downtrend. Short trading\r\n x[col_label] = \"red\"\r\n return x[col_label]\r\n\r\n elif (x[ma] - x[col_lable_l]) >= 0 and (x[ma] < x['ma100']):\r\n # Green: Reentry(buy the dip) or downtrend reversal warning\r\n x[col_label] = \"green\"\r\n return x[col_label]\r\n else:\r\n # If its great it means not enough ticker data for lookback\r\n x[col_label] = 'grey'\r\n return x[col_label]\r\n\r\n df['ma05_c'] = df.apply(maColor, ma=\"ma05\", axis=1)\r\n df['ma10_c'] = df.apply(maColor, ma=\"ma10\", axis=1)\r\n df['ma20_c'] = df.apply(maColor, ma=\"ma20\", axis=1)\r\n df['ma30_c'] = df.apply(maColor, ma=\"ma30\", axis=1)\r\n df['ma40_c'] = df.apply(maColor, ma=\"ma40\", axis=1)\r\n df['ma50_c'] = df.apply(maColor, ma=\"ma50\", axis=1)\r\n df['ma60_c'] = df.apply(maColor, ma=\"ma60\", axis=1)\r\n df['ma70_c'] = df.apply(maColor, ma=\"ma70\", axis=1)\r\n df['ma80_c'] = df.apply(maColor, ma=\"ma80\", axis=1)\r\n df['ma90_c'] = df.apply(maColor, ma=\"ma90\", axis=1)\r\n\r\n if debug:\r\n from pandas import set_option\r\n set_option('display.max_rows', 10)\r\n print(df[[\"date\", \"leadMA\",\r\n \"ma05\", \"ma05l\", \"ma05_c\",\r\n \"ma10\", \"ma10l\", \"ma10_c\",\r\n # \"ma20\", \"ma20l\", \"ma20_c\",\r\n # \"ma30\", \"ma30l\", \"ma30_c\",\r\n # \"ma40\", \"ma40l\", \"ma40_c\",\r\n # \"ma50\", \"ma50l\", \"ma50_c\",\r\n # \"ma60\", \"ma60l\", \"ma60_c\",\r\n # \"ma70\", \"ma70l\", \"ma70_c\",\r\n # \"ma80\", \"ma80l\", \"ma80_c\",\r\n \"ma90\", \"ma90l\", \"ma90_c\",\r\n \"ma100\", \"leadMA\"]].tail(200))\r\n\r\n print(df[[\"date\", 'close',\r\n \"leadMA\",\r\n \"ma10_c\",\r\n \"ma20_c\",\r\n \"ma30_c\",\r\n \"ma40_c\",\r\n \"ma50_c\",\r\n \"ma60_c\",\r\n \"ma70_c\",\r\n \"ma80_c\",\r\n \"ma90_c\"\r\n ]].tail(684))\r\n\r\n return df['leadMA'], df['ma10_c'], df['ma20_c'], df['ma30_c'], \\\r\n df['ma40_c'], df['ma50_c'], df['ma60_c'], df['ma70_c'], \\\r\n df['ma80_c'], df['ma90_c']\r\n\r\n\r\ndef madrid_sqz(datafame, length=34, src='close', ref=13, sqzLen=5):\r\n \"\"\"\r\n Squeeze Madrid Indicator\r\n\r\n Author: Creslinux\r\n Original Author: Madrid - Tradingview\r\n https://www.tradingview.com/script/9bUUSzM3-Madrid-Trend-Squeeze/\r\n\r\n :param datafame:\r\n :param lenght: min 14 - default 34\r\n :param src: default close\r\n :param ref: default 13\r\n :param sqzLen: default 5\r\n :return: df['sqz_cma_c'], df['sqz_rma_c'], 
df['sqz_sma_c']\r\n\r\n\r\n There are seven colors used for the study\r\n\r\n Green : Uptrend in general\r\n Lime : Spots the current uptrend leg\r\n Aqua : The maximum profitability of the leg in a long trade\r\n The Squeeze happens when Green+Lime+Aqua are aligned (the larger the values the better)\r\n\r\n Maroon : Downtrend in general\r\n Red : Spots the current downtrend leg\r\n Fuchsia: The maximum profitability of the leg in a short trade\r\n The Squeeze happens when Maroon+Red+Fuchsia are aligned (the larger the values the better)\r\n\r\n Yellow : The trend has come to a pause and it is either a reversal warning or a continuation. These are the entry, re-entry or closing position points.\r\n \"\"\"\r\n\r\n \"\"\" \r\n Original Pinescript source code\r\n\r\n ma = ema(src, len)\r\n closema = close - ma\r\n refma = ema(src, ref) - ma\r\n sqzma = ema(src, sqzLen) - ma\r\n\r\n hline(0)\r\n plotcandle(0, closema, 0, closema, color=closema >= 0?aqua: fuchsia)\r\n plotcandle(0, sqzma, 0, sqzma, color=sqzma >= 0?lime: red)\r\n plotcandle(0, refma, 0, refma, color=(refma >= 0 and closema < refma) or (\r\n refma < 0 and closema > refma) ? yellow: refma >= 0 ? green: maroon)\r\n \"\"\"\r\n import talib as ta\r\n from numpy import where\r\n\r\n len = length\r\n src = src\r\n ref = ref\r\n sqzLen = sqzLen\r\n df = datafame\r\n ema = ta.EMA\r\n\r\n \"\"\" Original code logic\r\n ma = ema(src, len)\r\n closema = close - ma\r\n refma = ema(src, ref) - ma\r\n sqzma = ema(src, sqzLen) - ma\r\n \"\"\"\r\n df['sqz_ma'] = ema(df[src], len)\r\n df['sqz_cma'] = df['close'] - df['sqz_ma']\r\n df['sqz_rma'] = ema(df[src], ref) - df['sqz_ma']\r\n df['sqz_sma'] = ema(df[src], sqzLen) - df['sqz_ma']\r\n\r\n \"\"\" Original code logic\r\n plotcandle(0, closema, 0, closema, color=closema >= 0?aqua: fuchsia)\r\n plotcandle(0, sqzma, 0, sqzma, color=sqzma >= 0?lime: red)\r\n\r\n plotcandle(0, refma, 0, refma, color=\r\n (refma >= 0 and closema < refma) or (refma < 0 and closema > refma) ? yellow: \r\n refma >= 0 ? 
green: maroon)\r\n \"\"\"\r\n\r\n # print(df[['sqz_cma', 'sqz_rma', 'sqz_sma']])\r\n\r\n def sqz_cma_c(x):\r\n if x['sqz_cma'] >= 0:\r\n x['sqz_cma_c'] = \"aqua\"\r\n return x['sqz_cma_c']\r\n else:\r\n x['sqz_cma_c'] = \"fuchsia\"\r\n return x['sqz_cma_c']\r\n\r\n df['sqz_cma_c'] = df.apply(sqz_cma_c, axis=1)\r\n\r\n def sqz_sma_c(x):\r\n if x['sqz_sma'] >= 0:\r\n x['sqz_sma_c'] = \"lime\"\r\n return x['sqz_sma_c']\r\n else:\r\n x['sqz_sma_c'] = \"red\"\r\n return x['sqz_sma_c']\r\n\r\n df['sqz_sma_c'] = df.apply(sqz_sma_c, axis=1)\r\n\r\n def sqz_rma_c(x):\r\n if x['sqz_rma'] >= 0 and x['sqz_cma'] < x['sqz_rma']:\r\n x['sqz_rma_c'] = \"yellow\"\r\n return x['sqz_rma_c']\r\n elif x['sqz_rma'] < 0 and x['sqz_cma'] > x['sqz_rma']:\r\n x['sqz_rma_c'] = \"yellow\"\r\n return x['sqz_rma_c']\r\n elif x['sqz_rma'] >= 0:\r\n x['sqz_rma_c'] = \"green\"\r\n return x['sqz_rma_c']\r\n else:\r\n x['sqz_rma_c'] = \"maroon\"\r\n return x['sqz_rma_c']\r\n\r\n df['sqz_rma_c'] = df.apply(sqz_rma_c, axis=1)\r\n\r\n # print(df[['sqz_cma_c', 'sqz_rma_c', 'sqz_sma_c']])\r\n return df['sqz_cma_c'], df['sqz_rma_c'], df['sqz_sma_c']\r\n\r\n\r\ndef stc(dataframe, fast=23, slow=50, length=10):\r\n import pandas as pd\r\n # First, the 23-period and the 50-period EMA and the MACD values are calculated:\r\n # EMA1 = EMA (Close, Short Length);\r\n # EMA2 = EMA (Close, Long Length);\r\n # MACD = EMA1 – EMA2.\r\n # Second, the 10-period Stochastic from the MACD values is calculated:\r\n # %K (MACD) = %KV (MACD, 10);\r\n # %D (MACD) = %DV (MACD, 10);\r\n # Schaff = 100 x (MACD – %K (MACD)) / (%D (MACD) – %K (MACD))\r\n\r\n import talib.abstract as ta\r\n\r\n MACD = ta.EMA(dataframe, timeperiod=fast) - ta.EMA(dataframe, timeperiod=slow)\r\n STOK = ((MACD - MACD.rolling(window=length).min()) / (\r\n MACD.rolling(window=length).max() - MACD.rolling(window=length).min())) * 100\r\n STOD = STOK.rolling(window=length).mean()\r\n dataframe['stc'] = 100 * (MACD - (STOK * MACD)) / ((STOD * MACD) - (STOK * MACD))\r\n\r\n return dataframe['stc']\r\n\r\n\r\ndef laguerre(dataframe, gamma=0.75, smooth=1, debug=bool):\r\n \"\"\"\r\n laguerre RSI\r\n Author Creslin\r\n Original Author: John Ehlers 1979\r\n\r\n\r\n :param dataframe: df\r\n :param gamma: Between 0 and 1, default 0.75\r\n :param smooth: 1 is off. Valid values over 1 are alook back smooth for an ema\r\n :param debug: Bool, prints to console\r\n :return: Laguerre RSI:values 0 to +1\r\n \"\"\"\r\n \"\"\"\r\n Laguerra RSI \r\n How to trade lrsi: (TL, DR) buy on the flat 0, sell on the drop from top,\r\n not when touch the top\r\n http://systemtradersuccess.com/testing-laguerre-rsi/\r\n\r\n http://www.davenewberg.com/Trading/TS_Code/Ehlers_Indicators/Laguerre_RSI.html\r\n \"\"\"\r\n import talib as ta\r\n import pandas as pd\r\n ema = ta.EMA\r\n\r\n df = dataframe\r\n g = gamma\r\n smooth = smooth\r\n debug = debug\r\n if debug:\r\n from pandas import set_option\r\n set_option('display.max_rows', 2000)\r\n set_option('display.max_columns', 8)\r\n\r\n \"\"\"\r\n Vectorised pandas or numpy calculations are not used\r\n in Laguerre as L0 is self referencing.\r\n Therefore we use an intertuples loop as next best option. 
\r\n \"\"\"\r\n lrsi_l = []\r\n L0, L1, L2, L3 = 0.0, 0.0, 0.0, 0.0\r\n for row in df.itertuples(index=True, name='lrsi'):\r\n \"\"\" Original Pine Logic Block1\r\n p = close\r\n L0 = ((1 - g)*p)+(g*nz(L0[1]))\r\n L1 = (-g*L0)+nz(L0[1])+(g*nz(L1[1]))\r\n L2 = (-g*L1)+nz(L1[1])+(g*nz(L2[1]))\r\n L3 = (-g*L2)+nz(L2[1])+(g*nz(L3[1])) \r\n \"\"\"\r\n # Feed back loop\r\n L0_1, L1_1, L2_1, L3_1 = L0, L1, L2, L3\r\n\r\n L0 = (1 - g) * row.close + g * L0_1\r\n L1 = -g * L0 + L0_1 + g * L1_1\r\n L2 = -g * L1 + L1_1 + g * L2_1\r\n L3 = -g * L2 + L2_1 + g * L3_1\r\n\r\n \"\"\" Original Pinescript Block 2 \r\n cu=(L0 > L1? L0 - L1: 0) + (L1 > L2? L1 - L2: 0) + (L2 > L3? L2 - L3: 0)\r\n cd=(L0 < L1? L1 - L0: 0) + (L1 < L2? L2 - L1: 0) + (L2 < L3? L3 - L2: 0)\r\n \"\"\"\r\n cu = 0.0\r\n cd = 0.0\r\n if (L0 >= L1):\r\n cu = L0 - L1\r\n else:\r\n cd = L1 - L0\r\n\r\n if (L1 >= L2):\r\n cu = cu + L1 - L2\r\n else:\r\n cd = cd + L2 - L1\r\n\r\n if (L2 >= L3):\r\n cu = cu + L2 - L3\r\n else:\r\n cd = cd + L3 - L2\r\n\r\n \"\"\"Original Pinescript Block 3 \r\n lrsi=ema((cu+cd==0? -1: cu+cd)==-1? 0: (cu/(cu+cd==0? -1: cu+cd)), smooth)\r\n \"\"\"\r\n if (cu + cd) != 0:\r\n lrsi_l.append(cu / (cu + cd))\r\n else:\r\n lrsi_l.append(0)\r\n\r\n return lrsi_l\r\n\r\n\r\ndef ichimoku(dataframe):\r\n \"Ichimoku cloud indicator\"\r\n\r\n from datetime import timedelta\r\n\r\n df = dataframe.copy()\r\n\r\n high_9 = df['high'].rolling(window=9).max()\r\n low_9 = df['low'].rolling(window=9).min()\r\n df['tenkan_sen'] = (high_9 + low_9) / 2\r\n\r\n high_26 = df['high'].rolling(window=26).max()\r\n low_26 = df['low'].rolling(window=26).min()\r\n df['kijun_sen'] = (high_26 + low_26) / 2\r\n\r\n # this is to extend the 'df' in future for 26 days\r\n # the 'df' here is numerical indexed df\r\n last_index = df.iloc[-1:].index[0]\r\n last_date = df['date'].iloc[-1].date()\r\n for i in range(26):\r\n df.loc[last_index + 1 + i, 'date'] = last_date + timedelta(days=i)\r\n\r\n df['senkou_span_a'] = ((df['tenkan_sen'] + df['kijun_sen']) / 2).shift(26)\r\n\r\n high_52 = df['high'].rolling(window=52).max()\r\n low_52 = df['low'].rolling(window=52).min()\r\n df['senkou_span_b'] = ((high_52 + low_52) / 2).shift(26)\r\n\r\n # most charting softwares dont plot this line\r\n df['chikou_span'] = df['close'].shift(-22) # sometimes -26\r\n\r\n return {\r\n 'tenkan_sen': df['tenkan_sen'],\r\n 'kijun_sen': df['kijun_sen'],\r\n 'senkou_span_a': df['senkou_span_a'],\r\n 'senkou_span_b': df['senkou_span_b'],\r\n 'chikou_span': df['chikou_span'],\r\n }\r\n\r\n\r\ndef ema(dataframe, period, field='close'):\r\n import talib.abstract as ta\r\n return ta.EMA(dataframe, timeperiod=period, price=field)\r\n\r\n\r\ndef tema(dataframe, period, field='close'):\r\n import talib.abstract as ta\r\n return ta.TEMA(dataframe, timeperiod=period, price=field)\r\n\r\n\r\ndef sma(dataframe, period, field='close'):\r\n import talib.abstract as ta\r\n return ta.SMA(dataframe, timeperiod=period, price=field)\r\n\r\n\r\ndef vpcii(dataframe, period_short=5, period_long=20, hist=8,hist_long=30):\r\n \"\"\"\r\n improved version of the vpcii\r\n\r\n\r\n :param dataframe:\r\n :param period_short:\r\n :param period_long:\r\n :param hist:\r\n :return:\r\n \"\"\"\r\n\r\n dataframe = dataframe.copy()\r\n dataframe['vpci'] = vpci(dataframe,period_short,period_long)\r\n dataframe['vpcis'] = dataframe['vpci'].rolling(hist).mean()\r\n dataframe['vpci_hist'] = (dataframe['vpci'] - dataframe['vpcis']).pct_change()\r\n\r\n return dataframe['vpci_hist'].abs()\r\n\r\ndef vpci(dataframe, 
period_short=5, period_long=20):\r\n \"\"\"\r\n volume confirming indicator as seen here\r\n\r\n https://www.tradingview.com/script/lmTqKOsa-Indicator-Volume-Price-Confirmation-Indicator-VPCI/\r\n\r\n\r\n should be used with Bollinger Bands for decision making\r\n :param dataframe:\r\n :param period_long:\r\n :param period_short:\r\n :return:\r\n \"\"\"\r\n\r\n vpc = vwma(dataframe, period_long) - sma(dataframe, period_long)\r\n vpr = vwma(dataframe, period_short) / sma(dataframe, period_short)\r\n vm = sma(dataframe, period_short, field='volume') / sma(dataframe, period_long, field='volume')\r\n\r\n vpci = vpc * vpr * vm\r\n\r\n return vpci\r\n\r\n\r\ndef williams_percent(dataframe):\r\n from pyti.williams_percent_r import williams_percent_r\r\n return williams_percent_r(dataframe['close'])\r\n\r\n\r\ndef momentum(dataframe, field='close', period=9):\r\n from pyti.momentum import momentum as m\r\n return m(dataframe[field], period)\r\n\r\n\r\ndef vwma(df, window):\r\n return df.apply(lambda x: x.close * x.volume, axis=1).rolling(window).sum() / df.volume.rolling(window).sum()\r\n\r\n\r\ndef ultimate_oscilator(dataframe):\r\n from pyti.ultimate_oscillator import ultimate_oscillator as uo\r\n return uo(dataframe['close'], dataframe['low'])\r\n" ]
[ [ "pandas.Series", "pandas.DataFrame", "pandas.set_option", "pandas.Series.rolling_mean", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhong-lab/optics
[ "9de1942d9a128183ecb3d360b160b27126e7b8f0", "9de1942d9a128183ecb3d360b160b27126e7b8f0" ]
[ "spyre/spyre/spyrelets/twopulsephotonecho_spyrelet.py", "spyre/spyre/plotting.py" ]
[ "import numpy as np\nimport pyqtgraph as pg\nimport time\nimport csv\nimport os\n\nfrom PyQt5.Qsci import QsciScintilla, QsciLexerPython\nimport matplotlib.pyplot as plt\n\nfrom spyre import Spyrelet, Task, Element\nfrom spyre.widgets.task import TaskWidget\nfrom spyre.plotting import LinePlotWidget\nfrom spyre.widgets.rangespace import Rangespace\nfrom spyre.widgets.param_widget import ParamWidget\nfrom spyre.widgets.repository_widget import RepositoryWidget\n\nfrom lantz import Q_\nimport time\n\nfrom lantz.drivers.keysight import Arbseq_Class\nfrom lantz.drivers.keysight.seqbuild import SeqBuild\n\nfrom lantz.drivers.keysight import Keysight_33622A\n\nclass TwoPulsePhotonEcho(Spyrelet):\n\trequires = {\n\t\t'fungen': Keysight_33622A\n\t\t# 'srs': SRS900\n\t}\n\tqutag = None\n\txs = np.array([])\n\tys= np.array([])\n\thist=[]\n\n\tdef configureQutag(self):\n\t\tqutagparams = self.qutag_params.widget.get()\n\t\tstart = qutagparams['Start Channel']\n\t\tstop = qutagparams['Stop Channel']\n\t\t##True = rising edge, False = falling edge. Final value is threshold voltage\n\t\tself.qutag.setSignalConditioning(start,self.qutag.SIGNALCOND_MISC,True,1)\n\t\tself.qutag.setSignalConditioning(stop,self.qutag.SIGNALCOND_MISC,True,0.1)\n\t\tself.qutag.enableChannels((start,stop))\n\n\tdef createHistogram(self,stoparray, timebase, bincount, totalWidth, tau):\n\t\tlowBound=1.9*tau\n\t\thighBound=2.1*tau\n\t\thist = [0]*bincount\n\t\tfor stoptime in stoparray:\n\t\t\tbinNumber = int(stoptime*timebase*bincount/(totalWidth))\n\t\t\tif binNumber >= bincount:\n\t\t\t\tprint('error')\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\thist[binNumber]+=1\n\t\tout_name = \"D:\\\\Data\\\\12.18.2019\\\\230_20dB\"\n\t\tx=[]\n\t\tfor i in range(bincount):\n\t\t\tx.append(i*totalWidth/bincount)\n\t\tnp.savez(os.path.join(out_name,str(int(round(tau*1e6,0)))),hist,x)\n\t\tprint('Data stored under File Name: ' + str(tau))\n\n\tdef createPlottingHist(self, stoparray, timebase, bincount, totalWidth):\n\t\tfor stoptime in stoparray:\n\t\t\tbinNumber = int(stoptime*timebase*bincount/(totalWidth))\n\t\t\tif binNumber >= bincount:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tself.hist[binNumber]+=1\n\n\tdef initHist(self, bincount):\n\t\tself.hist=[0]*bincount\n\n\t@Task()\n\tdef startpulse(self, timestep=1e-9):\n\t\tparams = self.pulse_parameters.widget.get()\n\t\ttau = params['start tau']\n\t\tperiod = params['period'].magnitude\n\t\trepeat_unit = params['repeat unit'].magnitude\n\t\tpulse_width = params['pulse width'].magnitude\n\t\tbuffer_time = params['buffer time'].magnitude\n\t\tshutter_offset = params['shutter offset'].magnitude\n\t\twholeRange=params['measuring range'].magnitude\n\n\t\tself.configureQutag()\n\t\tfor i in range(int((params['stop tau']-params['start tau'])/params['step tau'])+1):\n\t\t\txs = np.array([])\n\t\t\tys= np.array([])\n\t\t\thist=[]\n\t\t\tself.dataset.clear()\n\t\t\tself.fungen.output[1] = 'OFF'\n\t\t\tself.fungen.output[2] = 'OFF'\n\t\t\tself.fungen.clear_mem(1)\n\t\t\tself.fungen.clear_mem(2)\n\t\t\tself.fungen.wait()\n\t\t\t# self.srs.module_reset[5]\n\t\t\t# self.srs.SIM928_voltage[5]=params['srs bias'].magnitude+0.000000001*i\n\t\t\t# self.srs.SIM928_on[5]\n\n\t\t\t## build pulse sequence for AWG channel 1\n\t\t\tchn1buffer = Arbseq_Class('chn1buffer', timestep)\n\t\t\tchn1buffer.delays = [0]\n\t\t\tchn1buffer.heights = [0]\n\t\t\tchn1buffer.widths = [repeat_unit]\n\t\t\tchn1buffer.totaltime = repeat_unit\n\t\t\tchn1buffer.nrepeats = buffer_time/repeat_unit\n\t\t\tchn1buffer.repeatstring = 
'repeat'\n\t\t\tchn1buffer.markerstring = 'lowAtStart'\n\t\t\tchn1buffer.markerloc = 0\n\t\t\tchn1bufferwidth = repeat_unit*chn1buffer.nrepeats\n\t\t\tchn1buffer.create_sequence()\n\n\t\t\tchn1pulse = Arbseq_Class('chn1pulse', timestep)\n\t\t\tchn1pulse.delays = [0]\n\t\t\tchn1pulse.heights = [1]\n\t\t\tchn1pulse.widths = [pulse_width]\n\t\t\tchn1pulse.totaltime = pulse_width\n\t\t\tchn1pulse.nrepeats = 0\n\t\t\tchn1pulse.repeatstring = 'once'\n\t\t\tchn1pulse.markerstring = 'highAtStartGoLow'\n\t\t\tchn1pulse.markerloc = 0\n\t\t\tchn1pulsewidth = pulse_width\n\t\t\tchn1pulse.create_sequence()\n\n\t\t\tchn1dc = Arbseq_Class('chn1dc', timestep)\n\t\t\tchn1dc.delays = [0]\n\t\t\tchn1dc.heights = [0]\n\t\t\tchn1dc.widths = [repeat_unit]\n\t\t\tchn1dc.totaltime = repeat_unit\n\t\t\tchn1dc.repeatstring = 'repeat'\n\t\t\tchn1dc.markerstring = 'lowAtStart'\n\t\t\tchn1dc.markerloc = 0\n\t\t\tchn1dcrepeats = int((tau.magnitude-1.5*pulse_width)/repeat_unit)\n\t\t\tchn1dc.nrepeats = chn1dcrepeats\n\t\t\tchn1dcwidth = repeat_unit*chn1dcrepeats\n\t\t\tprint(tau.magnitude, pulse_width, chn1dcrepeats)\n\t\t\tchn1dc.create_sequence()\n\t\t\n\t\t\tchn1pulse2 = Arbseq_Class('chn1pulse2', timestep)\n\t\t\tchn1pulse2.delays = [0]\n\t\t\tchn1pulse2.heights = [0]\n\t\t\tchn1pulse2.widths = [pulse_width*2]\n\t\t\tchn1pulse2.totaltime = pulse_width*2 \n\t\t\tchn1pulse2width = pulse_width*2\n\t\t\tchn1pulse2.nrepeats = 0\n\t\t\tchn1pulse2.repeatstring = 'once'\n\t\t\tchn1pulse2.markerstring = 'lowAtStart'\n\t\t\tchn1pulse2.markerloc = 0\n\t\t\tchn1pulse2.create_sequence()\n\t\t\n\t\t\tchn1pulse3 = Arbseq_Class('chn1pulse3', timestep)\n\t\t\tchn1pulse3.delays = [0]\n\t\t\tchn1pulse3.heights = [0]\n\t\t\tchn1pulse3.widths = [repeat_unit]\n\t\t\tchn1pulse3.totaltime = repeat_unit \n\t\t\tchn1pulse3width = shutter_offset\n\t\t\tchn1pulse3.nrepeats = shutter_offset/repeat_unit\n\t\t\tchn1pulse3.repeatstring = 'repeat'\n\t\t\tchn1pulse3.markerstring = 'lowAtStart'\n\t\t\tchn1pulse3.markerloc = 0\n\t\t\tchn1pulse3.create_sequence()\n\t\t\n\t\t\tchn1dc2 = Arbseq_Class('chn1dc2', timestep)\n\t\t\tchn1dc2.delays = [0]\n\t\t\tchn1dc2.heights = [0]\n\t\t\tchn1dc2.widths = [repeat_unit]\n\t\t\tchn1dc2.totaltime = repeat_unit\n\t\t\tchn1dc2.repeatstring = 'repeat'\n\t\t\tchn1dc2.markerstring = 'lowAtStart'\n\t\t\tchn1dc2repeats = int((period-chn1bufferwidth-chn1pulsewidth-chn1dcwidth-chn1pulse2width-chn1pulse3width)/repeat_unit)\n\t\t\tchn1dc2.nrepeats = chn1dc2repeats\n\t\t\tchn1dc2.markerloc = 0\n\t\t\t#print((chn1dc2repeats*params['repeat unit'].magnitude) + tau.magnitude + params['pulse width'].magnitude)\n\t\t\tprint(params['repeat unit'].magnitude*chn1dc2.nrepeats)\n\t\t\tchn1dc2.create_sequence()\n\n\t\t\t## build pulse sequence for AWG channel 2\n\t\t\tchn2buffer = Arbseq_Class('chn2buffer', timestep)\n\t\t\tchn2buffer.delays = [0]\n\t\t\tchn2buffer.heights = [1]\n\t\t\tchn2buffer.widths = [repeat_unit]\n\t\t\tchn2buffer.totaltime = repeat_unit\n\t\t\tchn2buffer.nrepeats = buffer_time/repeat_unit\n\t\t\tchn2buffer.repeatstring = 'repeat'\n\t\t\tchn2buffer.markerstring = 'lowAtStart'\n\t\t\tchn2buffer.markerloc = 0\n\t\t\tchn2bufferwidth = repeat_unit*chn2buffer.nrepeats\n\t\t\tchn2buffer.create_sequence()\n\n\t\t\tchn2pulse1 = Arbseq_Class('chn2pulse1', timestep)\n\t\t\tchn2pulse1.delays = [0]\n\t\t\tchn2pulse1.heights = [1]\n\t\t\tchn2pulse1.widths = [pulse_width]\n\t\t\tchn2pulse1.totaltime = pulse_width\n\t\t\tchn2pulse1width = pulse_width\n\t\t\tchn2pulse1.nrepeats = 0\n\t\t\tchn2pulse1.repeatstring = 
'once'\n\t\t\tchn2pulse1.markerstring = 'highAtStartGoLow'\n\t\t\tchn2pulse1.markerloc = 0\n\t\t\tchn2pulse1.create_sequence()\n\n\t\t\tchn2dc1 = Arbseq_Class('chn2dc1', timestep)\n\t\t\tchn2dc1.delays = [0]\n\t\t\tchn2dc1.heights = [1]\n\t\t\tchn2dc1.widths = [repeat_unit]\n\t\t\tchn2dc1.totaltime = repeat_unit\n\t\t\tchn2dc1.repeatstring = 'repeat'\n\t\t\tchn2dc1.markerstring = 'lowAtStart'\n\t\t\tchn2dc1.markerloc = 0\n\t\t\tchn2dc1repeats = int((tau.magnitude-1.5*pulse_width)/repeat_unit)\n\t\t\tchn2dc1.nrepeats = chn2dc1repeats\n\t\t\tchn2dc1width = repeat_unit*chn2dc1repeats\n\t\t\tchn2dc1.create_sequence()\n\t\n\t\t\tchn2pulse2 = Arbseq_Class('chn2pulse2', timestep)\n\t\t\tchn2pulse2.delays = [0]\n\t\t\tchn2pulse2.heights = [1]\n\t\t\tchn2pulse2.widths = [pulse_width*2]\n\t\t\tchn2pulse2.totaltime = pulse_width*2\n\t\t\tchn2pulse2width = pulse_width*2\n\t\t\tchn2pulse2.nrepeats = 0\n\t\t\tchn2pulse2.repeatstring = 'once'\n\t\t\tchn2pulse2.markerstring = 'lowAtStart'\n\t\t\tchn2pulse2.markerloc = 0\n\t\t\tchn2pulse2.create_sequence()\n\n\t\t\tchn2pulse3 = Arbseq_Class('chn2pulse3', timestep)\n\t\t\tchn2pulse3.delays = [0]\n\t\t\tchn2pulse3.heights = [1]\n\t\t\tchn2pulse3.widths = [repeat_unit]\n\t\t\tchn2pulse3.totaltime = repeat_unit\n\t\t\tchn2pulse3width = shutter_offset\n\t\t\tchn2pulse3.nrepeats = shutter_offset/repeat_unit\n\t\t\tchn2pulse3.repeatstring = 'repeat'\n\t\t\tchn2pulse3.markerstring = 'lowAtStart'\n\t\t\tchn2pulse3.markerloc = 0\n\t\t\tchn2pulse3.create_sequence()\n\n\t\t\tchn2dc2 = Arbseq_Class('chn2dc2', timestep)\n\t\t\tchn2dc2.delays = [0]\n\t\t\tchn2dc2.heights = [-1]\n\t\t\tchn2dc2.widths = [repeat_unit]\n\t\t\tchn2dc2.totaltime = repeat_unit\n\t\t\tchn2dc2.repeatstring = 'repeat'\n\t\t\tchn2dc2.markerstring = 'lowAtStart'\n\t\t\tchn2dc2repeats = int((period-chn2bufferwidth-chn2pulse1width-chn2dc1width-chn2pulse2width-chn2pulse3width)/repeat_unit)\n\t\t\tchn2dc2.nrepeats = chn2dc2repeats\n\t\t\tchn2dc2.markerloc = 0\n\t\t\tprint(repeat_unit*chn2dc2.nrepeats)\n\t\t\tchn2dc2.create_sequence()\n\n\t\t\tself.fungen.send_arb(chn1buffer, 1)\n\t\t\tself.fungen.send_arb(chn1pulse, 1)\n\t\t\tself.fungen.send_arb(chn1dc, 1)\n\t\t\tself.fungen.send_arb(chn1pulse2, 1)\n\t\t\tself.fungen.send_arb(chn1pulse3, 1)\n\t\t\tself.fungen.send_arb(chn1dc2, 1)\n\t\t\tself.fungen.send_arb(chn2buffer, 2)\n\t\t\tself.fungen.send_arb(chn2pulse1, 2)\n\t\t\tself.fungen.send_arb(chn2dc1, 2)\n\t\t\tself.fungen.send_arb(chn2pulse2, 2)\n\t\t\tself.fungen.send_arb(chn2pulse3, 2)\n\t\t\tself.fungen.send_arb(chn2dc2, 2)\n\n\t\t\tseq = [chn1buffer, chn1pulse, chn1dc, chn1pulse2, chn1pulse3, chn1dc2]\n\t\t\tseq2 = [chn2buffer, chn2pulse1, chn2dc1, chn2pulse2, chn2pulse3, chn2dc2]\n\t\t\t\n\t\t\tself.fungen.create_arbseq('twoPulse', seq, 1)\n\t\t\tself.fungen.create_arbseq('shutter', seq2, 2)\n\t\t\tself.fungen.wait()\n\t\t\tself.fungen.voltage[1] = params['pulse height'].magnitude+0.000000000001*i\n\t\t\tself.fungen.voltage[2] = 7.1+0.0000000000001*i\n\t\t\t\n\t\t\tprint(self.fungen.voltage[1], self.fungen.voltage[2])\n\t\t\tself.fungen.output[2] = 'OFF'\n\t\t\tself.fungen.trigger_delay(1,shutter_offset)\n\t\t\tself.fungen.sync()\n\t\t\ttime.sleep(1)\n\t\t\tself.fungen.output[1] = 'ON'\n\t\t\t#self.fungen.output[2] = 'OFF'\n\t\t\ttime.sleep(1)\n\t\t\t\n\n\t\t\t##Qutag Part\n\t\t\tself.configureQutag()\n\t\t\tqutagparams = self.qutag_params.widget.get()\n\t\t\tlost = self.qutag.getLastTimestamps(True) # clear Timestamp buffer\n\t\t\tstoptimestamp = 0\n\t\t\tsynctimestamp = 0\n\t\t\tbincount = 
qutagparams['Bin Count']\n\t\t\ttimebase = self.qutag.getTimebase()\n\t\t\tstart = qutagparams['Start Channel']\n\t\t\tstop = qutagparams['Stop Channel']\n\t\t\tstoparray = []\n\t\t\ttempStopArray = []\n\t\t\thistCounter = 0\n\t\t\tquenchCounter = 0\n\t\t\tself.initHist(bincount)\n\t\t\tfor j in range(int(self.exp_parameters.widget.get()['# of Passes'])):\n\t\t\t\tlost = self.qutag.getLastTimestamps(True)\n\t\t\t\ttime.sleep(period)\n\t\t\t\ttimestamps = self.qutag.getLastTimestamps(True)\n\n\t\t\t\ttstamp = timestamps[0] # array of timestamps\n\t\t\t\ttchannel = timestamps[1] # array of channels\n\t\t\t\tvalues = timestamps[2] # number of recorded timestamps\n\t\t\t\t# print(values)\n\t\t\t\tfor k in range(values):\n\t\t\t\t\t# output all stop events together with the latest start event\n\t\t\t\t\t# if tchannel[k] == start:\n\t\t\t\t\t# \tsynctimestamp = tstamp[k]\n\t\t\t\t\tif tchannel[k]==stop:\n\t\t\t\t\t\t#stoptimestamp = tstamp[k]\n\t\t\t\t\t# if tstamp[k]*1e-6>2*tau.magnitude-1 and tstamp[k]*1e-6<2*tau.magnitude+2:\n\t\t\t\t\t\tstoparray.append(tstamp[k])\n\t\t\t\t\t\t#tempStopArray.append(stoptimestamp)\n\t\t\t\t# histCounter+=1\n\t\t\t\t# if histCounter%20==0:\n\t\t\t\t# \tself.createPlottingHist(tempStopArray, timebase, bincount,qutagparams['Total Hist Width Multiplier']*tau.magnitude)\n\t\t\t\t# \tself.xs = np.asarray(range(len(self.hist)))\n\t\t\t\t# \tself.ys=np.asarray(self.hist)\n\t\t\t\t# \tvalues = {\n\t\t\t\t# \t't': np.asarray(range(len(self.hist))),\n\t\t\t\t# \t'y': np.asarray(self.hist),\n\t\t\t\t# \t}\n\t\t\t\t# \tself.startpulse.acquire(values)\n\t\t\t\t# \ttempStopArray = []\n\t\t\t\t\t# TODO: quench protection\n\t\t\t\t\t# if self.srs.SIM928_voltage[???] >= quench threshold and quenchCounter<=10:\n\t\t\t\t\t# \tself.srs.SIM928_off[6]\n\t\t\t\t\t# \ttime.sleep(period*10)\n\t\t\t\t\t# \tself.srs.SIM928_on[6]\n\t\t\t\t\t# \tquenchCounter+=1\n\t\t\t\t\t# elif quenchCounter>10:\n\t\t\t\t\t# \tprint('quenched more than 10 times')\n\t\t\t\t\t# \tbreak\n\t\t\t\t\t# else:\n\t\t\t\t\t# \tcontinue\n\t\t\t\t\t\n\t\t\tself.createHistogram(stoparray, timebase, bincount,wholeRange,tau.magnitude)\n\t\t\tprint(\"here\")\n\n\n\t\t\ttau+=params['step tau']\n\t\t\t#self.fungen.output[1] = 'OFF'\n\n\t@Task()\n\tdef qutagInit(self):\n\t\tprint('qutag successfully initialized')\n\n\t@Element(name='QuTAG Parameters')\n\tdef qutag_params(self):\n\t\tparams = [\n\t# ('arbname', {'type': str, 'default': 'arbitrary_name'}),\n\t\t('Start Channel', {'type': int, 'default': 0}),\n\t\t('Stop Channel', {'type': int, 'default': 2}),\n\t\t('Total Hist Width Multiplier', {'type': int, 'default': 5}),\n\t\t('Bin Count', {'type': int, 'default': 1000})\n\t\t]\n\t\tw = ParamWidget(params)\n\t\treturn w\n\n\t@Element(name='Experiment Parameters')\n\tdef exp_parameters(self):\n\t\tparams = [\n\t# ('arbname', {'type': str, 'default': 'arbitrary_name'}),\n\t\t('# of Passes', {'type': int, 'default': 100}),\n\t\t# ('File Name', {'type': str})\n\t\t]\n\t\tw = ParamWidget(params)\n\t\treturn w\n\n\t@Element(name='Histogram')\n\tdef averaged(self):\n\t\tp = LinePlotWidget()\n\t\tp.plot('Channel 1')\n\t\treturn p\n\n\[email protected](startpulse.acquired)\n\tdef averaged_update(self, ev):\n\t\tw = ev.widget\n\t\txs = self.xs\n\t\tys = self.ys\n\t\tw.set('Channel 1', xs=xs, ys=ys)\n\t\treturn\n\n\t@Element(name='Pulse parameters')\n\tdef pulse_parameters(self):\n\t\tparams = [\n\t# ('arbname', {'type': str, 'default': 'arbitrary_name'}),\n\t\t('pulse height', {'type': float, 'default': 3, 
'units':'V'}),\n\t\t('pulse width', {'type': float, 'default': 300e-9, 'units':'s'}),\n\t\t('period', {'type': float, 'default': 0.1, 'units':'s'}),\n\t\t('repeat unit', {'type': float, 'default': 50e-9, 'units':'s'}),\n\t\t('start tau', {'type': float, 'default': 3e-6, 'units':'s'}),\n\t\t('stop tau', {'type': float, 'default': 10e-6, 'units':'s'}),\n\t\t('step tau', {'type': float, 'default': 1e-6, 'units':'s'}),\n\t\t# ('srs bias', {'type': float, 'default': 1.2, 'units':'V'}),\n\t\t('shutter offset', {'type': float, 'default': 500e-9, 'units':'s'}),\n\t\t('measuring range', {'type': float, 'default': 70e-6, 'units':'s'}),\n\t\t('buffer time', {'type': float, 'default': 100e-6, 'units':'s'}),\n\t\t]\n\t\tw = ParamWidget(params)\n\t\treturn w\n\n\[email protected]\n\tdef initialize(self):\n\t\tfrom lantz.drivers.qutools import QuTAG\n\t\tself.qutag = QuTAG()\n\t\tdevType = self.qutag.getDeviceType()\n\t\tif (devType == self.qutag.DEVTYPE_QUTAG):\n\t\t\tprint(\"found quTAG!\")\n\t\telse:\n\t\t\tprint(\"no suitable device found - demo mode activated\")\n\t\tprint(\"Device timebase:\" + str(self.qutag.getTimebase()))\n\t\treturn\n\n\[email protected]\n\tdef finalize(self):\n\t\treturn\n\n\[email protected]\n\tdef initialize(self):\n\t\tself.fungen.output[1] = 'OFF'\n\t\tself.fungen.output[2] = 'OFF'\n\t\tself.fungen.clear_mem(1)\n\t\tself.fungen.clear_mem(2)\n\t\tself.fungen.wait()\n\n\[email protected]\n\tdef finalize(self):\n\t\tself.fungen.output[1] = 'OFF'\n\t\tself.fungen.output[2] = 'OFF'\n\t\tprint('Two Pulse measurements complete.')\n\t\treturn", "import itertools as it\n\nfrom PyQt5 import QtWidgets, QtCore\n\nimport pyqtgraph as pg\nfrom pyqtgraph.graphicsItems.GraphicsObject import GraphicsObject\nfrom pyqtgraph import functions as fn\n\npg.setConfigOptions(imageAxisOrder='row-major')\nimport numpy as np\n\nfrom .colormap import viridis\nfrom .colors import cyclic_colors, colors\nfrom .widgets.splitter_widget import Splitter, SplitterOrientation\n\nfrom spyre.repository import pd, Node\nfrom spyre.widgets.spinbox import SpinBox\n\nclass BasePlotWidget(QtWidgets.QWidget):\n\n def __init__(self, w=None, plot_item=None, parent=None):\n super().__init__(parent=parent)\n if w is None:\n w = pg.PlotWidget()\n if plot_item is None:\n plot_item = w.getPlotItem()\n self.w = w\n self.plot_item = plot_item\n self.xonbottom = True\n self.yonleft = True\n self.traces = dict()\n\n self._title = ''\n self._xlabel = ''\n self._ylabel = ''\n self.invertY = False\n self.invertX = False\n\n self.init_plot()\n\n return\n\n def init_plot(self):\n self.build_toolbox()\n\n splitter_config = {\n 'main_w': self.w,\n 'side_w': self.toolbox,\n 'orientation': SplitterOrientation.vertical_left_button,\n }\n splitter = Splitter(**splitter_config)\n\n def show_event(ev):\n h = splitter.size().height()\n splitter.setSizes([1, 0])\n splitter.resize(h, h)\n\n # Collaspe the tools to start\n splitter.showEvent = show_event\n\n layout = QtWidgets.QGridLayout()\n layout.addWidget(splitter)\n layout.setContentsMargins(0, 0, 0, 0)\n\n self.setLayout(layout)\n return\n\n def build_toolbox(self):\n # create default tools\n self.crosshairs = CrosshairAddon(self.plot_item)\n\n # create toolbox and add tools\n self.toolbox = QtWidgets.QToolBox()\n self.toolbox.addItem(self.crosshairs, \"Crosshairs\")\n return\n\n @property\n def title(self):\n return self._title\n\n @title.setter\n def title(self, _title):\n self._title = _title\n self.plot_item.setTitle(title=_title)\n return\n\n @property\n def xlabel(self):\n return 
self._xlabel\n\n @xlabel.setter\n def xlabel(self, _xlabel):\n self._xlabel = _xlabel\n pos = 'bottom' if self.xonbottom else 'top'\n self.plot_item.setLabel(pos, _xlabel)\n return\n\n @property\n def ylabel(self):\n return self._ylabel\n\n @ylabel.setter\n def ylabel(self, _ylabel):\n self._ylabel = _ylabel\n pos = 'left' if self.yonleft else 'right'\n self.plot_item.setLabel(pos, _ylabel)\n return\n\n @property\n def invertY(self):\n return self._invertY\n\n @invertY.setter\n def invertY(self, invert):\n self.plot_item.getViewBox().invertY(invert)\n self._invertY = invert\n return\n\n @property\n def invertX(self):\n return self._invertX\n\n @invertX.setter\n def invertX(self, invert):\n self.plot_item.getViewBox().invertX(invert)\n self._invertX = invert\n return\n\n def generate_node(self, name):\n # This must be implemented for each subclass\n return None\n\n def load_node(self, node):\n # This must be implemented for each subclass\n pass\n\n def generate_meta(self, **kwargs):\n d = {'xlabel':self._xlabel, 'ylabel':self._ylabel, 'title':self._title, 'invertX':self._invertX, 'invertY':self._invertY}\n d.update(kwargs)\n return d\n\n\n def load_meta(self, meta, params=['xlabel', 'ylabel', 'title', 'invertX', 'invertY']):\n for key in params:\n if key in meta and hasattr(self, key):\n setattr(self, key, meta[key])\n\nclass HeatmapPlotWidget(BasePlotWidget):\n\n def __init__(self, parent=None, cmap=None):\n plot_item = pg.PlotItem(enableMouse=False)\n w = pg.ImageView(view=plot_item)\n super().__init__(parent=parent, w=w, plot_item=plot_item)\n w.ui.roiBtn.clicked.connect(self._set_roi_pos)\n self.grid(False)\n gradient = self.w.ui.histogram.gradient\n if cmap is None:\n cmap = viridis\n gradient.setColorMap(cmap)\n for tick in gradient.ticks:\n tick.hide()\n gradient.setFixedWidth(300)\n # disable gradient editing\n gradient.mouseClickEvent = lambda ev: None\n self.add_plotting_options()\n self._pos = None\n self._scale = None\n\n self.invertY = True\n return\n\n def _set_roi_pos(self):\n #Tries to place the ROI in a good spot when clicking the button\n r = np.array(self.plot_item.getViewBox().viewRange())\n pos = r.mean(axis=1)\n size = np.diff(r, axis=1)[:,0]\n if any(abs(pos-self.w.roi.pos())>abs(size)):\n self.w.roi.setPos(pos)\n if any(2*size<self.w.roi.size()):\n self.w.roi.setSize(size/2)\n\n\n def add_plotting_options(self):\n layout = QtWidgets.QVBoxLayout()\n self.plot_opts_checkboxes = dict()\n for k,val in [('autoHistogramRange',False), ('autoLevels',True), ('autoRange',True)]:\n self.plot_opts_checkboxes[k] = w = QtWidgets.QCheckBox(k)\n w.setChecked(val)\n layout.addWidget(w)\n tool_w = QtWidgets.QWidget()\n tool_w.setLayout(layout)\n self.toolbox.addItem(tool_w, \"Plotting Options\")\n\n\n def grid(self, toggle, alpha=0.4):\n self.plot_item.showGrid(x=toggle, y=toggle, alpha=alpha)\n return\n\n @property\n def im_pos(self):\n return self._pos\n\n @im_pos.setter\n def im_pos(self, pos):\n self._pos = pos\n\n @property\n def im_scale(self):\n return self._scale\n\n @im_scale.setter\n def im_scale(self, scale):\n self._scale = scale\n\n def set(self, im):\n self.w.setImage(im, pos=self._pos, scale=self._scale,\n autoRange=self.plot_opts_checkboxes['autoRange'].isChecked(),\n autoLevels=self.plot_opts_checkboxes['autoLevels'].isChecked(),\n autoHistogramRange=self.plot_opts_checkboxes['autoHistogramRange'].isChecked(),)\n return\n\n def get(self):\n return self.w.getImageItem().image\n\n plot_type_str = '2DPlotWidget_v1.0'\n def generate_node(self, name):\n meta = 
self.generate_meta(BasePlotWidget_type=self.plot_type_str, im_scale=self.im_scale, im_pos=self.im_pos)\n return Node(name, dataframe=pd.DataFrame(self.w.getImageItem().image), metadata=meta)\n\n def load_node(self, node):\n meta = node.get_meta()\n if not 'BasePlotWidget_type' in meta or meta['BasePlotWidget_type'] != self.plot_type_str:\n raise Exception('Can only load data from <{}>'.format(self.plot_type_str))\n self.load_meta(meta, params=['xlabel', 'ylabel', 'title', 'im_pos', 'im_scale', 'invertX', 'invertY'])\n self.set(node.get_data().as_matrix())\n\n\nclass LinePlotWidget(BasePlotWidget):\n\n plots_updated = QtCore.pyqtSignal(list)\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.legend = self.w.addLegend()\n self.grid(True)\n self._colors = it.cycle(cyclic_colors)\n self.install_fitter()\n return\n\n def install_fitter(self):\n self.fitter = FitterWidget(self.w)\n self.fitter.traces = self.traces\n self.toolbox.addItem(self.fitter, \"Fitter\")\n return\n\n def grid(self, toggle, alpha=0.4):\n self.w.showGrid(x=toggle, y=toggle, alpha=alpha)\n return\n\n def plot(self, tracename, **kwargs):\n symbol_pen = kwargs.get('symbol_pen', pg.mkPen(color=(255, 255, 255, 100)))\n symbol_brush = kwargs.get('symbol_brush', pg.mkBrush(color=(255, 255, 255, 100)))\n symbol_size = kwargs.get('symbol_size', 5)\n symbol = kwargs.get('symbol', 's')\n pen = kwargs.get('pen', pg.mkPen(color=(next(self._colors) + (200,)), width=1))\n antialias = kwargs.get('antialias', False)\n trace = self.w.plot(\n symbol=symbol,\n symbolSize=symbol_size,\n symbolPen=symbol_pen,\n symbolBrush=symbol_brush,\n pen=pen,\n antialias=antialias,\n )\n trace.curve.setClickable(True)\n trace_err = None\n self.legend.addItem(trace, tracename)\n self.traces[tracename] = trace, trace_err\n self.fitter.update_traces()\n return\n\n def remove_trace(self, tracename):\n for item in self.traces.pop(tracename):\n self.plot_item.removeItem(item)\n self.legend.removeItem(tracename)\n self.fitter.update_traces()\n\n def clear(self):\n for tracename in list(self.traces.keys()):\n self.remove_trace(tracename)\n self._colors = it.cycle(cyclic_colors)\n\n def set(self, tracename, **kwargs):\n trace, trace_err = self.traces[tracename]\n data = kwargs.get('data')\n xs = kwargs.get('xs')\n ys = kwargs.get('ys')\n yerrs = kwargs.get('yerrs')\n if not any(item is None for item in [data, xs, ys]):\n raise ValueError('No plot points supplied (either data or xs and ys must be given)')\n if data is not None:\n xs, ys = list(zip(*data))\n if not isinstance(xs, np.ndarray):\n xs = xs.values\n if not isinstance(ys, np.ndarray):\n ys = ys.values\n trace.setData(x=xs, y=ys)\n if yerrs is not None:\n if trace_err is None:\n error_bar_params = {\n 'x': xs,\n 'y': ys,\n 'top': 0,\n 'bottom': 0,\n 'beam': 0.0,\n 'pen': pg.mkPen(color=(255, 255, 255, 80)),\n }\n trace_err = pg.ErrorBarItem(**error_bar_params)\n trace_err.setZValue(-999)\n self.w.addItem(trace_err)\n self.traces[tracename] = trace, trace_err\n ylines = list(zip(*[(yerr / 2, yerr / 2) if not np.isnan(yerr) else (0, 0) for y, yerr in zip(ys, yerrs)]))\n if ylines:\n ybottoms, ytops = ylines\n trace_err.setData(x=xs, y=ys, top=ytops, bottom=ybottoms, beam=0.0)\n return\n\n def get(self, tracename):\n pditem, _ = self.traces[tracename]\n x, y = pditem.getData()\n if x is None or y is None:\n return None, None\n else:\n return x.astype(np.float), y.astype(np.float)\n\n plot_type_str = 'LinePlotWidget_v1.0'\n def generate_node(self, name):\n data = dict()\n max_size = 
0\n #Extract all the data and figure out the maximum length of the array\n for tracename in self.traces:\n x,y = self.get(tracename)\n if x is not None:\n data[tracename+'_x'] = x[~np.isnan(x)]\n data[tracename+'_y'] = y[~np.isnan(y)]\n max_size = max(max_size, len(data[tracename+'_x']), len(data[tracename+'_y']))\n print(max_size)\n\n #Resize all the arrays to max_size\n for tracename in data:\n temp_data = data[tracename]\n if len(temp_data)<max_size:\n new_data = np.empty(max_size)\n new_data[:] = np.NaN\n new_data[:len(temp_data)] = temp_data\n data[tracename] = new_data\n\n meta = self.generate_meta(BasePlotWidget_type=self.plot_type_str)\n return Node(name, dataframe=pd.DataFrame(data), metadata=meta)\n\n def load_node(self, node):\n meta = node.get_meta()\n if not 'BasePlotWidget_type' in meta or meta['BasePlotWidget_type'] != self.plot_type_str:\n raise Exception('Can only load data from <{}>'.format(self.plot_type_str))\n df = node.get_data()\n cols = list(df.columns)\n self.clear()\n self.load_meta(meta)\n while len(cols)>0:\n tracename = cols[0][:-2]\n x_i, y_i = cols.index(tracename+'_x'), cols.index(tracename+'_y')\n if x_i>y_i:\n x_name, y_name = cols.pop(x_i), cols.pop(y_i)\n else:\n y_name, x_name = cols.pop(y_i), cols.pop(x_i)\n x, y = df[x_name].as_matrix(), df[y_name].as_matrix()\n\n #Remove NaN and resize if necessary\n x,y = x[~np.isnan(x)], y[~np.isnan(y)]\n if len(x)!=len(y):\n l = min(len(x), len(y))\n x,y = x[:l], y[:l]\n self.plot(tracename)\n self.set(tracename, xs=x, ys=y)\n\n def __iter__(self):\n for tracename in self.traces:\n yield tracename\n\n def __getitem__(self, tracename):\n return self.get(tracename)\n\n\n\nclass FastImageWidget(BasePlotWidget):\n\n def __init__(self, parent=None):\n graphic_view = pg.GraphicsView()\n plot_item = pg.PlotItem(enableMouse=False)\n self.img_item = pg.ImageItem()\n plot_item.addItem(self.img_item)\n graphic_view.setCentralItem(plot_item)\n super().__init__(parent=parent, w=graphic_view, plot_item=plot_item)\n\n def set(self, image=None, autoLevels=None, **kargs):\n self.img_item.setImage(image=image, autoLevels=autoLevels, **kargs)\n\n plot_type_str = '2DPlotWidget_v1.0'\n def generate_node(self, name):\n meta = self.generate_meta(BasePlotWidget_type=self.plot_type_str)\n return Node(name, dataframe=pd.DataFrame(self.img_item.image), metadata=meta)\n\n def load_node(self, node):\n meta = node.get_meta()\n if not 'BasePlotWidget_type' in meta or meta['BasePlotWidget_type'] != self.plot_type_str:\n raise Exception('Can only load data from <{}>'.format(self.plot_type_str))\n self.load_meta(meta)\n self.set(node.get_data().as_matrix())\n\n##---------------------------------------------------------------\n## Crosshairs\n##---------------------------------------------------------------\nclass Crosshair(QtCore.QObject):\n sigPositionChanged = QtCore.pyqtSignal(object)\n sigPositionChangeFinished = QtCore.pyqtSignal(object)\n\n def __init__(self, plot_item, pos, **kwargs):\n super().__init__()\n self.kwargs = kwargs\n self.moving = False\n self.hovering = False\n\n pen = fn.mkPen((255, 255, 0, 127))\n self.vLine = pg.InfiniteLine(angle=90, pen=pen, movable=False)\n self.hLine = pg.InfiniteLine(angle=0, pen=pen, movable=False)\n self.vLine.hoverEvent, self.hLine.hoverEvent = self.hoverEvent, self.hoverEvent\n self.vLine.mouseDragEvent, self.hLine.mouseDragEvent = self.mouseDragEvent, self.mouseDragEvent\n\n self.center_dot = pg.ScatterPlotItem(pos=[pos], pen=fn.mkPen((255,0,0, 127)), brush=(255,0,0), symbol='o', size=3)\n\n 
plot_item.addItem(self.vLine, ignoreBounds=True)\n plot_item.addItem(self.hLine, ignoreBounds=True)\n plot_item.addItem(self.center_dot, ignoreBounds=True)\n self.plot_item = plot_item\n self.set_pos(pos)\n\n def set_pos(self, pos, emit_sig=True):\n if isinstance(pos, QtCore.QPointF):\n self.pos = [pos.x(), pos.y()]\n else:\n self.pos = list(pos)\n self.vLine.setPos(self.pos[0])\n self.hLine.setPos(self.pos[1])\n self.center_dot.setData(pos=[pos])\n if emit_sig: self.sigPositionChanged.emit(self.get_pos())\n\n def mouseDragEvent(self, ev):\n if ev.button() == QtCore.Qt.LeftButton:\n if ev.isStart():\n self.moving = True\n ev.accept()\n\n if not self.moving:\n return\n self.set_pos(self.plot_item.vb.mapSceneToView(ev.scenePos()))\n if ev.isFinish():\n self.moving = False\n self.sigPositionChangeFinished.emit(self.get_pos())\n\n def hoverEvent(self, ev):\n if (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.LeftButton):\n self.hovering = True\n for line in [self.vLine, self.hLine]: line.currentPen = fn.mkPen(255, 0,0)\n else:\n self.hovering = False\n for line in [self.vLine, self.hLine]: line.currentPen = line.pen\n for line in [self.vLine, self.hLine]:\n line.update()\n\n def get_pos(self):\n return self.pos\n\n def delete(self):\n self.hLine.deleteLater()\n self.vLine.deleteLater()\n self.center_dot.deleteLater()\n\n\nclass CrosshairAddon(QtWidgets.QWidget):\n sigCrosshairAdded = QtCore.pyqtSignal(object)\n sigCrosshairRemoved = QtCore.pyqtSignal(object)\n def __init__(self, plot_item, **kwargs):\n super().__init__(**kwargs)\n self.plot_item = plot_item\n self.cross_list = list()\n self._spinbox_decimals = 4\n\n self.build_ui()\n\n\n @property\n def spinbox_decimals(self):\n return self._spinbox_decimals\n\n @spinbox_decimals.setter\n def spinbox_decimals(self, val):\n if self._spinbox_decimals != val:\n for r in range(self.table.rowCount()):\n self.table.cellWidget(r,0).setDecimals(val)\n self.table.cellWidget(r,1).setDecimals(val)\n self._spinbox_decimals = val\n\n def build_ui(self):\n self.table = QtWidgets.QTableWidget()\n self.table.setColumnCount(3)\n self.table.setHorizontalHeaderLabels(['x','y','Delete'])\n\n #Add control (for now just an add button)\n add_btn = QtWidgets.QPushButton('+ Add')\n def add():\n r = np.array(self.plot_item.getViewBox().viewRange())\n self.add_crosshair(r.mean(axis=1))\n add_btn.clicked.connect(lambda: add())\n\n #Add a decimal precision box\n decimal_input = SpinBox(value=self.spinbox_decimals, minStep=1, dec=False, int=True, bounds=(0, None), step=1)\n decimal_input.valueChanged.connect(lambda x: setattr(self, 'spinbox_decimals', decimal_input.value()))\n\n ctrl_layout = QtWidgets.QFormLayout()\n ctrl_layout.addRow('Add Crosshair', add_btn)\n ctrl_layout.addRow('Floating point precision', decimal_input)\n ctrl_widget = QtWidgets.QWidget()\n ctrl_widget.setLayout(ctrl_layout)\n\n\n layout = QtWidgets.QGridLayout()\n layout.addWidget(ctrl_widget)\n layout.addWidget(self.table)\n layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(layout)\n\n def add_crosshair(self, pos, **kwargs):\n # Add the Crosshair to the list\n cross = Crosshair(self.plot_item, pos, **kwargs)\n self.cross_list.append(cross)\n\n # Add the table entry\n row = len(self.cross_list)-1\n self.table.insertRow(row)\n\n # Add the x,y widgets\n def update_pos(axis, value):\n if cross.moving:\n return\n cur = cross.get_pos()\n if axis==0:\n cross.set_pos([value, cur[1]], emit_sig=False)\n elif axis==1:\n cross.set_pos([cur[0], value], emit_sig=False)\n def lambda_gen(axis):\n return lambda 
obj: update_pos(axis, obj.value())\n\n for i in range(2):\n w = SpinBox(value = pos[i], dec=True, decimals=self.spinbox_decimals)\n self.table.setCellWidget(row, i, w)\n w.sigValueChanged.connect(lambda_gen(i))\n\n\n # Add a remove button\n btn = QtWidgets.QPushButton('X')\n btn.clicked.connect(lambda: self.remove_crosshair(cross))\n self.table.setCellWidget(row, 2, btn)\n\n # Link the position of the cross to the numbers in the table\n cross.sigPositionChanged.connect(lambda: self.update_table_entry(cross))\n self.sigCrosshairAdded.emit(cross)\n\n\n def _find_index(self, cross):\n for i in range(len(self.cross_list)):\n if self.cross_list[i] == cross:\n return i\n\n def update_table_entry(self, cross):\n row = self._find_index(cross)\n self.table.cellWidget(row, 0).setValue(cross.get_pos()[0])\n self.table.cellWidget(row, 1).setValue(cross.get_pos()[1])\n\n def remove_crosshair(self, cross):\n index = self._find_index(cross)\n self.table.removeRow(index)\n cross.delete()\n self.cross_list.pop(index)\n self.sigCrosshairRemoved.emit(index)\n\n\n def __getitem__(self, k):\n return self.cross_list[k].get_pos()\n\n def __iter__(self):\n for cross in self.cross_list:\n yield cross\n\n def __len__(self):\n return len(self.cross_list)\n\nfrom PyQt5 import Qsci, QtGui\nimport traceback\nimport inspect\nfrom scipy import optimize\n\nclass ExpressionEditor(Qsci.QsciScintilla):\n\n def __init__(self, parent=None):\n super().__init__(parent=parent)\n self.lexer = Qsci.QsciLexerPython()\n api = Qsci.QsciAPIs(self.lexer)\n api.prepare()\n self.setLexer(self.lexer)\n self.setAutoCompletionThreshold(1)\n self.setAutoCompletionSource(Qsci.QsciScintilla.AcsAPIs)\n self.init_ui()\n return\n\n def init_ui(self):\n # line highlighting\n self.setCaretLineVisible(True)\n self.setCaretLineBackgroundColor(QtGui.QColor(\"gainsboro\"))\n\n # autoindentation\n self.setAutoIndent(True)\n self.setIndentationGuides(True)\n self.setIndentationsUseTabs(True)\n self.setIndentationWidth(4)\n\n # margins\n self.setMarginsBackgroundColor(QtGui.QColor(\"gainsboro\"))\n self.setMarginsFont(QtGui.QFont(\"Consolas\", 9, 87))\n self.setMarginLineNumbers(1, True)\n self.setMarginLineNumbers(2, False)\n\n # editor font\n self.font = QtGui.QFont()\n self.font.setFamily('Consolas')\n self.font.setPointSize(10)\n self.font.setFixedPitch(True)\n self.lexer.setFont(self.font)\n\n # horiz scrollbar\n self.SendScintilla(self.SCI_SETHSCROLLBAR, 1)\n return\n\nclass FitterWidget(QtWidgets.QWidget):\n\n def __init__(self, w, parent=None):\n super().__init__(parent=parent)\n self.w = w\n self.traces = dict()\n self.fits = dict()\n self.init_ui()\n return\n\n def init_ui(self):\n self.plots = QtWidgets.QComboBox()\n self.editor = ExpressionEditor()\n self.update_traces()\n self.fit = QtWidgets.QPushButton('Fit')\n self.remove = QtWidgets.QPushButton('Remove fit')\n self.remove_all = QtWidgets.QPushButton('Remove all fits')\n self.fit.clicked.connect(self.compile_and_fit)\n self.remove.clicked.connect(self.remove_fit)\n self.remove_all.clicked.connect(self.remove_all_fits)\n layout = QtWidgets.QVBoxLayout()\n layout.addWidget(self.plots)\n layout.addWidget(self.editor)\n layout.addWidget(self.fit)\n layout.addWidget(self.remove)\n layout.addWidget(self.remove_all)\n self.setLayout(layout)\n self.debug_text()\n return\n\n def debug_text(self):\n text = (\"import numpy as np\\ndef test(xs, amplitude=1, phase=0, f=1, decay=1):\\n\"\n \" return amplitude * np.square(np.sin(f * xs + phase)) * np.exp(-xs / decay)\")\n self.editor.setText(text)\n 
return\n\n    def update_traces(self):\n        self.plots.clear()\n        self.plots.addItems([plot_name for plot_name in sorted(self.traces.keys())])\n        return\n\n    def update_fits(self):\n        for trace_name, data in self.fits.items():\n            xs = data['xs']\n            ys = data['ys']\n            curve = data['curve']\n            if curve is None:\n                curve = self.w.plot(pen=pg.mkPen(color=colors['yellow'], width=1), antialias=True)\n                data['curve'] = curve\n            curve.setData(x=xs, y=ys)\n        return\n\n    def remove_fit(self):\n        selected_trace_name = self.plots.currentText()\n        try:\n            fit_data = self.fits[selected_trace_name]\n            fit = fit_data['curve']\n        except KeyError:\n            return\n        self.w.removeItem(fit)\n        del self.fits[selected_trace_name]\n        return\n\n    def remove_all_fits(self):\n        for name in (self.plots.itemText(idx) for idx in range(self.plots.count())):\n            try:\n                fit_data = self.fits[name]\n                fit = fit_data['curve']\n            except KeyError:\n                continue\n            self.w.removeItem(fit)\n            del self.fits[name]\n        return\n\n    def compile_and_fit(self):\n        selected_trace_name = self.plots.currentText()\n        editor_text = self.editor.text()\n        try:\n            code = compile(editor_text, '<string>', 'exec')\n        except Exception:\n            # report bad expressions from the editor and bail out before exec\n            traceback.print_exc()\n            return\n        namespace = dict()\n        exec(code, namespace)\n        if not code.co_names:\n            # raise something...no functions found\n            return\n        for name in code.co_names:\n            try:\n                f = namespace[name]\n            except KeyError:\n                continue\n            try:\n                sig = inspect.signature(f)\n            except TypeError:\n                continue\n            var_params = list()\n            for param_name, param in sig.parameters.items():\n                if param.kind == param.POSITIONAL_OR_KEYWORD:\n                    if param.default == param.empty:\n                        continue\n                    var_params.append((param.name, param.default))\n\n            trace, _ = self.traces[selected_trace_name]\n            plot_xs, plot_ys = trace.xData, trace.yData\n            x0 = [value for _, value in var_params]\n            max_nfev = 10000\n            try:\n                popt, pcov = optimize.curve_fit(f, plot_xs, plot_ys, p0=x0)\n            except RuntimeError:\n                raise\n            print(popt)\n            fit_ys = f(plot_xs, *popt)\n            self.fits[selected_trace_name] = {\n                'xs': plot_xs,\n                'ys': fit_ys,\n                'curve': self.fits.get(selected_trace_name, dict()).get('curve'),\n            }\n        self.update_fits()\n        return\n" ]
[ [ "numpy.array" ], [ "numpy.isnan", "scipy.optimize.curve_fit", "numpy.diff", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
govindansriram/CobraML
[ "d231d2e446df7e7860071f5d7cfa1e31afa99c6b" ]
[ "Classification/LogisticModel.py" ]
[ "from torch import nn\nimport torch\n\n\nclass LogisticRegression(nn.Module):\n\n def __init__(self,\n theta_params: int):\n\n super(LogisticRegression, self).__init__()\n self.__linear = nn.Linear(theta_params, 1)\n self.__sigmoid_layer = nn.Sigmoid()\n\n def forward(self,\n x_input: torch.tensor) -> torch.tensor:\n\n return self.__sigmoid_layer(self.__linear(x_input))\n" ]
[ [ "torch.nn.Linear", "torch.nn.Sigmoid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
octonion/betting
[ "d3be4dc3c3d2f5aa77006e5c9f388c1b79414efb" ]
[ "kelly/fractional.py" ]
[ "#!/usr/bin/env python3\n\nimport csv\nimport sys\n\nimport numpy as np\nimport pandas as pd\n\ncsv_name = sys.argv[1]\nfraction = float(sys.argv[2])\n\nwith open(csv_name, 'r') as f:\n    reader = csv.reader(f)\n    for p in reader:\n        ip = next(reader)\n        m = int(p[0])\n        \n        # Our probabilities\n        \n        p = np.array([float(i) for i in p[1:]])\n        n = int(ip[0])\n\n        # Implied probabilities\n        \n        ip = np.array([float(i) for i in ip[1:]])\n        print(\"Race:\",n)\n        print()\n\n        race = pd.DataFrame()\n\n        race['p'] = p\n        race['p*'] = ip\n        race['r'] = race['p']/race['p*']\n        race = race.sort_values(by=['r'], ascending=[False])\n        race['bet'] = False\n\n        p_total = 0.0\n        ip_total = 0.0\n        \n        for i, row in race.iterrows():\n            # Must be a positive hedge\n            if (row['p'] > row['p*']*(1-p_total)/(1-ip_total)):\n                race.at[i,'bet'] = True\n                p_total = p_total + row['p']\n                ip_total = ip_total + row['p*']\n            else:\n                break\n\n        # Fractions as per binary Kelly\n\n        race['f'] = 0.0\n        for i, row in race.iterrows():\n            if (row['bet']):\n                race.at[i,'f'] = row['p']-row['p*']*(1-p_total)/(1-ip_total)\n\n        f = p_total-(1-p_total)*ip_total/(1-ip_total)\n        growth = 0.0\n        for i, row in race.iterrows():\n            if (row['bet']):\n                growth += row['p']*np.log(1-f+row['f']/row['p*'])\n        \n        growth += (1-p_total)*np.log(1-f)\n        #print(\"Expected log-growth =\",growth)\n\n        # Optimal bet fraction is as per binary Kelly\n\n        optimal_f = p_total - (1-p_total)*ip_total/(1-ip_total)\n        print(\"Optimal Kelly fraction =\",optimal_f)\n\n        # Optimal expected log growth is Kullback-Leibler divergence\n\n        klg = 0.0\n        for i, row in race.iterrows():\n            if (row['bet']):\n                klg = klg + row['p']*np.log(row['p']/row['p*'])\n\n        klg = klg + (1-p_total)*np.log((1-p_total)/(1-ip_total))\n        print(\"Kullback-Leibler growth =\",klg)\n\n        print()\n        print(\"Full Kelly optimal bets:\")\n        print()\n        print(race)\n        print()\n\n        # Fractional Kelly\n\n        print(\"Fraction of optimal Kelly =\",fraction)\n        f = fraction*optimal_f\n        print(\"Fraction of bankroll =\",f)\n        print()\n        \n        p_growth = 0.0\n        for i, row in race.iterrows():\n            if (row['bet']):\n                p_growth += row['p']*np.log(1-f+fraction*row['f']/row['p*'])\n\n        p_growth += (1-p_total)*np.log(1-f)\n\n        print(\"Proportional fractional expected log-growth =\",p_growth)\n\n        for i in reversed(race.index):\n            if (race.at[i,'bet']) and (f*(race.at[i,'p']*(1-ip_total)/p_total+race.at[i,'p*'])+(race.at[i,'p']*ip_total/p_total-race.at[i,'p*']) < 0):\n                race.at[i,'bet'] = False\n                p_total = p_total-race.at[i,'p']\n                ip_total = ip_total-race.at[i,'p*']\n\n        growth = 0.0\n        race['f'] = 0.0\n        for i, row in race.iterrows():\n            if (row['bet']):\n                race.at[i,'f'] = f*(row['p']*(1-ip_total)/p_total+row['p*'])+(row['p']*ip_total/p_total-row['p*'])\n\n        for i, row in race.iterrows():\n            if (row['bet']):\n                growth += row['p']*np.log(1-f+row['f']/row['p*'])\n\n        growth += (1-p_total)*np.log(1-f)\n        print(\"Optimal fractional expected log-growth =\",growth)\n\n        print()\n        print(\"Fractional Kelly optimal bets:\")\n        print()\n        print(race)\n        print()\n" ]
[ [ "numpy.log", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
GroupLe/grouple-face-tagger
[ "5fd87c074dc50a5fc341e9f30774094a1616a87f", "5fd87c074dc50a5fc341e9f30774094a1616a87f" ]
[ "models/NER/models/lstm/token_dataset.py", "models/NER/reproduce/03-split_data.py" ]
[ "from torch.utils.data import Dataset\nimport numpy as np\nimport torch\nfrom . import functions\n\n\nclass TokensDataset(Dataset):\n def __init__(self, X, Y):\n self.X = self.encode_x(X)\n self.y = Y\n\n @staticmethod\n def encode_x(x: list) -> list:\n max_len = len(max(x, key=lambda i: len(i)))\n encoded = []\n for i in x:\n encoded.append(np.array(functions.encode(i, max_len)))\n return encoded\n\n @staticmethod\n def collate_fn(objs: list) -> (torch.LongTensor, torch.Tensor):\n data = ([i[0] for i in objs])\n labels = ([i[1] for i in objs])\n data = torch.LongTensor(data)\n labels = torch.tensor(labels)\n return data, labels\n\n def __len__(self):\n return len(self.y)\n\n def __getitem__(self, idx):\n return self.X[idx], self.y[idx]\n", "# Splits data for train-test\nfrom pathlib import Path\nfrom sklearn.model_selection import train_test_split\n\nTEST_PART = 0.2\nPATH = Path('../../../data/NER/processed/comments/augmented_10/')\n\nif __name__ == '__main__':\n data = open(PATH / 'raw.txt').read().split('\\n\\n')\n train, test = train_test_split(data, test_size=TEST_PART, random_state=2021)\n\n with open(PATH / 'train.txt', 'w') as f:\n f.write('\\n\\n'.join(train))\n with open(PATH / 'test.txt', 'w') as f:\n f.write('\\n\\n'.join(test))\n" ]
[ [ "torch.LongTensor", "torch.tensor" ], [ "sklearn.model_selection.train_test_split" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MattiooFR/plugins
[ "90a686609fb5be2e83221c1f0e8fce18cb2b6021" ]
[ "v7/pyplots/pyplots.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright © 2012-2015 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom hashlib import md5\nimport io\nimport os\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import directives\nfrom docutils.parsers.rst.directives import images\n\ntry:\n import matplotlib\n import matplotlib._pylab_helpers\n import matplotlib.pyplot as plt\nexcept ImportError:\n matplotlib = None\n\nfrom nikola.plugin_categories import RestExtension\nfrom nikola.utils import req_missing, makedirs\n\n_site = None\n\n\nclass Plugin(RestExtension):\n\n name = \"pyplots\"\n\n def set_site(self, site):\n global _site\n _site = self.site = site\n directives.register_directive('plot', PyPlot)\n PyPlot.out_dir = os.path.join(site.config['OUTPUT_FOLDER'], 'pyplots')\n return super(Plugin, self).set_site(site)\n\n\npyplot_spec = images.Image.option_spec\npyplot_spec['include-source'] = directives.flag\n\n\nclass PyPlot(images.Image):\n \"\"\" Reimplementation of http://matplotlib.org/sampledoc/extensions.html#inserting-matplotlib-plots.\"\"\"\n\n has_content = True\n option_spec = pyplot_spec\n optional_arguments = 1\n required_arguments = 0\n\n def run(self):\n if matplotlib is None:\n msg = req_missing(['matplotlib'], 'use the plot directive', optional=True)\n return [nodes.raw('', '<div class=\"text-error\">{0}</div>'.format(msg), format='html')]\n\n if not self.arguments and not self.content:\n raise self.error('The plot directive needs either an argument or content.')\n\n if self.arguments and self.content:\n raise self.error('The plot directive needs either an argument or content, not both.')\n\n if self.arguments:\n plot_path = self.arguments[0]\n with io.open(plot_path, encoding='utf-8') as fd:\n data = fd.read()\n elif self.content:\n data = '\\n'.join(self.content)\n plot_path = md5(data).hexdigest()\n\n # Always reset context\n plt.close('all')\n matplotlib.rc_file_defaults()\n # Run plot\n exec(data)\n\n out_path = os.path.join(self.out_dir, plot_path + '.svg')\n plot_url = '/' + os.path.join('pyplots', plot_path + '.svg').replace(os.sep, '/')\n\n figures = [manager.canvas.figure for manager in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]\n for figure in figures:\n makedirs(os.path.dirname(out_path))\n figure.savefig(out_path, format='svg') # Yes, if there's more than one, it's overwritten, sucks.\n self.arguments = [plot_url]\n return super(PyPlot, self).run()\n" ]
[ [ "matplotlib._pylab_helpers.Gcf.get_all_fig_managers", "matplotlib.rc_file_defaults", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sporreking/Evaluation-of-Synthetic-Face-Generation-Approaches
[ "480608524751c8f48445404ad92708133472cd0e", "480608524751c8f48445404ad92708133472cd0e" ]
[ "src/metric/FIDCompoundMetric.py", "src/util/AuxUtil.py" ]
[ "from src.metric.CompoundMetric import CompoundMetric\nfrom src.metric.SampleMetricManager import SampleMetricManager\nfrom src.core.Setupable import SetupMode\nfrom src.metric.CompoundMetricManager import CompoundMetricManager\nfrom typing import Any\nfrom cleanfid import fid\nfrom src.population.Population import Population\nfrom src.dataset.FFHQDataset import FFHQDataset\nimport numpy as np\nfrom pathlib import Path\n\nFID_NAME = \"FID\"\nFID_CALC_MODES = [\"clean\", \"legacy_tensorflow\", \"legacy_pytorch\"]\nFID_DEFAULT_CALC_MODE = \"clean\"\n\n# TODO: Fix this:\n#! Only works when equal to zero, gets pickle error otherwise\nNUM_WORKERS = 0\n\n\nclass FIDCompoundMetric(CompoundMetric):\n def __init__(\n self,\n cmm: CompoundMetricManager,\n smm: SampleMetricManager = None,\n ):\n \"\"\"\n Constructor for FIDCompoundMetric class, subclass of the CompoundMetric class.\n\n Args:\n cmm (CompoundMetricManager): Manager used by metrics. Population and dataset is derived\n from this manager.\n smm (SampleMetricManager, optional): Not used for this metric. Defaults to None.\n \"\"\"\n super(FIDCompoundMetric, self).__init__(FID_NAME, cmm, smm)\n\n # Init storage structure for this metric\n self._fid = dict()\n\n def reg_setup_modes(self) -> dict[str, SetupMode]:\n ds = self.get_dataset()\n return {\n f\"statistics_{fcm}_{ds.get_name(ds.get_resolution())}\": SetupMode(\n True,\n lambda _, fcm=fcm: self._setup(fcm),\n lambda fcm=fcm: self._is_ready(fcm),\n )\n for fcm in FID_CALC_MODES\n }\n\n def _setup(self, calc_mode: str = FID_DEFAULT_CALC_MODE) -> None:\n \"\"\"\n Setup the needed statistics to calculate the metric.\n\n Note that each dataset and `calc_mode` combination needs a calculated statistic.\n\n For more information regarding `calc_mode`, see the documentation for `calc()`.\n\n Args:\n calc_mode (str, optional): Calc mode determines FID implementation, different statistics\n needed for different implementations. See documentation on `calc()` for more information.\n Defaults to `FID_DEFAULT_CALC_MODE` (\"clean\").\n\n Raises:\n ValueError: Error when non-valid `calc_mode`, valid modes are defined by `FID_CALC_MODES`.\n \"\"\"\n # Check calc_mode\n if calc_mode not in FID_CALC_MODES:\n raise ValueError(\n f\"{calc_mode} not supported, supported modes: {FID_CALC_MODES}\"\n )\n\n # Calculate custom statistics\n ds = self.get_dataset()\n fid.make_custom_stats(\n ds.get_name(ds.get_resolution()), str(ds.get_image_dir()), mode=calc_mode\n )\n\n def _is_ready(self, calc_mode=FID_DEFAULT_CALC_MODE) -> bool:\n \"\"\"\n Checks if compound metric is ready for calculations.\n\n Args:\n calc_mode (str, optional): Calc mode determines FID implementation, different statistics\n needed for different implementations. 
See documentation on `calc()` for more information.\n                Defaults to `FID_DEFAULT_CALC_MODE` (\"clean\").\n\n        Raises:\n            ValueError: Error when non-valid `calc_mode`, valid modes are defined by `FID_CALC_MODES`.\n\n        Returns:\n            bool: True if the compound metric is ready for calculations.\n        \"\"\"\n        # Check calc_mode\n        if calc_mode not in FID_CALC_MODES:\n            raise ValueError(\n                f\"{calc_mode} not supported, supported modes: {FID_CALC_MODES}\"\n            )\n\n        ds = self.get_dataset()\n\n        if type(ds).get_resolution_invariant_name() == \"FFHQ\" and (\n            ds.get_resolution() == 256 or ds.get_resolution() == 1024\n        ):\n            # pre-computed statistic by clean-fid\n            return True\n        else:\n            return fid.test_stats_exists(ds.get_name(), calc_mode)\n\n    def _move_filtered_files(self, source_files: list[Path]) -> Path:\n        # Look for temp name not taken\n        while True:\n            target = (\n                Population.POPULATION_ROOT_DIR\n                / f\"temp_filtered_population{np.random.randint(10000,99999)}\"\n            )\n            if not (target.is_file() or target.is_dir()):\n                break\n\n        # Create temp directory\n        Path.mkdir(target)\n\n        # Move filtered files to temp directory\n        for image_file in source_files:\n            image_file.rename(target.joinpath(image_file.name))\n\n        return target\n\n    def _move_filtered_files_back(self, source: Path) -> None:\n        target = Population.POPULATION_ROOT_DIR / self.get_population().get_name()\n        # Check if source file exists\n        if not source.exists():\n            raise FileNotFoundError(f\"Could not find file: '{source.absolute()}'\")\n\n        # Check if target file exists\n        if not target.exists():\n            raise FileNotFoundError(f\"Could not find file: '{target.absolute()}'\")\n\n        # Move filtered files back to population directory\n        for image_file in source.glob(\"*\"):\n            image_file.rename(target.joinpath(image_file.name))\n\n        # Remove temp directory\n        source.rmdir()\n\n    def calc(self, filter_bit: int = 1, **parameters: Any) -> Any:\n        \"\"\"\n        Calculates the FID given the dataset and the population.\n\n        No setup is needed for FFHQ 256/1024; if using other custom datasets, `calc()`\n        requires the user to run `setup()` first.\n\n        Args:\n            filter_bit (int, optional): Filter bit used to select a subset of the\n                population. Defaults to 1 (IdentityFilter).\n            calc_mode (str, optional): Either \"clean\", \"legacy_tensorflow\", or \"legacy_pytorch\".\n                This decides how the FID score should be calculated, i.e., using clean-fid,\n                regular tensorflow implementation, or pytorch implementation. 
Default is \"clean\" (clean-fid).\n        Raises:\n            ValueError: Error when non-valid `calc_mode`, valid modes are defined by `FID_CALC_MODES`.\n            ValueError: Error when the name of the dataset in conjunction with the\n                specified `calc_mode` don't have a pre-computed statistic.\n            RuntimeError: When clean-fid library gets an error.\n            FileNotFoundError: When incorrect path is provided when moving files.\n        Returns:\n            Any: The FID value.\n        \"\"\"\n        # Fetch parameters\n        calc_mode = self._check_calc_mode(parameters)\n\n        # Move files to temp folder\n        uris = self._population.get_filtered_data(filter_bit)[\n            self._population.COLUMN_URI\n        ]\n        uris = [Path(str_path) for str_path in list(uris)]\n        pop_path = self._move_filtered_files(uris)\n\n        # Get variables for use in FID\n        ds = self.get_dataset()\n        resolution = ds.get_resolution()\n        fid_score = None\n\n        if type(ds).get_resolution_invariant_name() == \"FFHQ\" and (\n            ds.get_resolution() == 256 or ds.get_resolution() == 1024\n        ):\n            # Use pre-computed statistic by clean-fid\n            try:\n                fid_score = fid.compute_fid(\n                    str(pop_path),\n                    dataset_name=type(ds).get_resolution_invariant_name(),\n                    dataset_res=resolution,\n                    mode=calc_mode,\n                    dataset_split=\"trainval70k\",\n                    num_workers=NUM_WORKERS,\n                )\n            except Exception as error:\n                self._move_filtered_files_back(pop_path)\n                print(\"Something went wrong when calculating FID using clean-fid.\")\n                print(repr(error))\n                raise\n        else:\n            # Use custom pre-computed statistic\n            dataset_name = ds.get_name(resolution)\n\n            # Check if statistic exists\n            if fid.test_stats_exists(dataset_name, calc_mode):\n                try:\n                    fid_score = fid.compute_fid(\n                        str(pop_path),\n                        dataset_name=dataset_name,\n                        mode=calc_mode,\n                        dataset_split=\"custom\",\n                        num_workers=NUM_WORKERS,\n                    )\n                except Exception as error:\n                    # Move back files\n                    self._move_filtered_files_back(pop_path)\n                    print(\"Something went wrong when calculating FID using clean-fid.\")\n                    print(repr(error))\n                    raise\n\n            else:\n                # Move back files\n                self._move_filtered_files_back(pop_path)\n                raise ValueError(\n                    f\"Statistic named '{dataset_name}' with `calc_mode` '{calc_mode}'\"\n                    \" has no statistic. 
Double check `calc_mode` or run 'setup()'\"\n                )\n\n        # Move back files\n        self._move_filtered_files_back(pop_path)\n\n        # Save result\n        self._fid[calc_mode] = fid_score\n        return fid_score\n\n    def get(self, calc_if_missing: bool = False, **parameters: Any) -> Any:\n        # Check parameters\n        calc_mode = self._check_calc_mode(parameters)\n\n        # Check if metric already calculated\n        if calc_mode in self._fid.keys() and self._fid[calc_mode] is not None:\n            return self._fid[calc_mode]\n\n        # Check if calculate when missing\n        elif calc_if_missing:\n            return self.calc(**parameters)\n\n        else:\n            return None\n\n    def print_result(self) -> None:\n        for calc_mode, fid_score in self._fid.items():\n            print(calc_mode + \" FID: \", fid_score)\n\n    def plot_result(self) -> None:\n        pass\n\n    def _check_calc_mode(self, parameters) -> str:\n        # Fetch parameters\n        if \"calc_mode\" in parameters.keys():\n            calc_mode = parameters[\"calc_mode\"]\n\n            # Check calc_mode\n            if calc_mode not in FID_CALC_MODES:\n                raise ValueError(\n                    f\"{calc_mode} not supported, supported modes: {FID_CALC_MODES}\"\n                )\n        else:\n            calc_mode = FID_DEFAULT_CALC_MODE\n        return calc_mode\n", "from collections import OrderedDict\nfrom pathlib import Path\nimport re\nfrom typing import Tuple, List\nimport inspect\nfrom src.util.FileJar import FileJar\n\nfrom matplotlib import pyplot as plt\nimport torch\nimport numpy as np\n\n\nTRAIN_LOSS_STATE_KEY = \"train_loss\"\nVALID_LOSS_STATE_KEY = \"valid_loss\"\nEPOCH_STATE_KEY = \"epoch\"\nBATCH_STATE_KEY = \"batch\"\nNUM_BATCHES_PER_EPOCH_KEY = \"num_batches_per_epoch\"\n\nSAVE_FILE_EXT = \"pt\"\n\n_file_jar = FileJar(Path(\"auxiliary\"), create_root_dir=True)\n\n\nclass AuxModelInfo:\n    \"\"\"\n    Used to pass auxiliary model data.\n    \"\"\"\n\n    def __init__(\n        self,\n        state: OrderedDict,\n        epoch: int,\n        batch: int,\n        num_batches_per_epoch: int,\n        train_loss: float,\n        valid_loss: float,\n    ):\n        \"\"\"\n        Constructs a new AuxModelInfo instance for carrying model metadata.\n\n        Args:\n            state (OrderedDict[str, torch.Tensor]): The state_dict of the model to save.\n            epoch (int): The epoch with which the specified `state` is associated.\n            batch (int): The batch with which the specified `state` is associated.\n            num_batches_per_epoch (int): Number of batches per epoch.\n            train_loss (float): The training loss score of the specified `state`.\n            valid_loss (float): The validation loss score of the specified `state`.\n        \"\"\"\n        self._state = state\n        self._epoch = epoch\n        self._batch = batch\n        self._num_batches_per_epoch = num_batches_per_epoch\n        self._train_loss = train_loss\n        self._valid_loss = valid_loss\n\n    @property\n    def state(self) -> OrderedDict:\n        \"\"\"\n        Get the state_dict of the auxiliary model.\n        \"\"\"\n        return self._state\n\n    @property\n    def epoch(self) -> int:\n        \"\"\"\n        Get the epoch with which the `state` is associated.\n        \"\"\"\n        return self._epoch\n\n    @property\n    def batch(self) -> int:\n        \"\"\"\n        Get the batch with which the `state` is associated.\n        \"\"\"\n        return self._batch\n\n    @property\n    def num_batches_per_epoch(self) -> int:\n        \"\"\"\n        Get the number of batches per epoch.\n        \"\"\"\n        return self._num_batches_per_epoch\n\n    @property\n    def train_loss(self) -> float:\n        \"\"\"\n        Get the training loss score.\n        \"\"\"\n        return self._train_loss\n\n    @property\n    def valid_loss(self) -> float:\n        \"\"\"\n        Get the validation loss score.\n        \"\"\"\n        return self._valid_loss\n\n\ndef _full_name(name: str, epoch: int, batch: int) -> str:\n    return f\"{name}_e{epoch}_b{batch}.{SAVE_FILE_EXT}\"\n\n\ndef _is_epoch_batch_format(name: str) 
-> bool:\n return bool(re.match(r\"^[A-Za-z0-9_\\.]+_e[0-9]+_b[0-9]+\\.pt$\", name))\n\n\ndef _epoch_batch_from_full_name(full_name: str) -> Tuple[int, int]:\n e, b = full_name.split(\"_\")[-2:]\n return (int(e[1:]), int(b.split(\".\")[0][1:]))\n\n\ndef _best_name(name: str) -> str:\n return f\"{name}_best.{SAVE_FILE_EXT}\"\n\n\ndef _aux_save_func(info: AuxModelInfo):\n return (\n (lambda p: torch.save(info.state, p))\n if \"_use_new_zipfile_serialization\"\n not in inspect.signature(torch.save).parameters\n else (lambda p: torch.save(info.state, p, _use_new_zipfile_serialization=False))\n )\n\n\ndef _load_aux_with_full_name(\n full_name: str,\n) -> AuxModelInfo:\n state = _file_jar.get_file(full_name, lambda p: torch.load(p, map_location=\"cpu\"))\n\n # Check if load failed\n if state is None:\n return None\n\n epoch = state[EPOCH_STATE_KEY].item()\n batch = state[BATCH_STATE_KEY].item()\n num_batches_per_epoch = state[NUM_BATCHES_PER_EPOCH_KEY].item()\n train_loss = state[TRAIN_LOSS_STATE_KEY].item()\n valid_loss = state[VALID_LOSS_STATE_KEY].item()\n\n del state[EPOCH_STATE_KEY]\n del state[BATCH_STATE_KEY]\n del state[NUM_BATCHES_PER_EPOCH_KEY]\n del state[TRAIN_LOSS_STATE_KEY]\n del state[VALID_LOSS_STATE_KEY]\n\n return AuxModelInfo(\n state, epoch, batch, num_batches_per_epoch, train_loss, valid_loss\n )\n\n\ndef delete_aux(name: str) -> None:\n \"\"\"\n Deletes all saved instances of the specified auxiliary model from the disk.\n\n Args:\n name (str): The name of the auxiliary model to delete.\n \"\"\"\n for p in _file_jar.iterdir():\n if (\n _is_epoch_batch_format(p.name) and \"_\".join(p.name.split(\"_\")[:-2]) == name\n ) or p.name == _best_name(name):\n p.unlink(missing_ok=True)\n\n\ndef save_aux(\n name: str,\n info: AuxModelInfo,\n save_best: bool = True,\n) -> None:\n \"\"\"\n Associates the specified `name` with an auxiliary model `state`,\n and saves it to disk for the specified `epoch` and `batch`. The\n provided training and validation loss scores are also saved.\n\n Args:\n name (str): The name of the model to save.\n info (AudModelInfo): Information about the model to save.\n save_best (bool, optional): If `True`, the model will be saved as the \"best\"\n model if its validation loss score is lower than that of the previous\n best one. Defaults to True.\n \"\"\"\n\n # Add loss scores to output\n info.state[EPOCH_STATE_KEY] = torch.tensor(info.epoch)\n info.state[BATCH_STATE_KEY] = torch.tensor(info.batch)\n info.state[NUM_BATCHES_PER_EPOCH_KEY] = torch.tensor(info.num_batches_per_epoch)\n info.state[TRAIN_LOSS_STATE_KEY] = torch.tensor(info.train_loss)\n info.state[VALID_LOSS_STATE_KEY] = torch.tensor(info.valid_loss)\n\n # Save model\n _file_jar.store_file(\n _full_name(name, info.epoch, info.batch),\n _aux_save_func(info),\n )\n\n # Save model as best if applicable\n if save_best:\n best = load_aux_best(name)\n if best is None or info.valid_loss < best.valid_loss:\n _file_jar.store_file(\n _best_name(name),\n _aux_save_func(info),\n )\n\n\ndef load_aux(name: str, epoch: int, batch: int) -> AuxModelInfo:\n \"\"\"\n Loads the auxiliary model associated with the specified `name`,\n `epoch`, and `batch`. 
Metadata such as loss scores and epoch / batch\n info is also loaded.\n\n Args:\n name (str): The name of the model to load.\n epoch (int): The epoch to load from.\n batch (int): The batch to load from.\n\n Returns:\n AuxModelInfo: Information about the loaded model.\n If no such model exists, `None` is returned instead.\n \"\"\"\n return _load_aux_with_full_name(_full_name(name, epoch, batch))\n\n\ndef load_aux_best(\n name: str,\n) -> AuxModelInfo:\n \"\"\"\n Loads the best auxiliary model associated with the specified `name`.\n Metadata such as loss scores and epoch / batch info is also loaded.\n\n Args:\n name (str): The name of the model to load.\n\n Returns:\n AuxModelInfo: Information about the loaded model.\n If no best model exists, `None` is returned instead.\n \"\"\"\n return _load_aux_with_full_name(_best_name(name))\n\n\ndef get_available_aux_epoch_batch_pairs(name: str) -> List[Tuple[int, int]]:\n \"\"\"\n Returns tuples of available epoch and batch versions of the auxiliary\n model associated with the specified name.\n\n Returns:\n list[tuple[int, int]]: Tuples of (epoch, batch) format.\n \"\"\"\n return [\n _epoch_batch_from_full_name(path.name)\n for path in _file_jar.iterdir()\n if _is_epoch_batch_format(path.name) and path.name.startswith(name)\n ]\n\n\ndef plot_aux_loss(name: str, title: str = None):\n \"\"\"\n Plots the training and validation loss for the auxiliary model\n associated with the specified name.\n\n Args:\n name (str): The name of the model to plot for.\n title (str, optional): A title to use for the plot, or `None`\n if no title should be used. Note that the suffix `\" Training\"`\n will be appended to the specified title. Defaults to None.\n\n Raises:\n ValueError: If there exists no model with the specified name.\n \"\"\"\n\n # Get available versions\n epoch_batch_pairs = get_available_aux_epoch_batch_pairs(name)\n\n # Sanity check\n if not epoch_batch_pairs:\n raise ValueError(f\"No auxiliary model with name: '{name}'\")\n\n # Load data\n epochs = []\n train_loss_scores = []\n valid_loss_scores = []\n for e, b in epoch_batch_pairs:\n info = load_aux(name, e, b)\n\n # Derive the epoch points\n epochs.append((info.epoch - 1) + (info.batch) / info.num_batches_per_epoch)\n\n # Extract the loss scores and sort according to epoch order\n train_loss_scores.append(info.train_loss)\n valid_loss_scores.append(info.valid_loss)\n\n # Sort / convert data\n epochs = np.array(epochs)\n order = epochs.argsort()\n epochs = epochs[order]\n train_loss_scores = np.array(train_loss_scores)[order]\n valid_loss_scores = np.array(valid_loss_scores)[order]\n\n # Plot scores\n plt.plot(epochs, train_loss_scores, label=\"Training Loss Score\")\n plt.plot(epochs, valid_loss_scores, label=\"Validation Loss Score\")\n if title is not None:\n plt.title(f\"{title} Training\")\n plt.ylabel(\"Loss\")\n plt.xlabel(\"Epoch\")\n plt.legend(loc=\"upper right\")\n plt.show()\n\n\ndef get_file_jar() -> FileJar:\n \"\"\"\n Returns the file jar used by the model util.\n\n Returns:\n FileJar: The file jar used by the model util.\n \"\"\"\n return _file_jar\n" ]
[ [ "numpy.random.randint" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "torch.load", "torch.tensor", "matplotlib.pyplot.plot", "torch.save", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kif/pyFAI
[ "67226eb0adefbfd3fc5f7576a90a17bbc6bfb351" ]
[ "pyFAI/goniometer.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Project: Fast Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2017-2021 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# .\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# .\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"Everything you need to calibrate a detector mounted on a goniometer or any\ntranslation table\n\"\"\"\n\n__author__ = \"Jérôme Kieffer\"\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"19/08/2021\"\n__status__ = \"development\"\n__docformat__ = 'restructuredtext'\n\nimport os\nimport logging\nimport json\nimport numpy\nfrom collections import OrderedDict, namedtuple\nfrom scipy.optimize import minimize\nfrom silx.image import marchingsquares\nfrom .massif import Massif\nfrom .control_points import ControlPoints\nfrom .detectors import detector_factory, Detector\nfrom .geometry import Geometry\nfrom .geometryRefinement import GeometryRefinement\nfrom .azimuthalIntegrator import AzimuthalIntegrator\nfrom .utils import StringTypes\nfrom .multi_geometry import MultiGeometry\nfrom .units import CONST_hc, CONST_q\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import numexpr\nexcept ImportError:\n logger.debug(\"Backtrace\", exc_info=True)\n numexpr = None\n\n# Parameter set used in PyFAI:\nPoniParam = namedtuple(\"PoniParam\", [\"dist\", \"poni1\", \"poni2\", \"rot1\", \"rot2\", \"rot3\"])\n\n\nclass BaseTransformation(object):\n \"\"\"This class, once instanciated, behaves like a function (via the __call__\n method). It is responsible for taking any input geometry and translate it\n into a set of parameters compatible with pyFAI, i.e. 
a tuple with:\n (dist, poni1, poni2, rot1, rot2, rot3)\n\n This class relies on a user provided function which does the work.\n \"\"\"\n\n def __init__(self, funct, param_names, pos_names=None):\n \"\"\"Constructor of the class\n\n :param funct: function which takes as parameter the param_names and the pos_name\n :param param_names: list of names of the parameters used in the model\n :param pos_names: list of motor names for gonio with >1 degree of freedom\n \"\"\"\n self.callable = funct\n self.variables = {}\n self.param_names = tuple(param_names)\n if pos_names is not None:\n self.pos_names = tuple(pos_names)\n else:\n self.pos_names = (\"pos\",)\n for key in self.param_names + self.pos_names:\n if key in self.variables:\n raise RuntimeError(\"The keyword %s is already defined, please chose another variable name\")\n self.variables[key] = numpy.NaN\n self.codes = {}\n\n def __call__(self, param, pos):\n \"\"\"This makes the class instance behave like a function,\n actually a function that translates the n-parameter of the detector\n positioning on the goniometer and the m-parameters.\n :param param: parameter of the fit\n :param pos: position of the goniometer (representation from the\n goniometer)\n :return: 6-tuple with (dist, poni1, poni2, rot1, rot2, rot3) as needed\n for pyFAI.\n \"\"\"\n variables = self.variables.copy()\n for name, value in zip(self.param_names, param):\n variables[name] = value\n if len(self.pos_names) == 1:\n variables[self.pos_names[0]] = pos\n else:\n for name, value in zip(self.pos_names, pos):\n variables[name] = value\n\n res = self.callable(**variables)\n return PoniParam(*res)\n\n def __repr__(self):\n return \"BaseTransformation with param: %s and pos: %s\" % (self.param_names, self.pos_names)\n\n def to_dict(self):\n \"\"\"Export the instance representation for serialization as a dictionary\n \"\"\"\n raise RuntimeError(\"BaseTransformation is not serializable\")\n\n\nclass GeometryTransformation(object):\n \"\"\"This class, once instanciated, behaves like a function (via the __call__\n method). It is responsible for taking any input geometry and translate it\n into a set of parameters compatible with pyFAI, i.e. 
a tuple with:\n (dist, poni1, poni2, rot1, rot2, rot3)\n This function uses numexpr for formula evaluation.\n \"\"\"\n\n def __init__(self, dist_expr, poni1_expr, poni2_expr,\n rot1_expr, rot2_expr, rot3_expr,\n param_names, pos_names=None, constants=None,\n content=None):\n \"\"\"Constructor of the class\n\n :param dist_expr: formula (as string) providing with the dist\n :param poni1_expr: formula (as string) providing with the poni1\n :param poni2_expr: formula (as string) providing with the poni2\n :param rot1_expr: formula (as string) providing with the rot1\n :param rot2_expr: formula (as string) providing with the rot2\n :param rot3_expr: formula (as string) providing with the rot3\n :param param_names: list of names of the parameters used in the model\n :param pos_names: list of motor names for gonio with >1 degree of freedom\n :param constants: a dictionary with some constants the user may want to use\n :param content: Should be None or the name of the class (may be used\n in the future to dispatch to multiple derivative classes)\n \"\"\"\n if content is not None:\n # Ensures we use the constructor of the right class\n assert content in (self.__class__.__name__, \"GeometryTransformation\")\n if numexpr is None:\n raise RuntimeError(\"Geometry translation requires the *numexpr* package\")\n self.expressions = OrderedDict()\n if dist_expr is not None:\n self.expressions[\"dist\"] = dist_expr\n if poni1_expr is not None:\n self.expressions[\"poni1\"] = poni1_expr\n if poni2_expr is not None:\n self.expressions[\"poni2\"] = poni2_expr\n if rot1_expr is not None:\n self.expressions[\"rot1\"] = rot1_expr\n if rot2_expr is not None:\n self.expressions[\"rot2\"] = rot2_expr\n if rot3_expr is not None:\n self.expressions[\"rot3\"] = rot3_expr\n\n self.variables = {\"pi\": numpy.pi}\n if constants is not None:\n self.variables.update(constants)\n\n self.param_names = tuple(param_names)\n if pos_names is not None:\n self.pos_names = tuple(pos_names)\n else:\n self.pos_names = (\"pos\",)\n for key in self.param_names + self.pos_names:\n if key in self.variables:\n raise RuntimeError(f\"The keyword `{key}` is already defined, please chose another variable name\")\n self.variables[key] = numpy.NaN\n self.codes = OrderedDict(((name, numexpr.NumExpr(expr)) for name, expr in self.expressions.items()))\n\n @property\n def dist_expr(self):\n return self.expressions.get(\"dist\")\n\n @property\n def poni1_expr(self):\n return self.expressions.get(\"poni1\")\n\n @property\n def poni2_expr(self):\n return self.expressions.get(\"poni2\")\n\n @property\n def rot1_expr(self):\n return self.expressions.get(\"rot1\")\n\n @property\n def rot2_expr(self):\n return self.expressions.get(\"rot2\")\n\n @property\n def rot3_expr(self):\n return self.expressions.get(\"rot3\")\n\n def __call__(self, param, pos):\n \"\"\"This makes the class instance behave like a function,\n actually a function that translates the n-parameter of the detector\n positioning on the goniometer and the m-parameters.\n :param param: parameter of the fit\n :param pos: position of the goniometer (representation from the\n goniometer)\n :return: 6-tuple with (dist, poni1, poni2, rot1, rot2, rot3) as needed\n for pyFAI.\n \"\"\"\n res = {}\n variables = self.variables.copy()\n for name, value in zip(self.param_names, param):\n variables[name] = value\n if len(self.pos_names) == 1:\n variables[self.pos_names[0]] = pos\n else:\n for name, value in zip(self.pos_names, pos):\n variables[name] = value\n for name, code in self.codes.items():\n signa 
= [variables.get(name, numpy.NaN) for name in code.input_names]\n res[name] = (float(code(*signa)))\n # could ne done in a single liner but harder to understand !\n return PoniParam(**res)\n\n def __repr__(self):\n res = [\"GeometryTransformation with param: %s and pos: %s\" % (self.param_names, self.pos_names),\n \" dist= %s\" % self.dist_expr,\n \" poni1= %s\" % self.poni1_expr,\n \" poni2= %s\" % self.poni2_expr,\n \" rot1= %s\" % self.rot1_expr,\n \" rot2= %s\" % self.rot2_expr,\n \" rot3= %s\" % self.rot3_expr]\n return os.linesep.join(res)\n\n def to_dict(self):\n \"\"\"Export the instance representation for serialization as a dictionary\n \"\"\"\n res = OrderedDict([(\"content\", self.__class__.__name__),\n (\"param_names\", self.param_names),\n (\"pos_names\", self.pos_names),\n (\"dist_expr\", self.dist_expr),\n (\"poni1_expr\", self.poni1_expr),\n (\"poni2_expr\", self.poni2_expr),\n (\"rot1_expr\", self.rot1_expr),\n (\"rot2_expr\", self.rot2_expr),\n (\"rot3_expr\", self.rot3_expr),\n ])\n constants = OrderedDict()\n for key, val in self.variables.items():\n if key in self.param_names:\n continue\n if self.pos_names and key in self.pos_names:\n continue\n constants[key] = val\n res[\"constants\"] = constants\n return res\n\n\nclass ExtendedTransformation(object):\n \"\"\"This class behaves like GeometryTransformation and extends transformation\n to the wavelength parameter.\n\n This function uses numexpr for formula evaluation.\n \"\"\"\n\n def __init__(self, dist_expr=None, poni1_expr=None, poni2_expr=None,\n rot1_expr=None, rot2_expr=None, rot3_expr=None, wavelength_expr=None,\n param_names=None, pos_names=None, constants=None,\n content=None):\n \"\"\"Constructor of the class\n\n :param dist_expr: formula (as string) providing with the dist\n :param poni1_expr: formula (as string) providing with the poni1\n :param poni2_expr: formula (as string) providing with the poni2\n :param rot1_expr: formula (as string) providing with the rot1\n :param rot2_expr: formula (as string) providing with the rot2\n :param rot3_expr: formula (as string) providing with the rot3\n :param wavelength_expr: formula (as a string) to calculate wavelength used in angstrom\n :param param_names: list of names of the parameters used in the model\n :param pos_names: list of motor names for gonio with >1 degree of freedom\n :param constants: a dictionary with some constants the user may want to use\n :param content: Should be None or the name of the class (may be used\n in the future to dispatch to multiple derivative classes)\n \"\"\"\n if content is not None:\n # Ensures we use the constructor of the right class\n assert content in (self.__class__.__name__, \"ExtendedTransformation\")\n if numexpr is None:\n raise RuntimeError(\"This Transformation requires the *numexpr* package\")\n self.expressions = OrderedDict()\n\n if dist_expr is not None:\n self.expressions[\"dist\"] = dist_expr\n if poni1_expr is not None:\n self.expressions[\"poni1\"] = poni1_expr\n if poni2_expr is not None:\n self.expressions[\"poni2\"] = poni2_expr\n if rot1_expr is not None:\n self.expressions[\"rot1\"] = rot1_expr\n if rot2_expr is not None:\n self.expressions[\"rot2\"] = rot2_expr\n if rot3_expr is not None:\n self.expressions[\"rot3\"] = rot3_expr\n if wavelength_expr is not None:\n self.expressions[\"wavelength\"] = wavelength_expr\n self.ParamNT = namedtuple(\"ParamNT\", list(self.expressions.keys()))\n self.variables = {\"pi\": numpy.pi,\n \"hc\": CONST_hc,\n \"q\": CONST_q}\n if constants is not None:\n 
self.variables.update(constants)\n self.param_names = tuple(param_names) if param_names is not None else tuple()\n if pos_names is not None:\n self.pos_names = tuple(pos_names)\n else:\n self.pos_names = (\"pos\",)\n for key in self.param_names + self.pos_names:\n if key in self.variables:\n raise RuntimeError(\"The keyword %s is already defined, please chose another variable name\")\n self.variables[key] = numpy.NaN\n\n self.codes = OrderedDict(((name, numexpr.NumExpr(expr)) for name, expr in self.expressions.items()))\n\n def __call__(self, param, pos):\n \"\"\"This makes the class instance behave like a function,\n actually a function that translates the n-parameter of the detector\n positioning on the goniometer and the m-parameters.\n\n :param param: parameter of the fit\n :param pos: position of the goniometer (representation from the\n goniometer)\n :return: 6-tuple with (dist, poni1, poni2, rot1, rot2, rot3) as needed\n for pyFAI.\n \"\"\"\n res = {}\n variables = self.variables.copy()\n for name, value in zip(self.param_names, param):\n variables[name] = value\n if len(self.pos_names) == 1:\n variables[self.pos_names[0]] = pos\n else:\n for name, value in zip(self.pos_names, pos):\n variables[name] = value\n for name, code in self.codes.items():\n signa = [variables.get(name, numpy.NaN) for name in code.input_names]\n res[name] = (float(code(*signa)))\n # could ne done in a single liner but harder to understand !\n return self.ParamNT(**res)\n\n def __repr__(self):\n res = [\"%s with param: %s and pos: %s\" % (self.__class__.__name__, self.param_names, self.pos_names), ]\n for name, expr in self.expressions.items():\n res.append(\" %s= %s\" % (name, expr))\n return os.linesep.join(res)\n\n def to_dict(self):\n \"\"\"Export the instance representation for serialization as a dictionary\n \"\"\"\n res = OrderedDict([(\"content\", self.__class__.__name__),\n (\"param_names\", self.param_names),\n (\"pos_names\", self.pos_names),\n ])\n for name, expr in self.expressions.items():\n res[name + \"_expr\"] = expr\n constants = OrderedDict()\n for key, val in self.variables.items():\n if key in self.param_names:\n continue\n if self.pos_names and key in self.pos_names:\n continue\n constants[key] = val\n res[\"constants\"] = constants\n return res\n\n\nGeometryTranslation = GeometryTransformation\n\n\nclass Goniometer(object):\n \"\"\"This class represents the goniometer model. 
Unlike this name suggests,\n it may include translation in addition to rotations\n \"\"\"\n\n _file_version_1_1 = \"Goniometer calibration v1.1\"\n\n file_version = \"Goniometer calibration v2\"\n\n def __init__(self, param, trans_function, detector=\"Detector\",\n wavelength=None, param_names=None, pos_names=None):\n \"\"\"Constructor of the Goniometer class.\n\n :param param: vector of parameter to refine for defining the detector\n position on the goniometer\n :param trans_function: function taking the parameters of the\n goniometer and the goniometer position and return the\n 6 parameters [dist, poni1, poni2, rot1, rot2, rot3]\n :param detector: detector mounted on the moving arm\n :param wavelength: the wavelength used for the experiment\n :param param_names: list of names to \"label\" the param vector.\n :param pos_names: list of names to \"label\" the position vector of\n the gonio.\n \"\"\"\n\n self.param = param\n self.trans_function = trans_function\n self.detector = detector_factory(detector)\n self._wavelength = wavelength\n if param_names is None and \"param_names\" in dir(trans_function):\n param_names = trans_function.param_names\n if param_names is not None:\n if isinstance(param, dict):\n self.param = [param.get(i, 0) for i in param_names]\n self.nt_param = namedtuple(\"GonioParam\", param_names)\n else:\n self.nt_param = lambda *x: tuple(x)\n if pos_names is None and \"pos_names\" in dir(trans_function):\n pos_names = trans_function.pos_names\n self.nt_pos = namedtuple(\"GonioPos\", pos_names) if pos_names else lambda *x: tuple(x)\n\n def __repr__(self):\n return \"Goniometer with param %s %s with %s\" % (self.nt_param(*self.param), os.linesep, self.detector)\n\n @property\n def wavelength(self):\n wl_fct = self.trans_function.codes.get(\"wavelength\")\n if wl_fct is not None:\n # check that wavelengt does not depend on the motor position\n params = wl_fct.input_names\n for motor in self.trans_function.pos_names:\n if motor in params:\n logger.warning(\"Wavelength depends on motors, returning the default value\")\n return self._wavelength\n dummy_position = [0] * len(self.nt_pos._fields)\n return self.trans_function(self.param, dummy_position).wavelength\n else:\n return self._wavelength\n\n @wavelength.setter\n def wavelength(self, value):\n if \"wavelength\" in self.trans_function.codes:\n logger.warning(\"Wavelength is a fitted parameter, cannot be set. 
Please set fitted parameter\")\n else:\n self._wavelength = value\n\n def get_ai(self, position):\n \"\"\"Creates an azimuthal integrator from the motor position\n\n :param position: the goniometer position, a float for a 1 axis goniometer\n :return: A freshly build AzimuthalIntegrator\n \"\"\"\n res = self.trans_function(self.param, position)\n params = {\"detector\": self.detector,\n \"wavelength\": self._wavelength}\n for name, value in zip(res._fields, res):\n params[name] = value\n return AzimuthalIntegrator(**params)\n\n def get_mg(self, positions):\n \"\"\"Creates a MultiGeometry integrator from a list of goniometer\n positions.\n\n :param positions: A list of goniometer positions\n :return: A freshly build multi-geometry\n \"\"\"\n ais = [self.get_ai(pos) for pos in positions]\n mg = MultiGeometry(ais)\n return mg\n\n def to_dict(self):\n \"\"\"Export the goniometer configuration to a dictionary\n\n :return: Ordered dictionary\n \"\"\"\n dico = OrderedDict([(\"content\", self.file_version)])\n\n dico[\"detector\"] = self.detector.name\n dico[\"detector_config\"] = self.detector.get_config()\n\n if self.wavelength:\n dico[\"wavelength\"] = self.wavelength\n dico[\"param\"] = tuple(self.param)\n if \"_fields\" in dir(self.nt_param):\n dico[\"param_names\"] = self.nt_param._fields\n if \"_fields\" in dir(self.nt_pos):\n dico[\"pos_names\"] = self.nt_pos._fields\n if \"to_dict\" in dir(self.trans_function):\n dico[\"trans_function\"] = self.trans_function.to_dict()\n else:\n logger.warning(\"trans_function is not serializable\")\n return dico\n\n def save(self, filename):\n \"\"\"Save the goniometer configuration to file\n\n :param filename: name of the file to save configuration to\n \"\"\"\n dico = self.to_dict()\n try:\n with open(filename, \"w\") as f:\n f.write(json.dumps(dico, indent=2))\n except IOError:\n logger.error(\"IOError while writing to file %s\", filename)\n\n write = save\n\n @classmethod\n def _get_detector_from_dict(cls, dico):\n file_version = dico[\"content\"]\n if file_version == cls._file_version_1_1:\n # v1.1\n # Try to extract useful keys\n detector = Detector.factory(dico[\"detector\"])\n # This is not accurate, some keys could be missing\n keys = detector.get_config().keys()\n config = {}\n for k in keys:\n if k in dico:\n config[k] = dico[k]\n del dico[k]\n detector = Detector.factory(dico[\"detector\"], config)\n else:\n # v2\n detector = Detector.factory(dico[\"detector\"], dico.get(\"detector_config\", None))\n return detector\n\n @classmethod\n def sload(cls, filename):\n \"\"\"Class method for instanciating a Goniometer object from a JSON file\n\n :param filename: name of the JSON file\n :return: Goniometer object\n \"\"\"\n\n with open(filename) as f:\n dico = json.load(f)\n assert \"trans_function\" in dico, \"No translation function defined in JSON file\"\n file_version = dico[\"content\"]\n assert file_version in [cls.file_version, cls._file_version_1_1], \"JSON file contains a goniometer calibration\"\n detector = cls._get_detector_from_dict(dico)\n tansfun = dico.get(\"trans_function\", {})\n if \"content\" in tansfun:\n content = tansfun.pop(\"content\")\n # May be adapted for other classes of GeometryTransformation functions\n if content in (\"GeometryTranslation\", \"GeometryTransformation\"):\n funct = GeometryTransformation(**tansfun)\n elif content == \"ExtendedTransformation\":\n funct = ExtendedTransformation(**tansfun)\n else:\n raise RuntimeError(f\"content={content}, not in in (GeometryTranslation, GeometryTransformation, 
ExtendedTransformation)\")\n else: # assume GeometryTransformation\n funct = GeometryTransformation(**tansfun)\n\n gonio = cls(param=dico.get(\"param\", []),\n trans_function=funct,\n detector=detector,\n wavelength=dico.get(\"wavelength\"))\n return gonio\n\n\nclass SingleGeometry(object):\n \"\"\"This class represents a single geometry of a detector position on a\n goniometer arm\n \"\"\"\n\n def __init__(self, label, image=None, metadata=None, pos_function=None,\n control_points=None, calibrant=None, detector=None, geometry=None):\n \"\"\"Constructor of the SingleGeometry class, used for calibrating a\n multi-geometry setup with a moving detector.\n\n :param label: name of the geometry, a string or anything unmutable\n :param image: image with Debye-Scherrer rings as 2d numpy array\n :param metadata: anything which contains the goniometer position\n :param pos_function: a function which takes the metadata as input\n and returns the goniometer arm position\n :param control_points: a pyFAI.control_points.ControlPoints instance\n (optional parameter)\n :param calibrant: a pyFAI.calibrant.Calibrant instance.\n Contains the wavelength to be used (optional parameter)\n :param detector: a pyFAI.detectors.Detector instance or something like\n that Contains the mask to be used (optional parameter)\n :param geometry: an azimuthal integrator or a ponifile\n (or a dict with the geometry) (optional parameter)\n \"\"\"\n self.label = label\n self.image = image\n self.metadata = metadata # may be anything\n self.calibrant = calibrant\n if control_points is None or isinstance(control_points, ControlPoints):\n self.control_points = control_points\n else:\n # Probaly a NPT file\n self.control_points = ControlPoints(control_points, calibrant=calibrant)\n\n if detector is not None:\n self.detector = detector_factory(detector)\n else:\n self.detector = None\n if isinstance(geometry, Geometry):\n dict_geo = geometry.getPyFAI()\n elif isinstance(geometry, StringTypes) and os.path.exists(geometry):\n dict_geo = Geometry.sload(geometry).getPyFAI()\n elif isinstance(geometry, dict):\n dict_geo = geometry\n if self.detector is not None:\n dict_geo[\"detector\"] = self.detector\n if self.control_points is not None:\n dict_geo[\"data\"] = self.control_points.getList()\n if self.calibrant is not None:\n dict_geo[\"calibrant\"] = self.calibrant\n if \"max_shape\" in dict_geo:\n # not used in constructor\n dict_geo.pop(\"max_shape\")\n self.geometry_refinement = GeometryRefinement(**dict_geo)\n if self.detector is None:\n self.detector = self.geometry_refinement.detector\n self.pos_function = pos_function\n self.massif = None\n\n def get_position(self):\n \"\"\"This method is in charge of calculating the motor position from metadata/label/...\"\"\"\n return self.pos_function(self.metadata)\n\n def extract_cp(self, max_rings=None, pts_per_deg=1.0, Imin=0):\n \"\"\"Performs an automatic keypoint extraction and update the geometry refinement part\n\n :param max_ring: extract at most N rings from the image\n :param pts_per_deg: number of control points per azimuthal degree (increase for better precision)\n \"\"\"\n if self.massif is None:\n self.massif = Massif(self.image)\n\n tth = numpy.array([i for i in self.calibrant.get_2th() if i is not None])\n tth = numpy.unique(tth)\n tth_min = numpy.zeros_like(tth)\n tth_max = numpy.zeros_like(tth)\n delta = (tth[1:] - tth[:-1]) / 4.0\n tth_max[:-1] = delta\n tth_max[-1] = delta[-1]\n tth_min[1:] = -delta\n tth_min[0] = -delta[0]\n tth_max += tth\n tth_min += tth\n shape = 
self.image.shape\n ttha = self.geometry_refinement.twoThetaArray(shape)\n chia = self.geometry_refinement.chiArray(shape)\n rings = 0\n cp = ControlPoints(calibrant=self.calibrant)\n if max_rings is None:\n max_rings = tth.size\n\n ms = marchingsquares.MarchingSquaresMergeImpl(ttha,\n mask=self.geometry_refinement.detector.mask,\n use_minmax_cache=True)\n for i in range(tth.size):\n if rings >= max_rings:\n break\n mask = numpy.logical_and(ttha >= tth_min[i], ttha < tth_max[i])\n if self.detector.mask is not None:\n mask = numpy.logical_and(mask, numpy.logical_not(self.geometry_refinement.detector.mask))\n size = mask.sum(dtype=int)\n if (size > 0):\n rings += 1\n sub_data = self.image.ravel()[numpy.where(mask.ravel())]\n mean = sub_data.mean(dtype=numpy.float64)\n std = sub_data.std(dtype=numpy.float64)\n upper_limit = mean + std\n mask2 = numpy.logical_and(self.image > upper_limit, mask)\n size2 = mask2.sum(dtype=int)\n if size2 < 1000:\n upper_limit = mean\n mask2 = numpy.logical_and(self.image > upper_limit, mask)\n size2 = mask2.sum()\n # length of the arc:\n points = ms.find_pixels(tth[i])\n seeds = set((i[0], i[1]) for i in points if mask2[i[0], i[1]])\n # max number of points: 360 points for a full circle\n azimuthal = chia[points[:, 0].clip(0, shape[0]), points[:, 1].clip(0, shape[1])]\n nb_deg_azim = numpy.unique(numpy.rad2deg(azimuthal).round()).size\n keep = int(nb_deg_azim * pts_per_deg)\n if keep == 0:\n continue\n dist_min = len(seeds) / 2.0 / keep\n # why 3.0, why not ?\n\n logger.info(\"Extracting datapoint for ring %s (2theta = %.2f deg); \" +\n \"searching for %i pts out of %i with I>%.1f, dmin=%.1f\",\n i, numpy.degrees(tth[i]), keep, size2, upper_limit, dist_min)\n res = self.massif.peaks_from_area(mask2, Imin=Imin, keep=keep, dmin=dist_min, seed=seeds, ring=i)\n cp.append(res, i)\n self.control_points = cp\n self.geometry_refinement.data = numpy.asarray(cp.getList(), dtype=numpy.float64)\n return cp\n\n def get_ai(self):\n \"\"\"Create a new azimuthal integrator to be used.\n\n :return: Azimuthal Integrator instance\n \"\"\"\n config = self.geometry_refinement.get_config()\n ai = AzimuthalIntegrator()\n ai.set_config(config)\n return ai\n\n\nclass GoniometerRefinement(Goniometer):\n \"\"\"This class allow the translation of a goniometer geometry into a pyFAI\n geometry using a set of parameter to refine.\n \"\"\"\n\n def __init__(self, param, pos_function, trans_function,\n detector=\"Detector\", wavelength=None, param_names=None, pos_names=None,\n bounds=None):\n \"\"\"Constructor of the GoniometerRefinement class\n\n :param param: vector of parameter to refine for defining the detector\n position on the goniometer\n :param pos_function: a function taking metadata and extracting the\n goniometer position\n :param trans_function: function taking the parameters of the\n goniometer and the gonopmeter position and return the\n 6/7 parameters [dist, poni1, poni2, rot1, rot2, rot3, wavelength]\n :param detector: detector mounted on the moving arm\n :param wavelength: the wavelength used for the experiment\n :param param_names: list of names to \"label\" the param vector.\n :param pos_names: list of names to \"label\" the position vector of the\n gonio.\n :param bounds: list of 2-tuple with the lower and upper bound of each function\n \"\"\"\n Goniometer.__init__(self, param, trans_function,\n detector=detector, wavelength=wavelength,\n param_names=param_names, pos_names=pos_names)\n self.single_geometries = OrderedDict() # a dict of labels: SingleGeometry\n if bounds is 
None:\n self.bounds = [(None, None)] * len(self.param)\n else:\n if isinstance(bounds, dict) and \"_fields\" in dir(self.nt_param):\n self.bounds = [bounds.get(i, (None, None))\n for i in self.nt_param._fields]\n else:\n self.bounds = list(bounds)\n self.pos_function = pos_function\n self.fit_wavelength = \"wavelength\" in self.trans_function.codes\n\n def new_geometry(self, label, image=None, metadata=None, control_points=None,\n calibrant=None, geometry=None):\n \"\"\"Add a new geometry for calibration\n\n :param label: usually a string\n :param image: 2D numpy array with the Debye scherrer rings\n :param metadata: some metadata\n :param control_points: an instance of ControlPoints\n :param calibrant: the calibrant used for calibrating\n :param geometry: poni or AzimuthalIntegrator instance.\n \"\"\"\n if geometry is None:\n geometry = self.get_ai(self.pos_function(metadata))\n sg = SingleGeometry(label=label,\n image=image,\n metadata=metadata,\n control_points=control_points,\n calibrant=calibrant,\n detector=self.detector,\n pos_function=self.pos_function,\n geometry=geometry)\n self.single_geometries[label] = sg\n return sg\n\n def __repr__(self):\n name = self.__class__.__name__\n count = len(self.single_geometries)\n geometry_list = \", \".join(self.single_geometries.keys())\n return \"%s with %i geometries labeled: %s.\" % (name, count, geometry_list)\n\n def residu2(self, param):\n \"Actually performs the calulation of the average of the error squared\"\n sumsquare = 0.0\n npt = 0\n for single in self.single_geometries.values():\n motor_pos = single.get_position()\n single_param = self.trans_function(param, motor_pos)._asdict()\n pyFAI_param = [single_param.get(name, 0.0)\n for name in [\"dist\", \"poni1\", \"poni2\", \"rot1\", \"rot2\", \"rot3\"]]\n pyFAI_param.append(single_param.get(\"wavelength\", self.wavelength) * 1e10)\n if (single.geometry_refinement is not None) and (len(single.geometry_refinement.data) >= 1):\n sumsquare += single.geometry_refinement.chi2_wavelength(pyFAI_param)\n npt += single.geometry_refinement.data.shape[0]\n return sumsquare / max(npt, 1)\n\n def chi2(self, param=None):\n \"\"\"Calculate the average of the square of the error for a given parameter set\n \"\"\"\n if param is not None:\n return self.residu2(param)\n else:\n return self.residu2(self.param)\n\n def refine2(self, method=\"slsqp\", **options):\n \"\"\"Geometry refinement tool\n\n See https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.minimize.html\n\n :param method: name of the minimizer\n :param options: options for the minimizer\n \"\"\"\n if method.lower() in [\"simplex\", \"nelder-mead\"]:\n method = \"Nelder-Mead\"\n bounds = None\n else:\n bounds = self.bounds\n former_error = self.chi2()\n print(\"Cost function before refinement: %s\" % former_error)\n param = numpy.asarray(self.param, dtype=numpy.float64)\n print(param)\n res = minimize(self.residu2, param, method=method,\n bounds=bounds, tol=1e-12,\n options=options)\n print(res)\n newparam = res.x\n new_error = res.fun\n print(\"Cost function after refinement: %s\" % new_error)\n print(self.nt_param(*newparam))\n\n # print(\"Constrained Least square %s --> %s\" % (former_error, new_error))\n if new_error < former_error:\n # print(param, newparam)\n\n i = abs(param - newparam).argmax()\n if \"_fields\" in dir(self.nt_param):\n name = self.nt_param._fields[i]\n print(\"maxdelta on: %s (%i) %s --> %s\" % (name, i, self.param[i], newparam[i]))\n else:\n print(\"maxdelta on: %i %s --> %s\" % (i, 
self.param[i], newparam[i]))\n self.param = newparam\n # update wavelength after successful optimization: not easy\n # if self.fit_wavelength:\n # self.wavelength = self.\n elif self.fit_wavelength:\n print(\"Restore wavelength and former parameters\")\n former_wavelength = self.wavelength\n for sg in self.single_geometries.values():\n sg.calibrant.setWavelength_change2th(former_wavelength)\n print(self.nt_param(*self.param))\n return self.param\n\n def set_bounds(self, name, mini=None, maxi=None):\n \"\"\"Redefines the bounds for the refinement\n\n :param name: name of the parameter or index in the parameter set\n :param mini: minimum value\n :param maxi: maximum value\n \"\"\"\n if isinstance(name, StringTypes) and \"_fields\" in dir(self.nt_param):\n idx = self.nt_param._fields.index(name)\n else:\n idx = int(name)\n self.bounds[idx] = (mini, maxi)\n\n @classmethod\n def sload(cls, filename, pos_function=None):\n \"\"\"Class method for instantiating a Goniometer object from a JSON file\n\n :param filename: name of the JSON file\n :param pos_function: a function taking metadata and extracting the\n goniometer position\n :return: Goniometer object\n \"\"\"\n\n with open(filename) as f:\n dico = json.load(f)\n assert dico[\"content\"] == cls.file_version, \"JSON file does not contain a goniometer calibration\"\n assert \"trans_function\" in dico, \"No translation function defined in JSON file\"\n detector = cls._get_detector_from_dict(dico)\n tansfun = dico.get(\"trans_function\", {})\n if \"content\" in tansfun:\n content = tansfun.pop(\"content\")\n # May be adapted for other classes of GeometryTransformation functions\n if content in (\"GeometryTranslation\", \"GeometryTransformation\"):\n funct = GeometryTransformation(**tansfun)\n elif content == \"ExtendedTransformation\":\n funct = ExtendedTransformation(**tansfun)\n else:\n raise RuntimeError(\"content=%s, not in (GeometryTranslation, GeometryTransformation, ExtendedTransformation)\" % content)\n else: # assume GeometryTransformation\n funct = GeometryTransformation(**tansfun)\n\n gonio = cls(param=dico.get(\"param\", []),\n trans_function=funct,\n pos_function=pos_function,\n detector=detector,\n wavelength=dico.get(\"wavelength\"))\n return gonio\n" ]
[ [ "numpy.logical_not", "numpy.unique", "numpy.asarray", "numpy.degrees", "numpy.rad2deg", "scipy.optimize.minimize", "numpy.zeros_like", "numpy.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
Dakad/HackSabamApp
[ "03fb248f6c067febdc5d131d9cdc8fa82c80c40d" ]
[ "pipeline/improv.py" ]
[ "from skimage.filters import threshold_local\nimport numpy as np\nimport cv2\nimport imutils\n\n\n# https://github.com/yardstick17/image_text_reader/blob/master/image_preprocessing/remove_noise.py\n\ndef _order_points_(pts):\n # initialzie a list of coordinates that will be ordered\n # such that the first entry in the list is the top-left,\n # the second entry is the top-right, the third is the\n # bottom-right, and the fourth is the bottom-left\n rect = np.zeros((4, 2), dtype=\"float32\")\n\n # the top-left point will have the smallest sum, whereas\n # the bottom-right point will have the largest sum\n s = pts.sum(axis=1)\n rect[0] = pts[np.argmin(s)]\n rect[2] = pts[np.argmax(s)]\n\n # now, compute the difference between the points, the\n # top-right point will have the smallest difference,\n # whereas the bottom-left will have the largest difference\n diff = np.diff(pts, axis=1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n\n # return the ordered coordinates\n return rect\n\n\ndef four_point_transform(image, pts):\n # obtain a consistent order of the points and unpack them\n # individually\n rect = _order_points_(pts)\n (tl, tr, br, bl) = rect\n\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array(\n [[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]],\n dtype=\"float32\",\n )\n\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n\n # return the warped image\n return warped\n\n\ndef detect_edge(image):\n # Convert the img to grayscale\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n # Blur it\n # gray = cv2.GaussianBlur(gray, (5, 5), 0)\n # Find the edges\n edged = cv2.Canny(gray, 75, 200)\n\n return edged\n\n\ndef detect_contours(image):\n # Find the contours in the image\n cnts = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cnts = imutils.grab_contours(cnts)\n # Keep the largest ones\n cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]\n\n for c in cnts:\n # Approximate the contour\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n # ? 
Has 4 pts ?\n if len(approx) == 4:\n return [approx]\n # Otherwise, send back an empty list\n return []\n\n\ndef get_transform(image, contour, ratio, has_effect=False):\n # Apply the 4-pt transform on the original image\n four_point = four_point_transform(image, contour.reshape(4, 2) * ratio)\n # Convert warped img to GRAY\n warped = cv2.cvtColor(four_point, cv2.COLOR_BGR2GRAY)\n\n effect = None\n if has_effect:\n # Threshold it\n T = threshold_local(warped, 11, offset=10, method=\"gaussian\")\n # Apply 'black & white' paper effect\n effect = (warped > T).astype(\"uint8\") * 255\n\n return (warped, effect)\n\n\ndef deskew(image, gray):\n \"\"\"A skewed image is defined as an image which is not straight.\n Skewed images directly impact the line segmentation of the OCR engine, which reduces its accuracy\n\n \"\"\"\n\n # Flip the foreground\n gray = cv2.bitwise_not(gray)\n\n # Threshold, set foreground px to 255 and background to 0\n thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n\n # Get the (x,y) coords\n coords = np.column_stack(np.where(thresh > 0))\n\n # Get the rotated bounding box of these coords\n # https://stackoverflow.com/questions/15956124/minarearect-angles-unsure-about-the-angle-returned\n angle = cv2.minAreaRect(coords)[-1] # Return value in [-90, 0]\n\n # As the rect rotates clockwise, angle --> 0\n if angle < -45:\n angle = -(90 + angle)\n else:\n # Just take the inverse\n angle = -angle\n\n rotated = None\n\n if np.abs(angle) != 0:\n # Rotate the image\n (height, width) = gray.shape[:2]\n center = (width // 2, height // 2)\n matrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n rotated = cv2.warpAffine(\n image,\n matrix,\n (width, height),\n flags=cv2.INTER_CUBIC,\n borderMode=cv2.BORDER_REPLICATE,\n )\n\n return [rotated, angle]\n\n\ndef remove_shadow(image):\n result_planes = []\n rgb_planes = cv2.split(image)\n\n for plane in rgb_planes:\n # Dilate and blur each channel to estimate the background\n img_dilated = cv2.dilate(plane, np.ones((7, 7), np.uint8))\n img_bg = cv2.medianBlur(img_dilated, 21)\n img_diff = 255 - cv2.absdiff(plane, img_bg)\n img_norm = cv2.normalize(\n img_diff, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n result_planes.append(img_norm)\n result = cv2.merge(result_planes)\n return result\n\n\n# Global threshold level used by image_smoothening (assumed default; tune for your scans)\nBINARY_THRESHOLD = 180\n\n\ndef image_smoothening(img):\n ret1, th1 = cv2.threshold(img, BINARY_THRESHOLD, 255, cv2.THRESH_BINARY)\n ret2, th2 = cv2.threshold(th1, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n blur = cv2.GaussianBlur(th2, (1, 1), 0)\n ret3, th3 = cv2.threshold(\n blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return th3\n\n\ndef remove_noise(image):\n \"\"\" Noise is a random variation of color or brightness between pixels.\n Noise decreases the readability of text from an image.\n There are two major types of noise:\n Salt & Pepper\n Gaussian\n \"\"\"\n # Adaptive mean threshold to separate text from background\n filtered = cv2.adaptiveThreshold(image.astype(np.uint8), 255,\n cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)\n kernel = np.ones((1, 1), np.uint8)\n opening = cv2.morphologyEx(filtered, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n\n img = image_smoothening(image)\n transform = cv2.bitwise_or(img, closing)\n return transform\n\n\ndef kmeans(input_img, k, i_val):\n # _, thresh = cv2.threshold(img, kmeans(input_img=img, k=8, i_val=2)[0], 255, cv2.THRESH_BINARY)\n\n hist = cv2.calcHist([input_img], [0], None, [256], [0, 256])\n img = input_img.ravel()\n img = np.reshape(img, (-1, 1))\n img = img.astype(np.float32)\n\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n flags = cv2.KMEANS_RANDOM_CENTERS\n compactness, labels, centers = cv2.kmeans(\n img, k, None, criteria, 10, flags)\n centers = np.sort(centers, axis=0)\n\n return centers[i_val].astype(int), centers, hist\n" ]
[ [ "numpy.sqrt", "numpy.abs", "numpy.reshape", "numpy.sort", "numpy.ones", "numpy.argmax", "numpy.diff", "numpy.argmin", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
martincourtois/CodinGame
[ "4df9c8af5cb9c513880dd086da9bf3f201bd56ed" ]
[ "CLASSIC PUZZLE - MEDIUM/222-rubiks-cube-movements.py" ]
[ "import sys\nimport math\nimport re\nimport numpy as np\n\nFRONT = [\n ( (2, 2), (2, 3), (3, 3), (3, 2), (2, 2) ),\n ( (1, 2), (2, 4), (4, 3), (3, 1), (1, 2) ),\n ( (1, 3), (3, 4), (4, 2), (2, 1), (1, 3) )\n]\nBACK = [\n ( (2, 6), (2, 7), (3, 7), (3, 6), (2, 6) ),\n\t( (2, 5), (0, 2), (3, 0), (5, 3), (2, 5) ),\n\t( (0, 3), (2, 0), (5, 2), (3, 5), (0, 3) )\n]\nRIGHT = [\n ( (2, 4), (2, 5), (3, 5), (3, 4), (2, 4) ),\n ( (5, 3), (3, 3), (1, 3), (2, 6), (5, 3) ),\n ( (4, 3), (2, 3), (0, 3), (3, 6), (4, 3) )\n]\nLEFT = [\n ( (2, 0), (2, 1), (3, 1), (3, 0), (2, 0) ),\n\t( (0, 2), (2, 2), (4, 2), (3, 7), (0, 2) ),\n\t( (1, 2), (3, 2), (5, 2), (2, 7), (1, 2) )\n]\nUP = [\n ( (0, 2), (0, 3), (1, 3), (1, 2), (0, 2) ),\n\t( (2, 7), (2, 5), (2, 3), (2, 1), (2, 7) ),\n\t( (2, 6), (2, 4), (2, 2), (2, 0), (2, 6) )\n]\nDOWN = [\n ( (4, 2), (4, 3), (5, 3), (5, 2), (4, 2) ),\n\t( (3, 0), (3, 2), (3, 4), (3, 6), (3, 0) ),\n\t( (3, 1), (3, 3), (3, 5), (3, 7), (3, 1) )\n]\n\ndef transform(array, moves) -> np.ndarray:\n new_array = np.array(array)\n for cycle in moves:\n for i1, i2 in zip(cycle[0:-1], cycle[1:]):\n new_array[i2] = array[i1]\n return new_array\n\ndef operation(array, moves, rev=False):\n if rev:\n moves = [c[::-1] for c in moves]\n return transform(array, moves)\n\ndef make_a_move(cube, op, rev, twice):\n moves = {\n 'F': FRONT,\n 'B': BACK,\n 'R': RIGHT,\n 'L': LEFT,\n 'U': UP,\n 'D': DOWN,\n }\n moves = moves[op]\n if twice:\n return operation(operation(cube, moves, rev), moves, rev)\n else:\n return operation(cube, moves, rev)\n\ndef parse(string, cube):\n pattern1 = \"\"\"[FBRLUD]'?2?\"\"\"\n pattern2 = \"\"\"(?P<op>[FBRLUD])(?P<rev>'?)(?P<twice>2?)\"\"\"\n \n print(cube, file=sys.stderr)\n for action in re.findall(pattern1, string):\n op, rev, twice = re.match(pattern2, action).groups()\n rev = bool(rev)\n twice = bool(twice)\n cube = make_a_move(cube, op, rev, twice)\n return cube\n\nmove = input()\ncube = np.full((6, 8), ' ')\ncube[0:2, 2:4] = 'U'\ncube[2:4, 2:4] = 'F'\ncube[4:6, 2:4] = 'D'\ncube[2:4, 0:2] = 'L'\ncube[2:4, 4:6] = 'R'\ncube[2:4, 6:8] = 'B'\ncube = parse(move, cube)\n\nprint(''.join(cube[2, 2:4]))\nprint(''.join(cube[3, 2:4]))\n" ]
[ [ "numpy.array", "numpy.full" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ConorDevilly/bar_speed
[ "7a48a21b7dc9794b99a0f65c59a12f7f65d385e6" ]
[ "src/plot.py" ]
[ "import matplotlib.pyplot as plt\nimport argparse\nimport numpy as np\n\n\nclass BarGraph():\n \"\"\"\n Loads data from a log file\n Has ability to display three graphs: Speed, Acceleration and Power\n \"\"\"\n\n def load_data(self, path_to_data):\n \"\"\"Parse necessary data from a file\"\"\"\n with open(path_to_data, 'r') as log:\n lines = log.readlines()\n self.distance_data = self._load_distance_data(lines)\n self.cm_multiplier = self._load_cm_multiplier(lines)\n self.fps = self._load_fps(lines)\n\n def _load_distance_data(self, log):\n \"\"\"Returns distance data parsed from file\"\"\"\n distance_data = []\n for line in log:\n if 'INFO:root:Frame' in line and 'Distance moved' in line:\n distance_data.append(line.split(' ')[-1].strip())\n return distance_data\n\n def _load_cm_multiplier(self, log):\n \"\"\"Returns cm multiplier parsed from file\"\"\"\n cm_mult = None\n for line in log:\n if 'INFO:root:CM Multiplier:' in line:\n cm_mult = float(line.split(' ')[-1].strip())\n return cm_mult\n\n def _load_fps(self, log):\n \"\"\"Returns fps parsed from file\"\"\"\n fps = None\n for line in log:\n if 'INFO:root:Video FPS:' in line:\n fps = int(float(line.split(' ')[-1].strip()))\n return fps\n\n def _transform_to_velocity_data(self, distance_data, cm_multiplier):\n \"\"\"Turns a list of distance data into velocity data\"\"\"\n prev_dist = 0\n velocity_data = []\n for data_point in distance_data:\n curr_dist = int(data_point)\n dist_dif = (curr_dist - prev_dist) * cm_multiplier\n velocity_data.append(dist_dif)\n prev_dist = curr_dist\n return velocity_data\n\n def _transform_to_speed_data(self, distance_data, cm_multiplier):\n \"\"\"Turns distance data into speed data\"\"\"\n return [abs(data_point) for data_point in self._transform_to_velocity_data(distance_data, cm_multiplier)]\n\n def _get_total_seconds(self):\n \"\"\"Return the total seconds, based on FPS\"\"\"\n if not self.fps:\n raise Exception(\"FPS not loaded\")\n return(len(self.distance_data) / self.fps)\n\n def _reduce_points(self, data, interval):\n \"\"\"Returns intervals of data list\"\"\"\n return data[0::interval]\n\n def _reduce_data_half_second(self, data):\n \"\"\"Reduce number of points to be one every 0.5 seconds\"\"\"\n total_data_points = self._get_total_seconds() * 2\n point_interval = len(self.distance_data) / total_data_points\n return data[0::point_interval]\n\n def plot_speed_graph(self):\n \"\"\"Displays a speed graph based on distance_data\"\"\"\n _title = 'Speed'\n # Convert data into speed form (absolute value of velocity)\n speed_data = self._transform_to_speed_data(self.distance_data, self.cm_multiplier)\n speed_data = self._reduce_data_half_second(speed_data)\n self.plot_graph(speed_data, _title)\n\n def plot_velocity_graph(self):\n \"\"\"Displays a velocity graph based on distance_data\"\"\"\n _title = 'Velocity'\n velocity_data = self.distance_data\n velocity_data = self._transform_to_velocity_data(self.distance_data, self.cm_multiplier)\n velocity_data = self._reduce_data_half_second(velocity_data)\n self.plot_graph(velocity_data, _title)\n\n def plot_acceleration_graph(self):\n \"\"\"Displays a acceleration graph based on distance_data\"\"\"\n _title = 'Acceleration'\n _xlabel = 'Seconds^2'\n speed_data = self._transform_to_speed_data(self.distance_data, self.cm_multiplier)\n speed_data = self._reduce_data_half_second(speed_data)\n acceleration_data = []\n\n prev_speed = 0\n # Transform speed data into acceleration data\n for curr_speed in speed_data:\n acceleration_data.append(abs(prev_speed - 
curr_speed))\n prev_speed = curr_speed\n self.plot_graph(acceleration_data, _title, xlabel=_xlabel)\n\n def plot_graph(self, data, title, ylabel='Centimeters', xlabel='Seconds'):\n \"\"\"Add data to a graph\"\"\"\n # Calculate total points needed on x axis\n total_seconds = len(data) / 2\n # Add an extra data point if there entries left over\n if len(data) % 2 == 1:\n total_seconds += 0.5\n time = np.arange(0, total_seconds, 0.5)\n plt.plot(time, data)\n plt.title(title)\n plt.ylabel(ylabel)\n plt.xlabel(xlabel)\n\n def plot_all_graphs(self):\n \"\"\"Show all graphs\"\"\"\n plt.subplot(221)\n self.plot_speed_graph()\n plt.subplot(222)\n self.plot_velocity_graph()\n plt.subplot(223)\n self.plot_acceleration_graph()\n\n def show_graph(self):\n \"\"\"Display loaded graphs\"\"\"\n plt.tight_layout()\n plt.show()\n\n\ndef setup_parser():\n \"\"\"\n Sets up arguments\n :return: Parser object with path and graph flags\n :rtype: ArgumentParser\n \"\"\"\n parser = argparse.ArgumentParser(description='Displays graphs based on given log file')\n parser.add_argument('-p', '--path', help='Path to log file', required=True)\n parser.add_argument('-g', '--graph', help='Graph to display', required=True,\n choices=['speed', 'velocity', 'acceleration', 'all'])\n return parser\n\n\ndef main():\n parser = setup_parser()\n args = parser.parse_args()\n bar_graph = BarGraph()\n bar_graph.load_data(args.path)\n plot_graph = {\n 'speed': bar_graph.plot_speed_graph,\n 'velocity': bar_graph.plot_velocity_graph,\n 'acceleration': bar_graph.plot_acceleration_graph,\n 'all': bar_graph.plot_all_graphs\n }.get(args.graph, 'velocity')\n plot_graph()\n bar_graph.show_graph()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michellechena/azure-intelligent-edge-patterns
[ "b1260b962b208880532391e7ef2148d240f489f8" ]
[ "factory-ai-vision/EdgeSolution/modules/OVMSAdaptorModule/app/ovms_batchImageProcessor.py" ]
[ "import logging\nfrom PIL import Image, ImageDraw\nimport io\nimport numpy as np\nfrom tensorflow import make_tensor_proto, make_ndarray\nimport cv2\nimport inferencing_pb2\nimport media_pb2\nimport extension_pb2\nimport os\nimport ovms\nimport time\n\nfrom cascade.voe_to_ovms import load_voe_config_from_json, voe_config_to_ovms_config\n\nimport threading\n\ndef process_voe_config(processor):\n while True:\n if os.path.exists('/workspace/voe_config.json'):\n metadatas_json = open('/workspace/voe_config.json').read()\n if metadatas_json != processor.metadatas_json:\n print('Updating Metadatas...')\n voe_config = load_voe_config_from_json(metadatas_json)\n _, metadatas = voe_config_to_ovms_config(voe_config)\n\n processor.metadatas = metadatas\n processor.metadatas_json = metadatas_json\n\n time.sleep(3)\n\ndef process_response(response, img, metadatas):\n predictions = []\n if response is not None:\n coordinates = make_ndarray(response.outputs['coordinates'])\n confidences = make_ndarray(response.outputs['confidences'])\n attributes = []\n\n for k in response.outputs:\n if (metadatas is not None) and (k in metadatas):\n #print(k)\n #print(metadatas[k])\n metadata = metadatas[k]\n if metadata['type'] == 'classification':\n ndarray = make_ndarray(response.outputs[k])\n tag_indexes = np.argmax(ndarray, axis=2).flatten()\n tags = list(metadata['labels'][tag_index]\n for tag_index in tag_indexes)\n confidences = np.max(ndarray, axis=2).flatten()\n attributes.append({\n 'name': k,\n 'type': 'classification',\n 'values': tags,\n 'confidences': confidences\n })\n if metadata['type'] == 'regression':\n ndarray = make_ndarray(response.outputs[k])\n scores = ndarray\n if 'scale' in metadata:\n scores *= metadata['scale']\n scores = scores.flatten().astype('int').tolist()\n attributes.append({\n 'name': k,\n 'type': 'regression',\n 'values': scores\n })\n\n n = coordinates.shape[0]\n predictions = []\n for i in range(n):\n x1, y1, x2, y2 = coordinates[i, 0]\n prediction = {\n 'tag': 'face',\n 'attributes': [],\n 'confidence': confidences[i],\n 'box': {\n 'l': x1,\n 't': y1,\n 'w': x2-x1,\n 'h': y2-y1\n }\n }\n print(attributes, flush=True)\n for attribute in attributes:\n if attribute['type'] == 'regression':\n prediction['attributes'].append({\n 'name': attribute['name'],\n 'value': str(attribute['values'][i]),\n 'confidence': -1})\n if attribute['type'] == 'classification':\n prediction['attributes'].append({\n 'name': attribute['name'],\n 'value': str(attribute['values'][i]),\n 'confidence': attribute['confidences'][i]})\n prediction['attributes'].sort(key=lambda x:x['name'])\n predictions.append(prediction)\n\n # followings is for drawing\n #h, w, _ = img.shape\n #for prediction in predictions:\n # x1 = int(prediction['box']['l'] * w)\n # y1 = int(prediction['box']['t'] * h)\n # x2 = int((prediction['box']['w'] + prediction['box']['l']) * w)\n # y2 = int((prediction['box']['h'] + prediction['box']['t']) * h)\n # cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 255), 3)\n # font = cv2.FONT_HERSHEY_SIMPLEX\n # fontScale = 0.5\n # color = (0, 255, 255)\n # thickness = 1\n # text = prediction['tag']\n # for attribute in prediction['attributes']:\n # text += ' / ' + str(attribute['value'])\n # cv2.putText(img, text, (x1, y1-10), font,\n # fontScale, color, thickness, cv2.LINE_AA)\n return img, predictions\n\nclass OVMSBatchImageProcessor():\n def __init__(self):\n self.stub = None\n self.metadatas = None\n self.metadatas_json = ''\n self.th = threading.Thread(target=process_voe_config, args=(self,))\n 
self.th.start()\n \n def process_images(self, mediaStreamMessage, rawBytes, size):\n\n #FIXME\n if self.stub is None:\n self.stub = ovms.connect_ovms('ovmsserver:9001')\n\n # Read image raw bytes\n im = Image.frombytes('RGB', size, rawBytes.tobytes())\n #predictions = OVMS.predict\n\n img = np.asarray(im)\n img = img.astype(np.float32) # BGR color format, shape HWC\n\n img = cv2.resize(img, (416, 416))\n img_tensor = img.reshape(1, 416, 416, 3)\n\n #predictions = [{'tag': 'aa', 'confidence': 0.5}]\n response = ovms.predict(self.stub, img_tensor)\n #print('1', flush=True)\n img, predictions = process_response(response, img, self.metadatas)\n #print('2', flush=True)\n\n\n for prediction in predictions:\n inference = mediaStreamMessage.media_sample.inferences.add()\n inference.type = inferencing_pb2.Inference.InferenceType.ENTITY\n\n attributes = []\n #print('3', flush=True)\n for attribute in prediction['attributes']:\n attributes.append(inferencing_pb2.Attribute(name=attribute['name'], value=attribute['value'], confidence=attribute['confidence']))\n\n #print('4', flush=True)\n inference.entity.CopyFrom(\n inferencing_pb2.Entity(\n tag=inferencing_pb2.Tag(\n value=prediction['tag'], \n confidence=prediction['confidence']\n ),\n box=inferencing_pb2.Rectangle(\n l=prediction['box']['l'],\n t=prediction['box']['t'],\n w=prediction['box']['w'],\n h=prediction['box']['h']\n ),\n attributes=attributes\n )\n )\n return mediaStreamMessage\n" ]
[ [ "numpy.asarray", "numpy.max", "tensorflow.make_ndarray", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anair13/agents
[ "756f7bdf493986c25eb585438134f1dbb8045b1b", "756f7bdf493986c25eb585438134f1dbb8045b1b" ]
[ "tf_agents/bandits/agents/greedy_reward_prediction_agent_test.py", "tf_agents/policies/temporal_action_smoothing.py" ]
[ "# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for greedy_reward_prediction_agent.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\nfrom tf_agents.bandits.agents import greedy_reward_prediction_agent as greedy_agent\nfrom tf_agents.bandits.drivers import driver_utils\nfrom tf_agents.bandits.networks import global_and_arm_feature_network\nfrom tf_agents.bandits.policies import constraints\nfrom tf_agents.bandits.specs import utils as bandit_spec_utils\nfrom tf_agents.networks import network\nfrom tf_agents.policies import utils as policy_utilities\nfrom tf_agents.specs import tensor_spec\nfrom tf_agents.trajectories import policy_step\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\n\n\nclass DummyNet(network.Network):\n\n def __init__(self, unused_observation_spec, action_spec, name=None):\n super(DummyNet, self).__init__(\n unused_observation_spec, state_spec=(), name=name)\n action_spec = tf.nest.flatten(action_spec)[0]\n num_actions = action_spec.maximum - action_spec.minimum + 1\n\n # Store custom layers that can be serialized through the Checkpointable API.\n self._dummy_layers = [\n tf.keras.layers.Dense(\n num_actions,\n kernel_initializer=tf.constant_initializer([[1, 1.5, 2],\n [1, 1.5, 4]]),\n bias_initializer=tf.constant_initializer([[1], [1], [-10]]))\n ]\n\n def call(self, inputs, step_type=None, network_state=()):\n del step_type\n inputs = tf.cast(inputs, tf.float32)\n for layer in self._dummy_layers:\n inputs = layer(inputs)\n return inputs, network_state\n\n\ndef _get_initial_and_final_steps(observations, rewards):\n batch_size = tf.nest.flatten(observations)[0].shape[0]\n if isinstance(observations, np.ndarray):\n observations = tf.constant(\n observations, dtype=tf.float32, name='observation')\n initial_step = ts.TimeStep(\n tf.constant(\n ts.StepType.FIRST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n observations)\n final_step = ts.TimeStep(\n tf.constant(\n ts.StepType.LAST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(rewards, dtype=tf.float32, name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n observations)\n return initial_step, final_step\n\n\ndef _get_initial_and_final_steps_nested_rewards(observations, rewards):\n batch_size = tf.nest.flatten(observations)[0].shape[0]\n if isinstance(observations, np.ndarray):\n observations = tf.constant(\n observations, dtype=tf.float32, name='observation')\n zero_rewards = {\n 'reward': tf.constant(0.0, dtype=tf.float32, shape=[batch_size]),\n 'constraint': tf.constant(0.0, dtype=tf.float32, 
shape=[batch_size])\n }\n initial_step = ts.TimeStep(\n tf.constant(\n ts.StepType.FIRST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n zero_rewards,\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n observations)\n rewards_nest = tf.nest.map_structure(\n lambda t: tf.convert_to_tensor(t, dtype=tf.float32), rewards)\n final_step = ts.TimeStep(\n tf.constant(\n ts.StepType.LAST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n rewards_nest,\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n observations)\n return initial_step, final_step\n\n\ndef _get_initial_and_final_steps_with_action_mask(observations, rewards):\n batch_size = tf.nest.flatten(observations)[0].shape[0]\n initial_step = ts.TimeStep(\n tf.constant(\n ts.StepType.FIRST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(0.0, dtype=tf.float32, shape=[batch_size], name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n (observations[0], observations[1]))\n final_step = ts.TimeStep(\n tf.constant(\n ts.StepType.LAST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n tf.constant(rewards, dtype=tf.float32, name='reward'),\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size],\n name='discount'), (tf.nest.map_structure(\n lambda x: x + 100., observations[0]), observations[1]))\n return initial_step, final_step\n\n\ndef _get_initial_and_final_steps_action_mask_nested_rewards(\n observations, rewards):\n batch_size = tf.nest.flatten(observations)[0].shape[0]\n zero_rewards = {\n 'reward': tf.constant(0.0, dtype=tf.float32, shape=[batch_size]),\n 'constraint': tf.constant(0.0, dtype=tf.float32, shape=[batch_size])\n }\n initial_step = ts.TimeStep(\n tf.constant(\n ts.StepType.FIRST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n zero_rewards,\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size], name='discount'),\n (observations[0], observations[1]))\n rewards_nest = tf.nest.map_structure(\n lambda t: tf.convert_to_tensor(t, dtype=tf.float32), rewards)\n final_step = ts.TimeStep(\n tf.constant(\n ts.StepType.LAST,\n dtype=tf.int32,\n shape=[batch_size],\n name='step_type'),\n rewards_nest,\n tf.constant(1.0, dtype=tf.float32, shape=[batch_size],\n name='discount'), (tf.nest.map_structure(\n lambda x: x + 100., observations[0]), observations[1]))\n return initial_step, final_step\n\n\ndef _get_action_step(action):\n return policy_step.PolicyStep(\n action=tf.convert_to_tensor(action),\n info=policy_utilities.PolicyInfo())\n\n\ndef _get_experience(initial_step, action_step, final_step):\n single_experience = driver_utils.trajectory_for_bandit(\n initial_step, action_step, final_step)\n # Adds a 'time' dimension.\n return tf.nest.map_structure(\n lambda x: tf.expand_dims(tf.convert_to_tensor(x), 1),\n single_experience)\n\n\nclass AgentTest(tf.test.TestCase):\n\n def setUp(self):\n super(AgentTest, self).setUp()\n tf.compat.v1.enable_resource_variables()\n self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)\n self._time_step_spec = ts.time_step_spec(self._obs_spec)\n self._action_spec = tensor_spec.BoundedTensorSpec(\n dtype=tf.int32, shape=(), minimum=0, maximum=2)\n self._observation_spec = self._time_step_spec.observation\n\n def testCreateAgent(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n 
optimizer=None)\n self.assertIsNotNone(agent.policy)\n\n def testInitializeAgent(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=None)\n init_op = agent.initialize()\n if not tf.executing_eagerly():\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertIsNone(sess.run(init_op))\n\n def testLoss(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n observations = np.array([[1, 2], [3, 4]], dtype=np.float32)\n actions = np.array([0, 1], dtype=np.int32)\n rewards = np.array([0.5, 3.0], dtype=np.float32)\n initial_step, final_step = _get_initial_and_final_steps_nested_rewards(\n observations, rewards)\n action_step = _get_action_step(actions)\n experience = _get_experience(initial_step, action_step, final_step)\n\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=None)\n init_op = agent.initialize()\n if not tf.executing_eagerly():\n with self.cached_session() as sess:\n common.initialize_uninitialized_variables(sess)\n self.assertIsNone(sess.run(init_op))\n loss, _ = agent._loss(experience)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n self.assertAllClose(self.evaluate(loss), 42.25)\n\n def testPolicy(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=None)\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_steps = ts.restart(observations, batch_size=2)\n policy = agent.policy\n action_step = policy.action(time_steps)\n # Batch size 2.\n self.assertAllEqual([2], action_step.action.shape)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n actions = self.evaluate(action_step.action)\n self.assertAllEqual(actions, [1, 2])\n\n def testInitializeRestoreAgent(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=None)\n observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)\n time_steps = ts.restart(observations, batch_size=2)\n policy = agent.policy\n action_step = policy.action(time_steps)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n\n checkpoint = tf.train.Checkpoint(agent=agent)\n\n latest_checkpoint = tf.train.latest_checkpoint(self.get_temp_dir())\n checkpoint_load_status = checkpoint.restore(latest_checkpoint)\n\n if tf.executing_eagerly():\n self.evaluate(checkpoint_load_status.initialize_or_restore())\n self.assertAllEqual(self.evaluate(action_step.action), [1, 2])\n else:\n with self.cached_session() as sess:\n checkpoint_load_status.initialize_or_restore(sess)\n self.assertAllEqual(sess.run(action_step.action), [1, 2])\n\n def testTrainAgent(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=optimizer)\n observations = np.array([[1, 2], [3, 4]], dtype=np.float32)\n actions = np.array([0, 1], dtype=np.int32)\n rewards = np.array([0.5, 3.0], dtype=np.float32)\n 
initial_step, final_step = _get_initial_and_final_steps(\n observations, rewards)\n action_step = _get_action_step(actions)\n experience = _get_experience(initial_step, action_step, final_step)\n loss_before, _ = agent.train(experience, None)\n loss_after, _ = agent.train(experience, None)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n self.assertAllClose(self.evaluate(loss_before), 42.25)\n self.assertAllClose(self.evaluate(loss_after), 93.46)\n\n def testTrainAgentWithConstraint(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n\n constraint_net = DummyNet(self._observation_spec, self._action_spec)\n neural_constraint = constraints.NeuralConstraint(\n self._time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n\n reward_spec = {\n 'reward': tensor_spec.TensorSpec(\n shape=(), dtype=tf.float32, name='reward'),\n 'constraint': tensor_spec.TensorSpec(\n shape=(), dtype=tf.float32, name='constraint')\n }\n self._time_step_spec = ts.time_step_spec(self._obs_spec, reward_spec)\n\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=optimizer,\n constraints=[neural_constraint])\n observations = np.array([[1, 2], [3, 4]], dtype=np.float32)\n actions = np.array([0, 1], dtype=np.int32)\n rewards = {\n 'reward': np.array([0.5, 3.0], dtype=np.float32),\n 'constraint': np.array([6.0, 4.0], dtype=np.float32)\n }\n initial_step, final_step = _get_initial_and_final_steps_nested_rewards(\n observations, rewards)\n action_step = _get_action_step(actions)\n experience = _get_experience(initial_step, action_step, final_step)\n loss_before, _ = agent.train(experience, None)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n # The loss is the sum of the reward loss and the constraint loss.\n self.assertAllClose(self.evaluate(loss_before), 42.25 + 30.125)\n\n def testTrainAgentWithMask(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n time_step_spec = ts.time_step_spec((tensor_spec.TensorSpec([2], tf.float32),\n tensor_spec.TensorSpec([3], tf.int32)))\n agent = greedy_agent.GreedyRewardPredictionAgent(\n time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=optimizer,\n observation_and_action_constraint_splitter=lambda x: (x[0], x[1]))\n observations = (np.array([[1, 2], [3, 4]], dtype=np.float32),\n np.array([[1, 0, 0], [1, 1, 0]], dtype=np.int32))\n actions = np.array([0, 1], dtype=np.int32)\n rewards = np.array([0.5, 3.0], dtype=np.float32)\n initial_step, final_step = _get_initial_and_final_steps_with_action_mask(\n observations, rewards)\n action_step = _get_action_step(actions)\n experience = _get_experience(initial_step, action_step, final_step)\n loss_before, _ = agent.train(experience, None)\n loss_after, _ = agent.train(experience, None)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n self.assertAllClose(self.evaluate(loss_before), 42.25)\n self.assertAllClose(self.evaluate(loss_after), 93.46)\n\n def testTrainAgentWithMaskAndConstraint(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n reward_spec = {\n 'reward': tensor_spec.TensorSpec(\n shape=(), dtype=tf.float32, name='reward'),\n 'constraint': tensor_spec.TensorSpec(\n shape=(), 
dtype=tf.float32, name='constraint')\n }\n observation_and_mask_spec = (tensor_spec.TensorSpec([2], tf.float32),\n tensor_spec.TensorSpec([3], tf.int32))\n time_step_spec = ts.time_step_spec(observation_and_mask_spec, reward_spec)\n\n constraint_net = DummyNet(self._observation_spec, self._action_spec)\n neural_constraint = constraints.NeuralConstraint(\n self._time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n\n agent = greedy_agent.GreedyRewardPredictionAgent(\n time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=optimizer,\n observation_and_action_constraint_splitter=lambda x: (x[0], x[1]),\n constraints=[neural_constraint])\n observations = (np.array([[1, 2], [3, 4]], dtype=np.float32),\n np.array([[1, 0, 0], [1, 1, 0]], dtype=np.int32))\n actions = np.array([0, 1], dtype=np.int32)\n rewards = {\n 'reward': np.array([0.5, 3.0], dtype=np.float32),\n 'constraint': np.array([6.0, 4.0], dtype=np.float32)\n }\n initial_step, final_step = (\n _get_initial_and_final_steps_action_mask_nested_rewards(\n observations, rewards))\n action_step = _get_action_step(actions)\n experience = _get_experience(initial_step, action_step, final_step)\n loss_before, _ = agent.train(experience, None)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n # The loss is the sum of the reward loss and the constraint loss.\n self.assertAllClose(self.evaluate(loss_before), 42.25 + 30.125)\n\n def testTrainAgentWithLaplacianSmoothing(self):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n laplacian_matrix = tf.constant([[1.0, -1.0, 0.0],\n [-1.0, 2.0, -1.0],\n [0.0, -1.0, 1.0]])\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=optimizer,\n laplacian_matrix=laplacian_matrix,\n laplacian_smoothing_weight=1.0)\n observations = np.array([[1, 2], [3, 4]], dtype=np.float32)\n actions = np.array([0, 1], dtype=np.int32)\n rewards = np.array([0.5, 3.0], dtype=np.float32)\n initial_step, final_step = _get_initial_and_final_steps(\n observations, rewards)\n action_step = _get_action_step(actions)\n experience = _get_experience(initial_step, action_step, final_step)\n loss_before, _ = agent.train(experience, None)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n # The Laplacian smoothing term ends up adding 22.5 to the loss.\n self.assertAllClose(self.evaluate(loss_before), 42.25 + 22.5)\n\n def testTrainAgentWithLaplacianSmoothingInvalidMatrix(self):\n # Skip under eager execution; note the call below, since the bare\n # function object tf.executing_eagerly would always be truthy.\n if tf.executing_eagerly():\n return\n\n observations = np.array([[1, 2], [3, 4]], dtype=np.float32)\n actions = np.array([0, 1], dtype=np.int32)\n rewards = np.array([0.5, 3.0], dtype=np.float32)\n initial_step, final_step = _get_initial_and_final_steps(\n observations, rewards)\n action_step = _get_action_step(actions)\n experience = _get_experience(initial_step, action_step, final_step)\n\n with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, ''):\n reward_net = DummyNet(self._observation_spec, self._action_spec)\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n # Set the Laplacian matrix to be the identity, which is not a valid\n # Laplacian.\n laplacian_matrix = tf.eye(3)\n agent = greedy_agent.GreedyRewardPredictionAgent(\n self._time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n optimizer=optimizer,\n laplacian_matrix=laplacian_matrix,\n laplacian_smoothing_weight=1.0)\n 
self.evaluate(tf.compat.v1.initialize_all_variables())\n loss_before, _ = agent.train(experience, None)\n self.evaluate(loss_before)\n\n def testTrainPerArmAgent(self):\n obs_spec = bandit_spec_utils.create_per_arm_observation_spec(\n 2, 3, 4, add_num_actions_feature=True)\n time_step_spec = ts.time_step_spec(obs_spec)\n reward_net = (\n global_and_arm_feature_network.create_feed_forward_common_tower_network(\n obs_spec, (4, 3), (3, 4), (4, 2)))\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n agent = greedy_agent.GreedyRewardPredictionAgent(\n time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n accepts_per_arm_features=True,\n optimizer=optimizer)\n observations = {\n bandit_spec_utils.GLOBAL_FEATURE_KEY:\n tf.constant([[1, 2], [3, 4]], dtype=tf.float32),\n bandit_spec_utils.PER_ARM_FEATURE_KEY:\n tf.cast(\n tf.reshape(tf.range(24), shape=[2, 4, 3]), dtype=tf.float32),\n bandit_spec_utils.NUM_ACTIONS_FEATURE_KEY:\n tf.ones([2], dtype=tf.int32)\n }\n actions = np.array([0, 3], dtype=np.int32)\n rewards = np.array([0.5, 3.0], dtype=np.float32)\n initial_step, final_step = _get_initial_and_final_steps(\n observations, rewards)\n action_step = policy_step.PolicyStep(\n action=tf.convert_to_tensor(actions),\n info=policy_utilities.PerArmPolicyInfo(\n chosen_arm_features=np.array([[1, 2, 3], [3, 2, 1]],\n dtype=np.float32)))\n experience = _get_experience(initial_step, action_step, final_step)\n agent.train(experience, None)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n\n def testTrainPerArmAgentWithConstraint(self):\n obs_spec = bandit_spec_utils.create_per_arm_observation_spec(2, 3, 4)\n reward_spec = {\n 'reward': tensor_spec.TensorSpec(\n shape=(), dtype=tf.float32, name='reward'),\n 'constraint': tensor_spec.TensorSpec(\n shape=(), dtype=tf.float32, name='constraint')\n }\n time_step_spec = ts.time_step_spec(obs_spec, reward_spec)\n reward_net = (\n global_and_arm_feature_network.create_feed_forward_common_tower_network(\n obs_spec, (4, 3), (3, 4), (4, 2)))\n optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)\n constraint_net = (\n global_and_arm_feature_network.create_feed_forward_common_tower_network(\n obs_spec, (4, 3), (3, 4), (4, 2)))\n neural_constraint = constraints.NeuralConstraint(\n time_step_spec,\n self._action_spec,\n constraint_network=constraint_net)\n\n agent = greedy_agent.GreedyRewardPredictionAgent(\n time_step_spec,\n self._action_spec,\n reward_network=reward_net,\n accepts_per_arm_features=True,\n optimizer=optimizer,\n constraints=[neural_constraint])\n observations = {\n bandit_spec_utils.GLOBAL_FEATURE_KEY:\n tf.constant([[1, 2], [3, 4]], dtype=tf.float32),\n bandit_spec_utils.PER_ARM_FEATURE_KEY:\n tf.cast(\n tf.reshape(tf.range(24), shape=[2, 4, 3]), dtype=tf.float32)\n }\n actions = np.array([0, 3], dtype=np.int32)\n rewards = {\n 'reward': np.array([0.5, 3.0], dtype=np.float32),\n 'constraint': np.array([6.0, 4.0], dtype=np.float32)\n }\n initial_step, final_step = _get_initial_and_final_steps_nested_rewards(\n observations, rewards)\n action_step = policy_step.PolicyStep(\n action=tf.convert_to_tensor(actions),\n info=policy_utilities.PerArmPolicyInfo(\n chosen_arm_features=np.array([[1, 2, 3], [3, 2, 1]],\n dtype=np.float32)))\n experience = _get_experience(initial_step, action_step, final_step)\n agent.train(experience, None)\n self.evaluate(tf.compat.v1.initialize_all_variables())\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# coding=utf-8\n# Copyright 2020 The 
TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A TFPolicy wrapper that applies exponential moving averaging to actions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Using Type Annotations.\nfrom __future__ import print_function\n\nfrom typing import Optional, Text\n\nimport tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import\n\nfrom tf_agents.policies import tf_policy\nfrom tf_agents.trajectories import policy_step\n\n\nclass TemporalActionSmoothing(tf_policy.TFPolicy):\n \"\"\"A wrapper that applies exponential moving averaging to action outputs.\"\"\"\n\n def __init__(self,\n policy: tf_policy.TFPolicy,\n smoothing_coefficient: float,\n name: Optional[Text] = None):\n \"\"\"Adds TemporalActionSmoothing to the given policy.\n\n smoothed_action = previous_action * smoothing_coefficient +\n action * (1.0 - smoothing_coefficient))\n\n Args:\n policy: A policy implementing the tf_policy.TFPolicy interface.\n smoothing_coefficient: Coefficient used for smoothing actions.\n name: The name of this policy. Defaults to the class name.\n \"\"\"\n policy_state_spec = (policy.policy_state_spec, policy.action_spec)\n super(TemporalActionSmoothing, self).__init__(\n policy.time_step_spec, policy.action_spec, policy_state_spec, name=name)\n self._wrapped_policy = policy\n self._smoothing_coefficient = smoothing_coefficient\n\n def _get_initial_state(self, batch_size):\n \"\"\"Creates zero state tuple with wrapped initial state and smoothing vars.\n\n Args:\n batch_size: The batch shape.\n\n Returns:\n A tuple of (wrapped_policy_initial_state, initial_smoothing_state)\n \"\"\"\n wrapped_initial_state = self._wrapped_policy.get_initial_state(batch_size)\n initial_smoothing_state = super(TemporalActionSmoothing,\n self)._get_initial_state(batch_size)[1]\n return (wrapped_initial_state, initial_smoothing_state)\n\n def _variables(self):\n return self._wrapped_policy.variables()\n\n def _distribution(self, time_step, policy_state):\n raise NotImplementedError(\n '`distribution` not implemented for TemporalActionSmoothingWrapper.')\n\n def _action(self, time_step, policy_state, seed):\n # Get action from the wrapped policy.\n wrapped_policy_state, moving_average = policy_state\n wrapped_policy_step = self._wrapped_policy.action(time_step,\n wrapped_policy_state,\n seed)\n\n # Compute smoothed action & updated action tensor.\n def _smooth_action_tensor(smoothing_state_tensor, action_tensor):\n return (smoothing_state_tensor * self._smoothing_coefficient +\n action_tensor * (1.0 - self._smoothing_coefficient))\n\n smoothed_action = tf.nest.map_structure(_smooth_action_tensor,\n moving_average,\n wrapped_policy_step.action)\n\n # Package results in PolicyStep.\n return policy_step.PolicyStep(smoothed_action,\n (wrapped_policy_step.state, smoothed_action),\n wrapped_policy_step.info)\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.constant", "tensorflow.executing_eagerly", "tensorflow.range", "tensorflow.train.Checkpoint", "tensorflow.cast", "tensorflow.test.main", "tensorflow.eye", "tensorflow.ones", "tensorflow.compat.v1.initialize_all_variables", "tensorflow.constant_initializer", "tensorflow.compat.v1.enable_resource_variables", "tensorflow.nest.flatten", "numpy.array", "tensorflow.compat.v1.train.GradientDescentOptimizer", "tensorflow.nest.map_structure" ], [ "tensorflow.nest.map_structure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
OCR-D/ocrd_tesserocr
[ "ac274656bfb7021ccd752bccd947e53403b4a909" ]
[ "ocrd_tesserocr/recognize.py" ]
[ "from __future__ import absolute_import\nimport os.path\nimport math\nfrom PIL import Image, ImageStat\nimport numpy as np\nfrom shapely.geometry import Polygon, asPolygon\nfrom shapely.ops import unary_union\n\nfrom tesserocr import (\n RIL, PSM, PT, OEM,\n Orientation,\n WritingDirection,\n TextlineOrder,\n tesseract_version,\n PyTessBaseAPI, get_languages as get_languages_)\n\nfrom ocrd_utils import (\n getLogger,\n make_file_id,\n assert_file_grp_cardinality,\n shift_coordinates,\n coordinates_for_segment,\n polygon_from_x0y0x1y1,\n polygon_from_points,\n points_from_polygon,\n xywh_from_polygon,\n MIMETYPE_PAGE,\n membername\n)\nfrom ocrd_models.ocrd_page import (\n ReadingOrderType,\n RegionRefType,\n RegionRefIndexedType,\n OrderedGroupType,\n OrderedGroupIndexedType,\n UnorderedGroupType,\n UnorderedGroupIndexedType,\n PageType,\n CoordsType,\n ImageRegionType,\n MathsRegionType,\n SeparatorRegionType,\n NoiseRegionType,\n TableRegionType,\n TextRegionType,\n TextLineType,\n WordType,\n GlyphType,\n TextEquivType,\n AlternativeImageType,\n to_xml)\nfrom ocrd_models.ocrd_page_generateds import (\n ReadingDirectionSimpleType,\n TextLineOrderSimpleType,\n TextTypeSimpleType\n)\nfrom ocrd_modelfactory import page_from_file\nfrom ocrd import Processor\n\nfrom .config import get_tessdata_path, OCRD_TOOL\n\nTOOL = 'ocrd-tesserocr-recognize'\n\nCHOICE_THRESHOLD_NUM = 10 # maximum number of choices to query and annotate\nCHOICE_THRESHOLD_CONF = 1 # maximum score drop from best choice to query and annotate\n# (ChoiceIterator usually rounds to 0.0 for non-best, so this better be maximum)\n\ndef get_languages(*args, **kwargs):\n \"\"\"\n Wraps tesserocr.get_languages() with a fixed path parameter.\n \"\"\"\n return get_languages_(*args, path=get_tessdata_path(), **kwargs)\n\n# monkey-patch the tesserocr base class so have at least some state\nclass TessBaseAPI(PyTessBaseAPI):\n parameters = {}\n psm = PSM.AUTO\n image = None\n path = ''\n lang = ''\n oem = OEM.DEFAULT\n\n def __repr__(self):\n return str({'parameters': self.parameters,\n 'psm': self.psm,\n 'image': self.image,\n 'path': self.path,\n 'lang': self.lang,\n 'oem': self.oem})\n\n def InitFull(self, path=None, lang=None, oem=None, psm=None, variables=None):\n self.path = path or self.path\n self.lang = lang or self.lang\n self.oem = oem or self.oem\n self.parameters = variables or self.parameters\n super().InitFull(path=self.path, lang=self.lang, oem=self.oem, variables=self.parameters)\n\n def SetVariable(self, name, val):\n self.parameters[name] = val\n return super().SetVariable(name, val)\n\n def SetPageSegMode(self, psm):\n self.psm = psm\n super().SetPageSegMode(psm)\n\n def Reset(self, path=None, lang=None, oem=None, psm=None, parameters=None):\n self.Clear()\n self.InitFull(path=path, lang=lang, oem=oem, variables=parameters)\n self.SetPageSegMode(psm or self.psm)\n\n def __enter__(self):\n self.original_path = self.path\n self.original_lang = self.lang\n self.original_oem = self.oem\n self.original_parameters = self.parameters.copy()\n self.original_psm = self.psm\n return self\n\n def __exit__(self, exc_type, exc_val, exc_trace):\n self.path = self.original_path\n self.lang = self.original_lang\n self.oem = self.original_oem\n self.parameters = self.original_parameters\n self.psm = self.original_psm\n return None\n\nclass TesserocrRecognize(Processor):\n\n def __init__(self, *args, **kwargs):\n kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]\n kwargs['version'] = OCRD_TOOL['version'] + ' (' + 
tesseract_version().split('\\n')[0] + ')'\n super(TesserocrRecognize, self).__init__(*args, **kwargs)\n \n if hasattr(self, 'workspace'):\n self.logger = getLogger('processor.TesserocrRecognize')\n\n def process(self):\n \"\"\"Perform layout segmentation and/or text recognition with Tesseract on the workspace.\n \n Open and deserialise PAGE input files and their respective images,\n then iterate over the element hierarchy down to the requested\n ``textequiv_level`` if it exists and if ``segmentation_level``\n is lower (i.e. more granular) or ``none``.\n \n Otherwise stop before (i.e. above) ``segmentation_level``. If any\n segmentation exist at that level already, and ``overwrite_segments``\n is false, then descend into these segments, else remove them.\n \n Set up Tesseract to recognise each segment's image (either from\n AlternativeImage or cropping the bounding box rectangle and masking\n it from the polygon outline) with the appropriate segmentation mode\n and ``model``. (If no ``model`` is given, only layout analysis will\n be performed.)\n \n Next, if there still is a gap between the current level in the PAGE hierarchy\n and the requested ``textequiv_level``, then iterate down the result hierarchy,\n adding new segments at each level (as well as reading order references,\n text line order, reading direction and orientation at the region/table level).\n \n Then, at ``textequiv_level``, remove any existing TextEquiv, unless\n ``overwrite_text`` is false, and add text and confidence results, unless\n ``model`` is empty.\n \n The special value ``textequiv_level=none`` behaves like ``glyph``,\n except that no actual text recognition will be performed, only\n layout analysis (so no ``model`` is needed, and new segmentation\n is created down to the glyph level).\n \n The special value ``segmentation_level=none`` likewise is lowest,\n i.e. no actual layout analysis will be performed, only\n text recognition (so existing segmentation is needed down to\n ``textequiv_level``).\n \n Finally, make all higher levels consistent with these text results\n by concatenation, ordering according to each level's respective\n readingDirection, textLineOrder, and ReadingOrder, and joining\n by whitespace as appropriate for each level and according to its\n Relation/join status.\n \n In other words:\n - If ``segmentation_level=region``, then segment the page into regions\n (unless ``overwrite_segments=false``), else iterate existing regions.\n - If ``textequiv_level=region``, then unless ``model`` is empty,\n recognize text in the region and annotate it. Regardless, continue\n with the next region. Otherwise...\n - If ``segmentation_level=cell`` or higher,\n then segment table regions into text regions (i.e. cells)\n (unless ``overwrite_segments=false``), else iterate existing cells.\n - If ``textequiv_level=cell``, then unless ``model`` is empty,\n recognize text in the cell and annotate it. Regardless, continue\n with the next cell. Otherwise...\n - If ``segmentation_level=line`` or higher,\n then segment text regions into text lines\n (unless ``overwrite_segments=false``), else iterate existing text lines.\n - If ``textequiv_level=line``, then unless ``model`` is empty,\n recognize text in the text lines and annotate it. Regardless, continue\n with the next line. 
Otherwise...\n - If ``segmentation_level=word`` or higher,\n then segment text lines into words\n (unless ``overwrite_segments=false``), else iterate existing words.\n - If ``textequiv_level=word``, then unless ``model`` is empty,\n recognize text in the words and annotate it. Regardless, continue\n with the next word. Otherwise...\n - If ``segmentation_level=glyph`` or higher,\n then segment words into glyphs\n (unless ``overwrite_segments=false``), else iterate existing glyphs.\n - If ``textequiv_level=glyph``, then unless ``model`` is empty,\n recognize text in the glyphs and annotate it. Regardless, continue\n with the next glyph. Otherwise...\n - (i.e. ``none``) annotate no text and be done.\n \n Note that ``cell`` is an _optional_ level that is only relevant for\n table regions, not text or other regions. \n Also, when segmenting tables in the same run that detects them\n (via ``segmentation_level=region`` and ``find_tables``), cells will\n just be 'paragraphs'. In contrast, when segmenting tables that already exist\n (via ``segmentation_level=cell``), cells will be detected in ``sparse_text``\n mode, i.e. as single-line text regions.\n \n Thus, ``segmentation_level`` is the entry point level for layout analysis,\n and setting it to ``none`` makes this processor behave as recognition-only.\n Whereas ``textequiv_level`` selects the exit point level for segmentation,\n and setting it to ``none`` makes this processor behave as segmentation-only,\n as does omitting ``model``.\n \n All segments above ``segmentation_level`` must already exist, and\n no segments below ``textequiv_level`` will be newly created.\n \n If ``find_tables``, then during region segmentation, also try to detect\n table blocks and add them as TableRegion, then query the page iterator\n for paragraphs and add them as TextRegion cells.\n \n If ``block_polygons``, then during region segmentation, query Tesseract\n for polygon outlines instead of bounding boxes for each region.\n (This is more precise, but due to some path representation errors does\n not always yield accurate/valid polygons.)\n \n If ``shrink_polygons``, then during segmentation (on any level), query Tesseract\n for all symbols/glyphs of each segment and calculate the convex hull for them.\n Annotate the resulting polygon instead of the coarse bounding box.\n (This is more precise and helps avoid overlaps between neighbours, especially\n when not segmenting all levels at once.)\n \n If ``sparse_text``, then during region segmentation, attempt to find\n single-line text blocks in no particular order (Tesseract's page segmentation\n mode ``SPARSE_TEXT``).\n \n If ``tesseract_parameters`` is given, setup each of its key-value pairs as\n run-time parameters in Tesseract.\n \n Finally, produce new output files by serialising the resulting hierarchy.\n \"\"\"\n self.logger.debug(\"TESSDATA: %s, installed Tesseract models: %s\", *get_languages())\n\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n\n inlevel = self.parameter['segmentation_level']\n outlevel = self.parameter['textequiv_level']\n segment_only = outlevel == 'none' or not self.parameter.get('model', '')\n \n model = \"eng\"\n self.languages = get_languages()[1]\n if 'model' in self.parameter:\n model = self.parameter['model']\n for sub_model in model.split('+'):\n if sub_model.endswith('.traineddata'):\n self.logger.warning(\"Model '%s' has a .traineddata extension, removing. 
Please use model names without .traineddata extension\" % sub_model)\n sub_model = sub_model.replace('.traineddata', '')\n if sub_model not in get_languages()[1]:\n raise Exception(\"configured model \" + sub_model + \" is not installed\")\n self.logger.info(\"Using model '%s' in %s for recognition at the %s level\",\n model, get_languages()[0], outlevel)\n \n with TessBaseAPI(init=False) as tessapi:\n # Set init-time parameters\n # self.SetVariable(\"debug_file\", \"\") # show debug output (default: /dev/null)\n if outlevel == 'glyph':\n # populate GetChoiceIterator() with LSTM models, too:\n tessapi.SetVariable(\"lstm_choice_mode\", \"2\") # aggregate symbols\n tessapi.SetVariable(\"lstm_choice_iterations\", \"15\") # squeeze out more best paths\n tessapi.SetVariable(\"pageseg_apply_music_mask\", \"1\" if self.parameter['find_staves'] else \"0\")\n # TODO: maybe warn/raise when illegal combinations or characters not in the model unicharset?\n if self.parameter['char_whitelist']:\n tessapi.SetVariable(\"tessedit_char_whitelist\", self.parameter['char_whitelist'])\n if self.parameter['char_blacklist']:\n tessapi.SetVariable(\"tessedit_char_blacklist\", self.parameter['char_blacklist'])\n if self.parameter['char_unblacklist']:\n tessapi.SetVariable(\"tessedit_char_unblacklist\", self.parameter['char_unblacklist'])\n # todo: determine relevancy of these variables:\n # tessedit_preserve_min_wd_len 2\n # tessedit_prefer_joined_punct 0\n # tessedit_write_rep_codes 0\n # tessedit_parallelize 0\n # tessedit_zero_rejection 0\n # tessedit_zero_kelvin_rejection 0\n # tessedit_reject_mode 0\n # tessedit_use_reject_spaces 1\n # tessedit_fix_fuzzy_spaces 1\n # tessedit_char_blacklist\n # tessedit_char_whitelist\n # chs_leading_punct ('`\"\n # chs_trailing_punct1 ).,;:?!\n # chs_trailing_punct2 )'`\"\n # numeric_punctuation .,\n # unrecognised_char |\n # ok_repeated_ch_non_alphanum_wds -?*=\n # conflict_set_I_l_1 Il1[]\n # preserve_interword_spaces 0\n # tessedit_enable_dict_correction 0\n # tessedit_enable_bigram_correction 1\n # stopper_smallword_size 2\n # wordrec_max_join_chunks 4\n # suspect_space_level 100\n # suspect_short_words 2\n # language_model_ngram_on 0\n # language_model_ngram_order 8\n # language_model_min_compound_length 3\n # language_model_penalty_non_freq_dict_word 0.1\n # language_model_penalty_non_dict_word 0.15\n # language_model_penalty_punc 0.2\n # language_model_penalty_case 0.1\n # language_model_penalty_script 0.5\n # language_model_penalty_chartype 0.3\n # language_model_penalty_spacing 0.05\n # textord_max_noise_size 7\n # enable_noise_removal 1\n # classify_bln_numeric_mode 0\n # lstm_use_matrix 1\n # user_words_file\n # user_patterns_file\n tesseract_params = self.parameter['tesseract_parameters']\n for variable in tesseract_params:\n tessapi.SetVariable(variable, tesseract_params[variable])\n # Initialize Tesseract (loading model)\n tessapi.InitFull(path=get_tessdata_path(),\n lang=model,\n oem=getattr(OEM, self.parameter['oem']))\n # Iterate input files\n for (n, input_file) in enumerate(self.input_files):\n file_id = make_file_id(input_file, self.output_file_grp)\n page_id = input_file.pageId or input_file.ID\n self.logger.info(\"INPUT FILE %i / %s\", n, page_id)\n pcgts, pcgts_tree, pcgts_mapping, pcgts_invmap = page_from_file(self.workspace.download_file(input_file),\n with_tree=True)\n pcgts.set_pcGtsId(file_id)\n self.add_metadata(pcgts)\n page = pcgts.get_Page()\n \n page_image, page_coords, page_image_info = self.workspace.image_from_page(\n page, page_id)\n if 
self.parameter['dpi'] > 0:\n dpi = self.parameter['dpi']\n self.logger.info(\"Page '%s' images will use %d DPI from parameter override\",\n page_id, dpi)\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi = round(dpi * 2.54)\n self.logger.info(\"Page '%s' images will use %d DPI from image meta-data\",\n page_id, dpi)\n else:\n dpi = 0\n self.logger.info(\"Page '%s' images will use DPI estimated from segmentation\",\n page_id)\n if dpi:\n tessapi.SetVariable('user_defined_dpi', str(dpi))\n \n self.logger.info(\"Processing page '%s'\", page_id)\n # FIXME: We should somehow _mask_ existing regions in order to annotate incrementally (not redundantly).\n # Currently segmentation_level=region also means removing regions,\n # but we could have an independent setting for that, and attempt\n # to detect regions only where nothing exists yet (by clipping to\n # background before, or by removing clashing predictions after\n # detection).\n regions = page.get_AllRegions(classes=['Text'])\n if inlevel == 'region' and (\n not regions or self.parameter['overwrite_segments']):\n for regiontype in [\n 'AdvertRegion',\n 'ChartRegion',\n 'ChemRegion',\n 'GraphicRegion',\n 'ImageRegion',\n 'LineDrawingRegion',\n 'MathsRegion',\n 'MusicRegion',\n 'NoiseRegion',\n 'SeparatorRegion',\n 'TableRegion',\n 'TextRegion',\n 'UnknownRegion']:\n if getattr(page, 'get_' + regiontype)():\n self.logger.info('Removing existing %ss on page %s', regiontype, page_id)\n getattr(page, 'set_' + regiontype)([])\n page.set_ReadingOrder(None)\n # prepare Tesseract\n if self.parameter['find_tables']:\n if outlevel == 'region' and self.parameter.get('model', ''):\n raise Exception(\"When segmentation_level is region and find_tables is enabled, textequiv_level must be at least cell, because text results cannot be annotated on tables directly.\")\n tessapi.SetVariable(\"textord_tabfind_find_tables\", \"1\") # (default)\n # this should yield additional blocks within the table blocks\n # from the page iterator, but does not in fact (yet?):\n # (and it can run into assertion errors when the table structure\n # does not meet certain homogeneity expectations)\n #tessapi.SetVariable(\"textord_tablefind_recognize_tables\", \"1\")\n else:\n # disable table detection here, so tables will be\n # analysed as independent text/line blocks:\n tessapi.SetVariable(\"textord_tabfind_find_tables\", \"0\")\n tessapi.SetImage(page_image) # is already cropped to Border\n tessapi.SetPageSegMode(PSM.SPARSE_TEXT\n if self.parameter['sparse_text']\n else PSM.AUTO)\n if segment_only:\n self.logger.debug(\"Detecting regions in page '%s'\", page_id)\n tessapi.AnalyseLayout()\n else:\n self._reinit(tessapi, page, pcgts_mapping)\n self.logger.debug(\"Recognizing text in page '%s'\", page_id)\n tessapi.Recognize()\n page_image_bin = tessapi.GetThresholdedImage()\n file_path = self.workspace.save_image_file(\n page_image_bin, file_id + '.IMG-BIN',\n page_id=page_id,\n file_grp=self.output_file_grp)\n # update PAGE (reference the image file):\n page.add_AlternativeImage(AlternativeImageType(\n filename=file_path, comments=page_coords['features'] + ',binarized,clipped'))\n self._process_regions_in_page(tessapi.GetIterator(), page, page_coords, pcgts_mapping, dpi)\n elif inlevel == 'cell':\n # Tables are obligatorily recursive regions;\n # they might have existing text regions (cells),\n # which will be processed in the next branch\n # (because the iterator is recursive to depth),\n # or be empty. 
This is independent of whether\n # or not they should be segmented into cells.\n if outlevel == 'region':\n raise Exception(\"When segmentation_level is cell, textequiv_level must be at least cell too, because text results cannot be annotated on tables directly.\")\n # disable table detection here, so tables will be\n # analysed as independent text/line blocks:\n tessapi.SetVariable(\"textord_tabfind_find_tables\", \"0\")\n tables = page.get_AllRegions(classes=['Table'])\n if not tables:\n self.logger.warning(\"Page '%s' contains no table regions (but segmentation is off)\",\n page_id)\n else:\n self._process_existing_tables(tessapi, tables, page, page_image, page_coords, pcgts_mapping)\n elif regions:\n self._process_existing_regions(tessapi, regions, page_image, page_coords, pcgts_mapping)\n else:\n self.logger.warning(\"Page '%s' contains no text regions (but segmentation is off)\",\n page_id)\n \n # post-processing\n # bottom-up text concatenation\n if outlevel != 'none' and self.parameter.get('model', ''):\n page_update_higher_textequiv_levels(outlevel, pcgts, self.parameter['overwrite_text'])\n # bottom-up polygonal outline projection\n # if inlevel != 'none' and self.parameter['shrink_polygons']:\n # page_shrink_higher_coordinate_levels(inlevel, outlevel, pcgts)\n \n self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n mimetype=MIMETYPE_PAGE,\n local_filename=os.path.join(self.output_file_grp,\n file_id + '.xml'),\n content=to_xml(pcgts))\n\n def _process_regions_in_page(self, result_it, page, page_coords, mapping, dpi):\n index = 0\n ro = page.get_ReadingOrder()\n if not ro:\n ro = ReadingOrderType()\n page.set_ReadingOrder(ro)\n og = ro.get_OrderedGroup()\n if og:\n # start counting from largest existing index\n for elem in (og.get_RegionRefIndexed() +\n og.get_OrderedGroupIndexed() +\n og.get_UnorderedGroupIndexed()):\n if elem.index >= index:\n index = elem.index + 1\n else:\n # new top-level group\n og = OrderedGroupType(id=\"reading-order\")\n ro.set_OrderedGroup(og)\n # equivalent to GetComponentImages with raw_image=True,\n # (which would also give raw coordinates),\n # except we are also interested in the iterator's BlockType() here,\n # and its BlockPolygon()\n for i, it in enumerate(iterate_level(result_it, RIL.BLOCK)):\n # (padding will be passed to both BoundingBox and GetImage)\n # (actually, Tesseract honours padding only on the left and bottom,\n # whereas right and top are increased less!)\n # TODO: output padding can create overlap between neighbours; at least find polygonal difference\n bbox = it.BoundingBox(RIL.BLOCK, padding=self.parameter['padding'])\n # sometimes these polygons are not planar, which causes\n # PIL.ImageDraw.Draw.polygon (and likely others as well)\n # to misbehave; however, PAGE coordinate semantics prohibit\n # multi-path polygons!\n # (probably a bug in Tesseract itself, cf. 
tesseract#2826):\n if self.parameter['block_polygons']:\n polygon = it.BlockPolygon()\n elif self.parameter['shrink_polygons'] and not it.Empty(RIL.SYMBOL):\n polygon = join_polygons([polygon_from_x0y0x1y1(\n symbol.BoundingBox(RIL.SYMBOL, padding=self.parameter['padding']))\n for symbol in iterate_level(it, RIL.SYMBOL, parent=RIL.BLOCK)])\n # simulate a RestartBlock(), not defined by Tesseract:\n it.Begin()\n for j, it in enumerate(iterate_level(it, RIL.BLOCK)):\n if i == j:\n break\n else:\n polygon = polygon_from_x0y0x1y1(bbox)\n xywh = xywh_from_polygon(polygon)\n polygon = coordinates_for_segment(polygon, None, page_coords)\n polygon2 = polygon_for_parent(polygon, page)\n if polygon2 is not None:\n polygon = polygon2\n points = points_from_polygon(polygon)\n coords = CoordsType(points=points)\n # plausibilise candidate\n if polygon2 is None:\n self.logger.info('Ignoring extant region: %s', points)\n continue\n block_type = it.BlockType()\n if block_type in [\n PT.FLOWING_TEXT,\n PT.HEADING_TEXT,\n PT.PULLOUT_TEXT,\n PT.CAPTION_TEXT,\n PT.VERTICAL_TEXT,\n PT.INLINE_EQUATION,\n PT.EQUATION,\n PT.TABLE] and (\n xywh['w'] < 20 / 300.0*(dpi or 300) or\n xywh['h'] < 10 / 300.0*(dpi or 300)):\n self.logger.info('Ignoring too small region: %s', points)\n continue\n region_image_bin = it.GetBinaryImage(RIL.BLOCK)\n if not region_image_bin or not region_image_bin.getbbox():\n self.logger.info('Ignoring binary-empty region: %s', points)\n continue\n #\n # keep and annotate new region\n ID = \"region%04d\" % index\n #\n # region type switch\n block_type = it.BlockType()\n self.logger.info(\"Detected region '%s': %s (%s)\",\n ID, points, membername(PT, block_type))\n if block_type in [PT.FLOWING_TEXT,\n PT.HEADING_TEXT,\n PT.PULLOUT_TEXT,\n PT.CAPTION_TEXT,\n # TABLE is contained in PTIsTextType, but\n # it is a bad idea to create a TextRegion\n # for it (better set `find_tables` False):\n # PT.TABLE,\n # will also get a 90° @orientation\n # (but that can be overridden by deskew/OSD):\n PT.VERTICAL_TEXT]:\n region = TextRegionType(id=ID, Coords=coords,\n type=TextTypeSimpleType.PARAGRAPH)\n if block_type == PT.VERTICAL_TEXT:\n region.set_orientation(90.0)\n elif block_type == PT.HEADING_TEXT:\n region.set_type(TextTypeSimpleType.HEADING)\n elif block_type == PT.PULLOUT_TEXT:\n region.set_type(TextTypeSimpleType.FLOATING)\n elif block_type == PT.CAPTION_TEXT:\n region.set_type(TextTypeSimpleType.CAPTION)\n page.add_TextRegion(region)\n og.add_RegionRefIndexed(RegionRefIndexedType(regionRef=ID, index=index))\n if self.parameter['textequiv_level'] not in ['region', 'cell']:\n self._process_lines_in_region(it, region, page_coords, mapping)\n elif self.parameter.get('model', ''):\n region.add_TextEquiv(TextEquivType(\n Unicode=it.GetUTF8Text(RIL.BLOCK).rstrip(\"\\n\\f\"),\n # iterator scores are arithmetic averages, too\n conf=it.Confidence(RIL.BLOCK)/100.0))\n elif block_type in [PT.FLOWING_IMAGE,\n PT.HEADING_IMAGE,\n PT.PULLOUT_IMAGE]:\n region = ImageRegionType(id=ID, Coords=coords)\n page.add_ImageRegion(region)\n og.add_RegionRefIndexed(RegionRefIndexedType(regionRef=ID, index=index))\n elif block_type in [PT.HORZ_LINE,\n PT.VERT_LINE]:\n region = SeparatorRegionType(id=ID, Coords=coords)\n page.add_SeparatorRegion(region)\n elif block_type in [PT.INLINE_EQUATION,\n PT.EQUATION]:\n region = MathsRegionType(id=ID, Coords=coords)\n page.add_MathsRegion(region)\n og.add_RegionRefIndexed(RegionRefIndexedType(regionRef=ID, index=index))\n elif block_type == PT.TABLE:\n # without API access to 
StructuredTable we cannot\n # do much for a TableRegionType (i.e. nrows, ncols,\n # coordinates of cells for recursive regions etc),\n # but this can be achieved afterwards by segment-table\n region = TableRegionType(id=ID, Coords=coords)\n page.add_TableRegion(region)\n rogroup = OrderedGroupIndexedType(id=ID + '_order', regionRef=ID, index=index)\n og.add_OrderedGroupIndexed(rogroup)\n if self.parameter['textequiv_level'] == 'region':\n pass # impossible (see exception above)\n # todo: TableRegionType has no TextEquiv in PAGE\n # region.add_TextEquiv(TextEquivType(\n # Unicode=it.GetUTF8Text(RIL.BLOCK).rstrip(\"\\n\\f\"),\n # # iterator scores are arithmetic averages, too\n # conf=it.Confidence(RIL.BLOCK)/100.0))\n else:\n self._process_cells_in_table(it, region, rogroup, page_coords, mapping)\n else:\n region = NoiseRegionType(id=ID, Coords=coords)\n page.add_NoiseRegion(region)\n # \n # add orientation\n if isinstance(region, (TextRegionType, TableRegionType,\n ImageRegionType, MathsRegionType)):\n self._add_orientation(it, region, page_coords)\n #\n # iterator increment\n #\n index += 1\n if (not og.get_RegionRefIndexed() and\n not og.get_OrderedGroupIndexed() and\n not og.get_UnorderedGroupIndexed()):\n # schema forbids empty OrderedGroup\n ro.set_OrderedGroup(None)\n \n def _process_cells_in_table(self, result_it, region, rogroup, page_coords, mapping):\n if self.parameter['segmentation_level'] == 'cell':\n ril = RIL.BLOCK # for sparse_text mode\n else:\n ril = RIL.PARA # for \"cells\" in PT.TABLE block\n for index, it in enumerate(iterate_level(result_it, ril)):\n bbox = it.BoundingBox(ril, padding=self.parameter['padding'])\n if self.parameter['shrink_polygons'] and not it.Empty(RIL.SYMBOL):\n polygon = join_polygons([polygon_from_x0y0x1y1(\n symbol.BoundingBox(RIL.SYMBOL, padding=self.parameter['padding']))\n for symbol in iterate_level(it, RIL.SYMBOL, parent=ril)])\n if ril == RIL.BLOCK:\n # simulate a RestartBlock(), not defined by Tesseract:\n it.Begin()\n for j, it in enumerate(iterate_level(it, RIL.BLOCK)):\n if index == j:\n break\n else:\n it.RestartParagraph()\n else:\n polygon = polygon_from_x0y0x1y1(bbox)\n polygon = coordinates_for_segment(polygon, None, page_coords)\n polygon2 = polygon_for_parent(polygon, region)\n if polygon2 is not None:\n polygon = polygon2\n points = points_from_polygon(polygon)\n coords = CoordsType(points=points)\n if polygon2 is None:\n self.logger.info('Ignoring extant cell: %s', points)\n continue\n ID = region.id + \"_cell%04d\" % index\n self.logger.info(\"Detected cell '%s': %s\", ID, points)\n cell = TextRegionType(id=ID, Coords=coords)\n region.add_TextRegion(cell)\n self._add_orientation(it, cell, page_coords)\n if rogroup:\n rogroup.add_RegionRefIndexed(RegionRefIndexedType(regionRef=ID, index=index))\n if self.parameter['textequiv_level'] != 'cell':\n self._process_lines_in_region(it, cell, page_coords, mapping, parent_ril=ril)\n elif self.parameter.get('model', ''):\n cell.add_TextEquiv(TextEquivType(\n Unicode=it.GetUTF8Text(ril).rstrip(\"\\n\\f\"),\n # iterator scores are arithmetic averages, too\n conf=it.Confidence(ril)/100.0))\n\n def _process_lines_in_region(self, result_it, region, page_coords, mapping, parent_ril=RIL.BLOCK):\n if self.parameter['sparse_text']:\n it = result_it\n region.set_type(TextTypeSimpleType.OTHER)\n line = TextLineType(id=region.id + '_line',\n Coords=region.get_Coords())\n region.add_TextLine(line)\n if self.parameter['textequiv_level'] != 'line':\n self._process_words_in_line(it, line, page_coords, 
mapping)\n elif self.parameter.get('model', ''):\n # todo: consider BlankBeforeWord, SetLineSeparator\n line.add_TextEquiv(TextEquivType(\n Unicode=it.GetUTF8Text(RIL.TEXTLINE).rstrip(\"\\n\\f\"),\n # iterator scores are arithmetic averages, too\n conf=it.Confidence(RIL.TEXTLINE)/100.0))\n return\n for index, it in enumerate(iterate_level(result_it, RIL.TEXTLINE, parent=parent_ril)):\n bbox = it.BoundingBox(RIL.TEXTLINE, padding=self.parameter['padding'])\n if self.parameter['shrink_polygons'] and not it.Empty(RIL.SYMBOL):\n polygon = join_polygons([polygon_from_x0y0x1y1(\n symbol.BoundingBox(RIL.SYMBOL, padding=self.parameter['padding']))\n for symbol in iterate_level(it, RIL.SYMBOL, parent=RIL.TEXTLINE)])\n it.RestartRow()\n else:\n polygon = polygon_from_x0y0x1y1(bbox)\n polygon = coordinates_for_segment(polygon, None, page_coords)\n polygon2 = polygon_for_parent(polygon, region)\n if polygon2 is not None:\n polygon = polygon2\n points = points_from_polygon(polygon)\n coords = CoordsType(points=points)\n if polygon2 is None:\n self.logger.info('Ignoring extant line: %s', points)\n continue\n ID = region.id + \"_line%04d\" % index\n self.logger.info(\"Detected line '%s': %s\", ID, points)\n line = TextLineType(id=ID, Coords=coords)\n region.add_TextLine(line)\n if self.parameter['textequiv_level'] != 'line':\n self._process_words_in_line(it, line, page_coords, mapping)\n elif self.parameter.get('model', ''):\n # todo: consider BlankBeforeWord, SetLineSeparator\n line.add_TextEquiv(TextEquivType(\n Unicode=it.GetUTF8Text(RIL.TEXTLINE).rstrip(\"\\n\\f\"),\n # iterator scores are arithmetic averages, too\n conf=it.Confidence(RIL.TEXTLINE)/100.0))\n\n def _process_words_in_line(self, result_it, line, coords, mapping):\n for index, it in enumerate(iterate_level(result_it, RIL.WORD)):\n bbox = it.BoundingBox(RIL.WORD, padding=self.parameter['padding'])\n if self.parameter['shrink_polygons'] and not it.Empty(RIL.SYMBOL):\n polygon = join_polygons([polygon_from_x0y0x1y1(\n symbol.BoundingBox(RIL.SYMBOL, padding=self.parameter['padding']))\n for symbol in iterate_level(it, RIL.SYMBOL, parent=RIL.WORD)])\n # simulate a BeginWord(index), not exposed by tesserocr:\n it.RestartRow()\n for j, it in enumerate(iterate_level(it, RIL.WORD)):\n if index == j:\n break\n else:\n polygon = polygon_from_x0y0x1y1(bbox)\n polygon = coordinates_for_segment(polygon, None, coords)\n polygon2 = polygon_for_parent(polygon, line)\n if polygon2 is not None:\n polygon = polygon2\n points = points_from_polygon(polygon)\n if polygon2 is None:\n self.logger.info('Ignoring extant word: %s', points)\n continue\n ID = line.id + \"_word%04d\" % index\n self.logger.debug(\"Detected word '%s': %s\", ID, points)\n word = WordType(id=ID, Coords=CoordsType(points=points))\n line.add_Word(word)\n if self.parameter['textequiv_level'] != 'word':\n self._process_glyphs_in_word(it, word, coords, mapping)\n elif self.parameter.get('model', ''):\n word.add_TextEquiv(TextEquivType(\n Unicode=it.GetUTF8Text(RIL.WORD),\n # iterator scores are arithmetic averages, too\n conf=it.Confidence(RIL.WORD)/100.0))\n\n def _process_glyphs_in_word(self, result_it, word, coords, mapping):\n for index, it in enumerate(iterate_level(result_it, RIL.SYMBOL)):\n bbox = it.BoundingBox(RIL.SYMBOL, padding=self.parameter['padding'])\n polygon = polygon_from_x0y0x1y1(bbox)\n polygon = coordinates_for_segment(polygon, None, coords)\n polygon2 = polygon_for_parent(polygon, word)\n if polygon2 is not None:\n polygon = polygon2\n points = 
points_from_polygon(polygon)\n if polygon2 is None:\n self.logger.info('Ignoring extant glyph: %s', points)\n continue\n ID = word.id + '_glyph%04d' % index\n #self.logger.debug(\"Detected glyph '%s': %s\", ID, points)\n glyph = GlyphType(id=ID, Coords=CoordsType(points))\n word.add_Glyph(glyph)\n if self.parameter['textequiv_level'] != 'glyph':\n pass\n elif self.parameter.get('model', ''):\n glyph_text = it.GetUTF8Text(RIL.SYMBOL) # equals first choice?\n glyph_conf = it.Confidence(RIL.SYMBOL)/100 # equals first choice?\n #self.logger.debug('best glyph: \"%s\" [%f]', glyph_text, glyph_conf)\n glyph.add_TextEquiv(TextEquivType(\n index=0,\n Unicode=glyph_text,\n conf=glyph_conf))\n choice_it = it.GetChoiceIterator()\n for choice_no, choice in enumerate(choice_it, 1):\n alternative_text = choice.GetUTF8Text() or ''\n alternative_conf = choice.Confidence()/100\n if alternative_text == glyph_text:\n continue\n #self.logger.debug('alternative glyph: \"%s\" [%f]', alternative_text, alternative_conf)\n if (glyph_conf - alternative_conf > CHOICE_THRESHOLD_CONF or\n choice_no > CHOICE_THRESHOLD_NUM):\n break\n # todo: consider SymbolIsSuperscript (TextStyle), SymbolIsDropcap (RelationType) etc\n glyph.add_TextEquiv(TextEquivType(\n index=choice_no,\n Unicode=alternative_text,\n conf=alternative_conf))\n\n def _process_existing_tables(self, tessapi, tables, page, page_image, page_coords, mapping):\n # prepare dict of reading order\n reading_order = dict()\n ro = page.get_ReadingOrder()\n if not ro:\n self.logger.warning(\"Page contains no ReadingOrder\")\n rogroup = None\n else:\n rogroup = ro.get_OrderedGroup() or ro.get_UnorderedGroup()\n page_get_reading_order(reading_order, rogroup)\n segment_only = self.parameter['textequiv_level'] == 'none' or not self.parameter.get('model', '')\n # dive into tables\n for table in tables:\n cells = table.get_TextRegion()\n if cells:\n if not self.parameter['overwrite_segments']:\n self._process_existing_regions(tessapi, cells, page_image, page_coords, mapping)\n continue\n self.logger.info('Removing existing TextRegion cells in table %s', table.id)\n for cell in table.get_TextRegion():\n if cell.id in reading_order:\n regionref = reading_order[cell.id]\n self.logger.debug('removing cell %s ref %s', cell.id, regionref.regionRef)\n # could be any of the 6 types above:\n regionrefs = regionref.parent_object_.__getattribute__(\n regionref.__class__.__name__.replace('Type', ''))\n # remove in-place\n regionrefs.remove(regionref)\n del reading_order[cell.id]\n # TODO: adjust index to make contiguous again?\n table.set_TextRegion([])\n roelem = reading_order.get(table.id)\n if not roelem:\n self.logger.warning(\"Table '%s' is not referenced in reading order (%s)\",\n table.id, \"no target to add cells into\")\n elif isinstance(roelem, (OrderedGroupType, OrderedGroupIndexedType)):\n self.logger.warning(\"Table '%s' already has an ordered group (%s)\",\n table.id, \"cells will be appended\")\n elif isinstance(roelem, (UnorderedGroupType, UnorderedGroupIndexedType)):\n self.logger.warning(\"Table '%s' already has an unordered group (%s)\",\n table.id, \"cells will not be appended\")\n roelem = None\n elif isinstance(roelem, RegionRefIndexedType):\n # replace regionref by group with same index and ref\n # (which can then take the cells as subregions)\n roelem2 = OrderedGroupIndexedType(id=table.id + '_order',\n index=roelem.index,\n regionRef=roelem.regionRef)\n roelem.parent_object_.add_OrderedGroupIndexed(roelem2)\n 
roelem.parent_object_.get_RegionRefIndexed().remove(roelem)\n roelem = roelem2\n elif isinstance(roelem, RegionRefType):\n # replace regionref by group with same ref\n # (which can then take the cells as subregions)\n roelem2 = OrderedGroupType(id=table.id + '_order',\n regionRef=roelem.regionRef)\n roelem.parent_object_.add_OrderedGroup(roelem2)\n roelem.parent_object_.get_RegionRef().remove(roelem)\n roelem = roelem2\n # set table image\n table_image, table_coords = self.workspace.image_from_segment(\n table, page_image, page_coords)\n if not table_image.width or not table_image.height:\n self.logger.warning(\"Skipping table region '%s' with zero size\", table.id)\n continue\n if self.parameter['padding']:\n tessapi.SetImage(pad_image(table_image, self.parameter['padding']))\n table_coords['transform'] = shift_coordinates(\n table_coords['transform'], 2*[self.parameter['padding']])\n else:\n tessapi.SetImage(table_image)\n tessapi.SetPageSegMode(PSM.SPARSE_TEXT) # retrieve \"cells\"\n # TODO: we should XY-cut the sparse cells in regroup them into consistent cells\n if segment_only:\n self.logger.debug(\"Detecting cells in table '%s'\", table.id)\n tessapi.AnalyseLayout()\n else:\n self._reinit(tessapi, table, mapping)\n self.logger.debug(\"Recognizing text in table '%s'\", table.id)\n tessapi.Recognize()\n self._process_cells_in_table(tessapi.GetIterator(), table, roelem, table_coords, mapping)\n \n def _process_existing_regions(self, tessapi, regions, page_image, page_coords, mapping):\n if self.parameter['textequiv_level'] in ['region', 'cell'] and not self.parameter.get('model', ''):\n return\n segment_only = self.parameter['textequiv_level'] == 'none' or not self.parameter.get('model', '')\n for region in regions:\n region_image, region_coords = self.workspace.image_from_segment(\n region, page_image, page_coords)\n if not region_image.width or not region_image.height:\n self.logger.warning(\"Skipping text region '%s' with zero size\", region.id)\n continue\n if (region.get_TextEquiv() and not self.parameter['overwrite_text']\n if self.parameter['textequiv_level'] in ['region', 'cell']\n else self.parameter['segmentation_level'] != 'line'):\n pass # image not used here\n elif self.parameter['padding']:\n region_image = pad_image(region_image, self.parameter['padding'])\n tessapi.SetImage(region_image)\n region_coords['transform'] = shift_coordinates(\n region_coords['transform'], 2*[self.parameter['padding']])\n else:\n tessapi.SetImage(region_image)\n tessapi.SetPageSegMode(PSM.SINGLE_BLOCK)\n if not segment_only:\n self._reinit(tessapi, region, mapping)\n # cell (region in table): we could enter from existing_tables or top-level existing regions\n if self.parameter['textequiv_level'] in ['region', 'cell']:\n #if region.get_primaryScript() not in tessapi.GetLoadedLanguages()...\n if region.get_TextEquiv():\n if not self.parameter['overwrite_text']:\n continue\n self.logger.warning(\"Region '%s' already contained text results\", region.id)\n region.set_TextEquiv([])\n self.logger.debug(\"Recognizing text in region '%s'\", region.id)\n # todo: consider SetParagraphSeparator\n region.add_TextEquiv(TextEquivType(\n Unicode=tessapi.GetUTF8Text().rstrip(\"\\n\\f\"),\n # iterator scores are arithmetic averages, too\n conf=tessapi.MeanTextConf()/100.0))\n continue # next region (to avoid indentation below)\n ## line, word, or glyph level:\n textlines = region.get_TextLine()\n if self.parameter['segmentation_level'] == 'line' and (\n not textlines or self.parameter['overwrite_segments']):\n 
if textlines:\n self.logger.info('Removing existing text lines in region %s', region.id)\n region.set_TextLine([])\n if segment_only:\n self.logger.debug(\"Detecting lines in region '%s'\", region.id)\n tessapi.AnalyseLayout()\n else:\n self.logger.debug(\"Recognizing text in region '%s'\", region.id)\n tessapi.Recognize()\n self._process_lines_in_region(tessapi.GetIterator(), region, region_coords, mapping)\n elif textlines:\n self._process_existing_lines(tessapi, textlines, region_image, region_coords, mapping)\n else:\n self.logger.warning(\"Region '%s' contains no text lines (but segmentation is off)\",\n region.id)\n\n def _process_existing_lines(self, tessapi, textlines, region_image, region_coords, mapping):\n if self.parameter['textequiv_level'] == 'line' and not self.parameter.get('model', ''):\n return\n segment_only = self.parameter['textequiv_level'] == 'none' or not self.parameter.get('model', '')\n for line in textlines:\n line_image, line_coords = self.workspace.image_from_segment(\n line, region_image, region_coords)\n if not line_image.width or not line_image.height:\n self.logger.warning(\"Skipping text line '%s' with zero size\", line.id)\n continue\n if (line.get_TextEquiv() and not self.parameter['overwrite_text']\n if self.parameter['textequiv_level'] == 'line'\n else self.parameter['segmentation_level'] != 'word'):\n pass # image not used here\n elif self.parameter['padding']:\n line_image = pad_image(line_image, self.parameter['padding'])\n tessapi.SetImage(line_image)\n line_coords['transform'] = shift_coordinates(\n line_coords['transform'], 2*[self.parameter['padding']])\n else:\n tessapi.SetImage(line_image)\n if self.parameter['raw_lines']:\n tessapi.SetPageSegMode(PSM.RAW_LINE)\n else:\n tessapi.SetPageSegMode(PSM.SINGLE_LINE)\n if not segment_only:\n self._reinit(tessapi, line, mapping)\n #if line.get_primaryScript() not in tessapi.GetLoadedLanguages()...\n if self.parameter['textequiv_level'] == 'line':\n if line.get_TextEquiv():\n if not self.parameter['overwrite_text']:\n continue\n self.logger.warning(\"Line '%s' already contained text results\", line.id)\n line.set_TextEquiv([])\n self.logger.debug(\"Recognizing text in line '%s'\", line.id)\n # todo: consider BlankBeforeWord, SetLineSeparator\n line.add_TextEquiv(TextEquivType(\n Unicode=tessapi.GetUTF8Text().rstrip(\"\\n\\f\"),\n # iterator scores are arithmetic averages, too\n conf=tessapi.MeanTextConf()/100.0))\n continue # next line (to avoid indentation below)\n ## word, or glyph level:\n words = line.get_Word()\n if self.parameter['segmentation_level'] == 'word' and (\n not words or self.parameter['overwrite_segments']):\n if words:\n self.logger.info('Removing existing words in line %s', line.id)\n line.set_Word([])\n if segment_only:\n self.logger.debug(\"Detecting words in line '%s'\", line.id)\n tessapi.AnalyseLayout()\n else:\n self.logger.debug(\"Recognizing text in line '%s'\", line.id)\n tessapi.Recognize()\n ## internal word and glyph layout:\n self._process_words_in_line(tessapi.GetIterator(), line, line_coords, mapping)\n elif words:\n ## external word layout:\n self.logger.warning(\"Line '%s' contains words already, recognition might be suboptimal\", line.id)\n self._process_existing_words(tessapi, words, line_image, line_coords, mapping)\n else:\n self.logger.warning(\"Line '%s' contains no words (but segmentation if off)\",\n line.id)\n\n def _process_existing_words(self, tessapi, words, line_image, line_coords, mapping):\n if self.parameter['textequiv_level'] == 'word' and not 
self.parameter.get('model', ''):\n return\n segment_only = self.parameter['textequiv_level'] == 'none' or not self.parameter.get('model', '')\n for word in words:\n word_image, word_coords = self.workspace.image_from_segment(\n word, line_image, line_coords)\n if not word_image.width or not word_image.height:\n self.logger.warning(\"Skipping word '%s' with zero size\", word.id)\n continue\n if (word.get_TextEquiv() and not self.parameter['overwrite_text']\n if self.parameter['textequiv_level'] == 'word'\n else self.parameter['segmentation_level'] != 'glyph'):\n pass # image not used here\n elif self.parameter['padding']:\n word_image = pad_image(word_image, self.parameter['padding'])\n tessapi.SetImage(word_image)\n word_coords['transform'] = shift_coordinates(\n word_coords['transform'], 2*[self.parameter['padding']])\n else:\n tessapi.SetImage(word_image)\n tessapi.SetPageSegMode(PSM.SINGLE_WORD)\n if not segment_only:\n self._reinit(tessapi, word, mapping)\n if self.parameter['textequiv_level'] == 'word':\n if word.get_TextEquiv():\n if not self.parameter['overwrite_text']:\n continue\n self.logger.warning(\"Word '%s' already contained text results\", word.id)\n word.set_TextEquiv([])\n self.logger.debug(\"Recognizing text in word '%s'\", word.id)\n word_conf = tessapi.AllWordConfidences()\n word.add_TextEquiv(TextEquivType(\n Unicode=tessapi.GetUTF8Text().rstrip(\"\\n\\f\"),\n conf=word_conf[0]/100.0 if word_conf else 0.0))\n continue # next word (to avoid indentation below)\n ## glyph level:\n glyphs = word.get_Glyph()\n if self.parameter['segmentation_level'] == 'glyph' and (\n not glyphs or self.parameter['overwrite_segments']):\n if glyphs:\n self.logger.info('Removing existing glyphs in word %s', word.id)\n word.set_Glyph([])\n if segment_only:\n self.logger.debug(\"Detecting glyphs in word '%s'\", word.id)\n tessapi.AnalyseLayout()\n else:\n self.logger.debug(\"Recognizing text in word '%s'\", word.id)\n tessapi.Recognize()\n ## internal glyph layout:\n self._process_glyphs_in_word(tessapi.GetIterator(), word, word_coords, mapping)\n elif glyphs:\n ## external glyph layout:\n self.logger.warning(\"Word '%s' contains glyphs already, recognition might be suboptimal\", word.id)\n self._process_existing_glyphs(tessapi, glyphs, word_image, word_coords, mapping)\n else:\n self.logger.warning(\"Word '%s' contains no glyphs (but segmentation if off)\",\n word.id)\n\n def _process_existing_glyphs(self, tessapi, glyphs, word_image, word_xywh, mapping):\n if not self.parameter.get('model', ''):\n return\n for glyph in glyphs:\n glyph_image, _ = self.workspace.image_from_segment(\n glyph, word_image, word_xywh)\n if not glyph_image.width or not glyph_image.height:\n self.logger.warning(\"Skipping glyph '%s' with zero size\", glyph.id)\n continue\n if glyph.get_TextEquiv() and not self.parameter['overwrite_text']:\n pass # image not used here\n elif self.parameter['padding']:\n tessapi.SetImage(pad_image(glyph_image, self.parameter['padding']))\n else:\n tessapi.SetImage(glyph_image)\n tessapi.SetPageSegMode(PSM.SINGLE_CHAR)\n self._reinit(tessapi, glyph, mapping)\n if glyph.get_TextEquiv():\n if not self.parameter['overwrite_text']:\n continue\n self.logger.warning(\"Glyph '%s' already contained text results\", glyph.id)\n glyph.set_TextEquiv([])\n self.logger.debug(\"Recognizing text in glyph '%s'\", glyph.id)\n glyph_text = tessapi.GetUTF8Text().rstrip(\"\\n\\f\")\n glyph_conf = tessapi.AllWordConfidences()\n glyph_conf = glyph_conf[0]/100.0 if glyph_conf else 1.0\n 
#self.logger.debug('best glyph: \"%s\" [%f]', glyph_text, glyph_conf)\n glyph.add_TextEquiv(TextEquivType(\n index=0,\n Unicode=glyph_text,\n conf=glyph_conf))\n result_it = tessapi.GetIterator()\n if not result_it or result_it.Empty(RIL.SYMBOL):\n self.logger.error(\"No text in glyph '%s'\", glyph.id)\n continue\n choice_it = result_it.GetChoiceIterator()\n for choice_no, choice in enumerate(choice_it, 1):\n alternative_text = choice.GetUTF8Text()\n alternative_conf = choice.Confidence()/100\n if alternative_text == glyph_text:\n continue\n #self.logger.debug('alternative glyph: \"%s\" [%f]', alternative_text, alternative_conf)\n if (glyph_conf - alternative_conf > CHOICE_THRESHOLD_CONF or\n choice_no > CHOICE_THRESHOLD_NUM):\n break\n # todo: consider SymbolIsSuperscript (TextStyle), SymbolIsDropcap (RelationType) etc\n glyph.add_TextEquiv(TextEquivType(\n index=choice_no,\n Unicode=alternative_text,\n conf=alternative_conf))\n \n def _add_orientation(self, result_it, region, coords):\n # Tesseract layout analysis already rotates the image, even for each\n # sub-segment (depending on RIL).\n # (These images can be queried via GetBinaryImage/GetImage, cf. segment_region)\n # Unfortunately, it does _not_ use expand=True, but chops off corners.\n # So the accuracy is not as good as setting the image to the sub-segments and\n # running without iterator. But there are other reasons to do all-in-one\n # segmentation (like overlaps), and its up to the user now.\n # Here we don't know whether the iterator will be used or the created PAGE segments.\n # For the latter case at least, we must annotate the angle, so the segment image\n # can be rotated before the next step.\n orientation, writing_direction, textline_order, deskew_angle = result_it.Orientation()\n # defined as 'how many radians does one have to rotate the block anti-clockwise'\n # i.e. positive amount to be applied counter-clockwise for deskewing:\n deskew_angle *= 180 / math.pi\n self.logger.debug('orientation/deskewing for %s: %s / %s / %s / %.3f°', region.id,\n membername(Orientation, orientation),\n membername(WritingDirection, writing_direction),\n membername(TextlineOrder, textline_order),\n deskew_angle)\n # defined as 'the amount of clockwise rotation to be applied to the input image'\n # i.e. the negative amount to be applied counter-clockwise for deskewing:\n # (as defined in Tesseract OrientationIdToValue):\n angle = {\n Orientation.PAGE_RIGHT: 90,\n Orientation.PAGE_DOWN: 180,\n Orientation.PAGE_LEFT: 270\n }.get(orientation, 0)\n # annotate result:\n angle += deskew_angle\n # get deskewing (w.r.t. 
top image) already applied to image\n angle0 = coords['angle']\n # page angle: PAGE @orientation is defined clockwise,\n # whereas PIL/ndimage rotation is in mathematical direction:\n orientation = -(angle + angle0)\n orientation = 180 - (180 - orientation) % 360 # map to [-179.999,180]\n region.set_orientation(orientation)\n if isinstance(region, TextRegionType):\n region.set_readingDirection({\n WritingDirection.LEFT_TO_RIGHT: 'left-to-right',\n WritingDirection.RIGHT_TO_LEFT: 'right-to-left',\n WritingDirection.TOP_TO_BOTTOM: 'top-to-bottom'\n }.get(writing_direction, 'bottom-to-top'))\n region.set_textLineOrder({\n TextlineOrder.LEFT_TO_RIGHT: 'left-to-right',\n TextlineOrder.RIGHT_TO_LEFT: 'right-to-left',\n TextlineOrder.TOP_TO_BOTTOM: 'top-to-bottom'\n }.get(textline_order, 'bottom-to-top'))\n \n def _reinit(self, tessapi, segment, mapping):\n \"\"\"Reset Tesseract API to initial state, and apply API-level settings for the given segment.\n \n If ``xpath_parameters`` is used, try each XPath expression against ``segment``,\n and in case of a match, apply given parameters, respectively.\n \n If ``xpath_model`` is used, try each XPath expression against ``segment``,\n and in case of a match, load the given language/model, respectively.\n \n If ``auto_model`` is used, and no ``xpath_model`` was applied yet,\n try each given language/model individually on ``segment``, compare\n their confidences, and load the best-scoring language/model.\n \n Before returning, store all previous settings (to catch by the next call).\n \"\"\"\n # Tesseract API is stateful but does not allow copy constructors\n # for segment-by-segment configuration we therefore need to\n # re-initialize the API with the currently loaded settings,\n # and add some custom choices\n node = mapping.get(id(segment), None)\n tag = segment.__class__.__name__[:-4]\n if hasattr(segment, 'id'):\n at_ident = 'id'\n else:\n at_ident = 'imageFilename'\n ident = getattr(segment, at_ident)\n with tessapi:\n # apply temporary changes\n if self.parameter['xpath_parameters']:\n if node is not None and node.attrib.get(at_ident, None) == ident:\n ns = {'re': 'http://exslt.org/regular-expressions',\n 'pc': node.nsmap[node.prefix],\n node.prefix: node.nsmap[node.prefix]}\n for xpath, params in self.parameter['xpath_parameters'].items():\n if node.xpath(xpath, namespaces=ns):\n self.logger.info(\"Found '%s' in '%s', setting '%s'\",\n xpath, ident, params)\n for name, val in params.items():\n tessapi.SetVariable(name, val)\n else:\n self.logger.error(\"Cannot find segment '%s' in etree mapping, \" \\\n \"ignoring xpath_parameters\", ident)\n if self.parameter['xpath_model']:\n if node is not None and node.attrib.get(at_ident, None) == ident:\n ns = {'re': 'http://exslt.org/regular-expressions',\n 'pc': node.nsmap[node.prefix],\n node.prefix: node.nsmap[node.prefix]}\n models = []\n for xpath, model in self.parameter['xpath_model'].items():\n if node.xpath(xpath, namespaces=ns):\n self.logger.info(\"Found '%s' in '%s', reloading with '%s'\",\n xpath, ident, model)\n models.append(model)\n if models:\n model = '+'.join(models)\n self.logger.debug(\"Reloading model '%s' for %s '%s'\", model, tag, ident)\n tessapi.Reset(lang=model)\n return\n else:\n self.logger.error(\"Cannot find segment '%s' in etree mapping, \" \\\n \"ignoring xpath_model\", ident)\n if self.parameter['auto_model']:\n models = self.parameter['model'].split('+')\n if len(models) > 1:\n confs = list()\n for model in models:\n tessapi.Reset(lang=model)\n tessapi.Recognize()\n 
confs.append(tessapi.MeanTextConf())\n model = models[np.argmax(confs)]\n self.logger.debug(\"Reloading best model '%s' for %s '%s'\", model, tag, ident)\n tessapi.Reset(lang=model)\n return\n if self.parameter['xpath_model'] or self.parameter['auto_model']:\n # default: undo all settings from previous calls (reset to init-state)\n tessapi.Reset()\n\ndef page_element_unicode0(element):\n \"\"\"Get Unicode string of the first text result.\"\"\"\n if element.get_TextEquiv():\n return element.get_TextEquiv()[0].Unicode or ''\n else:\n return ''\n\ndef page_element_conf0(element):\n \"\"\"Get confidence (as float value) of the first text result.\"\"\"\n if element.get_TextEquiv():\n # generateDS does not convert simpleType for attributes (yet?)\n return float(element.get_TextEquiv()[0].conf or \"1.0\")\n return 1.0\n\ndef page_get_reading_order(ro, rogroup):\n \"\"\"Add all elements from the given reading order group to the given dictionary.\n \n Given a dict ``ro`` from layout element IDs to ReadingOrder element objects,\n and an object ``rogroup`` with additional ReadingOrder element objects,\n add all references to the dict, traversing the group recursively.\n \"\"\"\n regionrefs = list()\n if isinstance(rogroup, (OrderedGroupType, OrderedGroupIndexedType)):\n regionrefs = (rogroup.get_RegionRefIndexed() +\n rogroup.get_OrderedGroupIndexed() +\n rogroup.get_UnorderedGroupIndexed())\n if isinstance(rogroup, (UnorderedGroupType, UnorderedGroupIndexedType)):\n regionrefs = (rogroup.get_RegionRef() +\n rogroup.get_OrderedGroup() +\n rogroup.get_UnorderedGroup())\n for elem in regionrefs:\n ro[elem.get_regionRef()] = elem\n if not isinstance(elem, (RegionRefType, RegionRefIndexedType)):\n page_get_reading_order(ro, elem)\n \ndef page_update_higher_textequiv_levels(level, pcgts, overwrite=True):\n \"\"\"Update the TextEquivs of all PAGE-XML hierarchy levels above ``level`` for consistency.\n \n Starting with the lowest hierarchy level chosen for processing,\n join all first TextEquiv.Unicode (by the rules governing the respective level)\n into TextEquiv.Unicode of the next higher level, replacing them.\n If ``overwrite`` is false and the higher level already has text, keep it.\n \n When two successive elements appear in a ``Relation`` of type ``join``,\n then join them directly (without their respective white space).\n \n Likewise, average all first TextEquiv.conf into TextEquiv.conf of the next higher level.\n \n In the process, traverse the words and lines in their respective ``readingDirection``,\n the (text) regions which contain lines in their respective ``textLineOrder``, and\n the (text) regions which contain text regions in their ``ReadingOrder``\n (if they appear there as an ``OrderedGroup``).\n Where no direction/order can be found, use XML ordering.\n \n Follow regions recursively, but make sure to traverse them in a depth-first strategy.\n \"\"\"\n page = pcgts.get_Page()\n relations = page.get_Relations() # get RelationsType\n if relations:\n relations = relations.get_Relation() # get list of RelationType\n else:\n relations = []\n joins = list() # \n for relation in relations:\n if relation.get_type() == 'join': # ignore 'link' type here\n joins.append((relation.get_SourceRegionRef().get_regionRef(),\n relation.get_TargetRegionRef().get_regionRef()))\n reading_order = dict()\n ro = page.get_ReadingOrder()\n if ro:\n page_get_reading_order(reading_order, ro.get_OrderedGroup() or ro.get_UnorderedGroup())\n if level != 'region':\n for region in page.get_AllRegions(classes=['Text']):\n 
# order is important here, because regions can be recursive,\n # and we want to concatenate by depth first;\n # typical recursion structures would be:\n # - TextRegion/@type=paragraph inside TextRegion\n # - TextRegion/@type=drop-capital followed by TextRegion/@type=paragraph inside TextRegion\n # - any region (including TableRegion or TextRegion) inside a TextRegion/@type=footnote\n # - TextRegion inside TableRegion\n subregions = region.get_TextRegion()\n if subregions: # already visited in earlier iterations\n # do we have a reading order for these?\n # TODO: what if at least some of the subregions are in reading_order?\n if (all(subregion.id in reading_order for subregion in subregions) and\n isinstance(reading_order[subregions[0].id], # all have .index?\n (OrderedGroupType, OrderedGroupIndexedType))):\n subregions = sorted(subregions, key=lambda subregion:\n reading_order[subregion.id].index)\n region_unicode = page_element_unicode0(subregions[0])\n for subregion, next_subregion in zip(subregions, subregions[1:]):\n if (subregion.id, next_subregion.id) not in joins:\n region_unicode += '\\n' # or '\\f'?\n region_unicode += page_element_unicode0(next_subregion)\n region_conf = sum(page_element_conf0(subregion) for subregion in subregions)\n region_conf /= len(subregions)\n else: # TODO: what if a TextRegion has both TextLine and TextRegion children?\n lines = region.get_TextLine()\n if ((region.get_textLineOrder() or\n page.get_textLineOrder()) ==\n TextLineOrderSimpleType.BOTTOMTOTOP):\n lines = list(reversed(lines))\n if level != 'line':\n for line in lines:\n words = line.get_Word()\n if ((line.get_readingDirection() or\n region.get_readingDirection() or\n page.get_readingDirection()) ==\n ReadingDirectionSimpleType.RIGHTTOLEFT):\n words = list(reversed(words))\n if level != 'word':\n for word in words:\n glyphs = word.get_Glyph()\n if ((word.get_readingDirection() or\n line.get_readingDirection() or\n region.get_readingDirection() or\n page.get_readingDirection()) ==\n ReadingDirectionSimpleType.RIGHTTOLEFT):\n glyphs = list(reversed(glyphs))\n word_unicode = ''.join(page_element_unicode0(glyph) for glyph in glyphs)\n word_conf = sum(page_element_conf0(glyph) for glyph in glyphs)\n if glyphs:\n word_conf /= len(glyphs)\n if not word.get_TextEquiv() or overwrite:\n word.set_TextEquiv( # replace old, if any\n [TextEquivType(Unicode=word_unicode, conf=word_conf)])\n line_unicode = ' '.join(page_element_unicode0(word) for word in words)\n line_conf = sum(page_element_conf0(word) for word in words)\n if words:\n line_conf /= len(words)\n if not line.get_TextEquiv() or overwrite:\n line.set_TextEquiv( # replace old, if any\n [TextEquivType(Unicode=line_unicode, conf=line_conf)])\n region_unicode = ''\n region_conf = 0\n if lines:\n region_unicode = page_element_unicode0(lines[0])\n for line, next_line in zip(lines, lines[1:]):\n words = line.get_Word()\n next_words = next_line.get_Word()\n if not(words and next_words and (words[-1].id, next_words[0].id) in joins):\n region_unicode += '\\n'\n region_unicode += page_element_unicode0(next_line)\n region_conf = sum(page_element_conf0(line) for line in lines)\n region_conf /= len(lines)\n if not region.get_TextEquiv() or overwrite:\n region.set_TextEquiv( # replace old, if any\n [TextEquivType(Unicode=region_unicode, conf=region_conf)])\n\ndef page_shrink_higher_coordinate_levels(maxlevel, minlevel, pcgts):\n \"\"\"Project the coordinate hull of all PAGE-XML hierarchy levels above ``minlevel`` up to ``maxlevel``.\n \n Starting with the lowest 
hierarchy level chosen for processing,\n join all segments into a convex hull for the next higher level,\n replacing the parent coordinates, respectively.\n \n Follow regions recursively, but make sure to traverse them in a depth-first strategy.\n \"\"\"\n LOG = getLogger('processor.TesserocrRecognize')\n page = pcgts.get_Page()\n regions = page.get_AllRegions(classes=['Text'])\n if minlevel != 'region':\n for region in regions:\n lines = region.get_TextLine()\n if minlevel != 'line':\n for line in lines:\n words = line.get_Word()\n if minlevel != 'word':\n for word in words:\n glyphs = word.get_Glyph()\n if maxlevel in ['region', 'line', 'word', 'glyph'] and glyphs:\n joint_polygon = join_segments(glyphs)\n LOG.debug(\"setting hull for word '%s' from %d vertices\",\n word.id, len(joint_polygon))\n word.get_Coords().set_points(points_from_polygon(joint_polygon))\n if maxlevel in ['region', 'line', 'word'] and words:\n joint_polygon = join_segments(words)\n LOG.debug(\"setting hull for line '%s' from %d vertices\",\n line.id, len(joint_polygon))\n line.get_Coords().set_points(points_from_polygon(joint_polygon))\n if maxlevel in ['region', 'line'] and lines:\n joint_polygon = join_segments(lines)\n LOG.debug(\"setting hull for region '%s' from %d vertices\",\n region.id, len(joint_polygon))\n region.get_Coords().set_points(points_from_polygon(joint_polygon))\n\ndef join_segments(segments):\n return join_polygons([polygon_from_points(segment.get_Coords().points)\n for segment in segments])\n\ndef join_polygons(polygons, extend=2):\n # FIXME: construct concave hull / alpha shape\n jointp = unary_union([make_valid(Polygon(polygon)).buffer(extend)\n for polygon in polygons]).convex_hull\n if jointp.minimum_clearance < 1.0:\n # follow-up calculations will necessarily be integer;\n # so anticipate rounding here and then ensure validity\n jointp = asPolygon(np.round(jointp.exterior.coords))\n jointp = make_valid(jointp)\n return jointp.exterior.coords[:-1]\n\ndef pad_image(image, padding):\n # TODO: input padding can create extra edges if not binarized; at least try to smooth\n stat = ImageStat.Stat(image)\n # workaround for Pillow#4925\n if len(stat.bands) > 1:\n background = tuple(stat.median)\n else:\n background = stat.median[0]\n padded = Image.new(image.mode,\n (image.width + 2 * padding,\n image.height + 2 * padding),\n background)\n padded.paste(image, (padding, padding))\n return padded\n\ndef polygon_for_parent(polygon, parent):\n \"\"\"Clip polygon to parent polygon range.\n \n (Should be moved to ocrd_utils.coordinates_for_segment.)\n \"\"\"\n childp = Polygon(polygon)\n if isinstance(parent, PageType):\n if parent.get_Border():\n parentp = Polygon(polygon_from_points(parent.get_Border().get_Coords().points))\n else:\n parentp = Polygon([[0, 0], [0, parent.get_imageHeight()],\n [parent.get_imageWidth(), parent.get_imageHeight()],\n [parent.get_imageWidth(), 0]])\n else:\n parentp = Polygon(polygon_from_points(parent.get_Coords().points))\n # ensure input coords have valid paths (without self-intersection)\n # (this can happen when shapes valid in floating point are rounded)\n childp = make_valid(childp)\n parentp = make_valid(parentp)\n if not childp.is_valid:\n return None\n if not parentp.is_valid:\n return None\n # check if clipping is necessary\n if childp.within(parentp):\n return childp.exterior.coords[:-1]\n # clip to parent\n interp = childp.intersection(parentp)\n if interp.is_empty or interp.area == 0.0:\n # this happens if Tesseract \"finds\" something\n # outside of the 
valid Border of a deskewed/cropped page\n # (empty corners created by masking); will be ignored\n return None\n if interp.type == 'GeometryCollection':\n # heterogeneous result: filter zero-area shapes (LineString, Point)\n interp = unary_union([geom for geom in interp.geoms if geom.area > 0])\n if interp.type == 'MultiPolygon':\n # homogeneous result: construct convex hull to connect\n # FIXME: construct concave hull / alpha shape\n interp = interp.convex_hull\n if interp.minimum_clearance < 1.0:\n # follow-up calculations will necessarily be integer;\n # so anticipate rounding here and then ensure validity\n interp = asPolygon(np.round(interp.exterior.coords))\n interp = make_valid(interp)\n return interp.exterior.coords[:-1] # keep open\n\ndef make_valid(polygon):\n for split in range(1, len(polygon.exterior.coords)-1):\n if polygon.is_valid or polygon.simplify(polygon.area).is_valid:\n break\n # simplification may not be possible (at all) due to ordering\n # in that case, try another starting point\n polygon = Polygon(polygon.exterior.coords[-split:]+polygon.exterior.coords[:-split])\n for tolerance in range(1, int(polygon.area)):\n if polygon.is_valid:\n break\n # simplification may require a larger tolerance\n polygon = polygon.simplify(tolerance)\n return polygon\n\ndef iterate_level(it, ril, parent=None):\n LOG = getLogger('processor.TesserocrRecognize')\n # improves over tesserocr.iterate_level by\n # honouring multi-level semantics so iterators\n # can be combined across levels\n if parent is None:\n parent = ril - 1\n pos = 0\n while it and not it.Empty(ril):\n yield it\n # With upstream Tesseract, these assertions may fail:\n # if ril > 0 and it.IsAtFinalElement(parent, ril):\n # for level in range(parent, ril):\n # assert it.IsAtFinalElement(parent, level), \\\n # \"level %d iterator at %d is final w.r.t. %d but level %d is not\" % (\n # ril, pos, parent, level)\n # Hence the following workaround avails itself:\n if ril > 0 and all(it.IsAtFinalElement(parent, level)\n for level in range(parent, ril + 1)):\n break\n if not it.Next(ril):\n break\n while it.Empty(ril) and not it.Empty(0):\n # This happens when\n # - on RIL.PARA, RIL.TEXTLINE and RIL.WORD,\n # empty non-text (pseudo-) blocks intervene\n # - on RIL.SYMBOL, a word has no cblobs at all\n # (because they have all been rejected)\n # We must _not_ yield these (as they have strange\n # properties and bboxes). But most importantly,\n # they will have met IsAtFinalElement prematurely\n # (hence the similar loop above).\n # Since this may happen multiple consecutive times,\n # enclose this in a while loop.\n LOG.warning(\"level %d iterator at %d needs to skip empty segment\",\n ril, pos)\n if not it.Next(ril):\n break\n pos += 1\n" ]
[ [ "numpy.round", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
colinsongf/opinion_reader_bert
[ "ec036fb27161ba454b865e387ed601fe10f57b28" ]
[ "script/test.py" ]
[ "import tensorflow as tf\n\nprint(tf.__version__)\n\nwith tf.device('/device:GPU:0'):\n a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n" ]
[ [ "tensorflow.ConfigProto", "tensorflow.device", "tensorflow.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
mansueto-institute/Urban-Growth-Emergent-Statistics
[ "429f24f7c6c4fdd229b9c9f818197f56910b466d", "429f24f7c6c4fdd229b9c9f818197f56910b466d" ]
[ "Fig4C.py", "Fig2C.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.mlab as mlab\nimport scipy.stats as stats\n\nbbar=0.2\nabar=0.1\n\nK1=2. # This is like Hook's constant or the curvature of the potential that keeps the noise localized \n#K0=5.\nK0=(4.*K1)**0.5 # This is the damping\nK2=0. # This is like a mass correction. \nmu=0.\nvar = 0.0001\nsigma= var**0.5\n\nv_error=[]\nv_PV=[]\n\n\nEND=100\n#initializations\nDt=.2\np_error=0.\nset_point=0.1\nPV=set_point\noutput = 0.\nintegral=0.\nq=0.\np=0.\nerror=0.\nperror=0.\nold_v=0.\nPPV=0.\n\n\nv_set=[]\nv_q=[]\n\n\nvec_v=[]\nvec_u=[]\n\ntime_v=[]\na=[]\nb=[]\n\n\nfor i in range(1, END):\n time_v.append(Dt*i)\n\n#### this is the process, or income part of the dynamics, \n s = np.random.normal(mu, sigma, 1)\n v = 0.05*np.sin(i*Dt/1.) + s[0]/Dt**0.5\n v=s[0]/Dt**0.5\n dv=v\n #(v-old_v)\n b.append(1.1 +v)\n vec_v.append(v)\n \n#### This computes the PID control u\n \n integral = integral + error * Dt\n# derivative = (error - p_error) / Dt\n \n u = K0*error + K1*integral\n #+ K2*derivative\n\n PV=PV + Dt*u - Dt*dv # this is b-a, which fluctuates around the set point \n error = set_point - PV # thus the error is measured relative to the set point.\n\n# p_error=error # this just updates the error for the derivative term.\n\n v_PV.append(PV)\n v_set.append(set_point)\n a.append(1.0 +u) # this is the cost, it has a mean value plus control.\n\n \n#### This is the stochastic system for the error = q \n\n q = q + Dt*p\n p = p - Dt*(K1*q+K0*p) + Dt*dv # this is the stochastic system we should be getting ...\n v_q.append(q)\n# vec_u.append(p+dv)\n#######\n\n v_error.append(error)\n old_v=v\n\n\nfig, ax = plt.subplots()\nee=[]\nfor i in range(len(b)):\n ee.append(b[i]-a[i])\n\nplt.plot(time_v,ee,'g-')\nax.axhspan(0.1,0.1, alpha=0.7, color='green')\nplt.ylabel(r'${\\bar \\eta}, \\epsilon(t)$',fontsize=20)\nplt.xlabel('time',fontsize=20)\nplt.tight_layout()\nplt.savefig('Fig4C.pdf')\nplt.show()\n", "import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nimport csv\nfrom matplotlib import cm\nimport matplotlib.colors as colors\nfrom colorsys import hsv_to_rgb\nfrom matplotlib import pylab\n\ndef linreg(X, Y):\n \"\"\"\n Summary\n Linear regression of y = ax + b\n Usage\n real, real, real = linreg(list, list)\n Returns coefficients to the regression line \"y=ax+b\" from x[] and y[], and R^2 Value\n \"\"\"\n if len(X) != len(Y): raise ValueError(\"unequal length\")\n N = len(X)\n Sx = Sy = Sxx = Syy = Sxy = 0.0\n for x, y in zip(X, Y):\n Sx = Sx + x\n Sy = Sy + y\n Sxx = Sxx + x*x\n Syy = Syy + y*y\n Sxy = Sxy + x*y\n det = Sxx * N - Sx * Sx\n a, b = (Sxy * N - Sy * Sx)/det, (Sxx * Sy - Sx * Sxy)/det\n meanerror = residual = 0.0\n for x, y in zip(X, Y):\n meanerror = meanerror + (y - Sy/N)**2\n residual = residual + (y - a * x - b)**2\n RR = 1 - residual/meanerror\n ss = residual / (N-2)\n Var_a, Var_b = ss * N / det, ss * Sxx / det\n return a, b, RR, Var_a, Var_b\n\n\ndef TwoGaussianFit(bbins, mu, sigma, n):\n\n dimens=100.\n bestfactor=2.8\n bestmix=0.14\n sssum=100.\n\n for jj in range(int(dimens)):\n factor =1.2+(jj)/dimens*4.0\n for hh in range(int(dimens)):\n mix=0.03+(hh)/dimens*0.5\n\n y = mlab.normpdf(bbins, mu, sigma)\n ssigma=sigma/factor\n yy= mlab.normpdf(bbins, mu, ssigma)\n\n norm=0.\n ndist=[]\n for j in range(len(y)):\n ndist.append(y[j]+mix*yy[j])\n norm+=y[j]+mix*yy[j]\n ndist=ndist/norm/(bbins[1]-bbins[0])\n\n\n ssum=0.\n dd=[]\n for j in range(len(bbins)):\n dd.append(n[j]-ndist[j])\n 
ssum+=abs(n[j]-ndist[j])\n\n if (ssum< sssum):\n sssum=ssum\n bestfactor=factor\n bestmix=mix\n \n return sssum, bestfactor, bestmix\n\n\n\n# 1) Scaling parameters and SAMIs\n\navx=[]# all cities, all years\navy=[]\nxx_tot=[]\nyy_tot=[]\nlabel=[]\n\ngradients=[]\npops=[]\nintercepts=[]\nmean_log_pop=[]\nmean_log_wages=[]\n\nyear=[]\n\nw, h =47, 382 \nSami = [[0 for x in range(w)] for y in range(h)]\nPops = [[0 for x in range(w)] for y in range(h)]\nWag = [[0 for x in range(w)] for y in range(h)]\n\ncity1=[]\n\nfor yr in range(1969,2016):\n count=0\n ii=yr-1967\n f=open('wages.csv', 'r')\n wreader=csv.reader(f,delimiter=',')\n code=[]\n city=[]\n wages=[]\n name=[]\n for row in wreader:\n if (count>5 and count<388):\n name.append(row[1])\n code.append(row[0])\n wages.append(float(row[ii])) # all cities year by year\n city.append(row[1])\n count+=1\n f.close()\n\n pop=[]\n for i in range(len(code)):\n pop.append(0.)\n count=0\n g=open('population.csv', 'r')\n preader=csv.reader(g,delimiter=',')\n for row in preader:\n if (count>5 and count<388):\n for i in range(len(code)):\n if (code[i]==row[0]):\n pop[i]=float(row[ii])\n count+=1\n g.close()\n\n poplog=np.log(pop)\n wageslog=np.log(wages)\n\n xx=poplog\n yy=wageslog\n\n xx_av=np.mean(xx)\n yy_av=np.mean(yy)\n\n # making best fit\n gradient, intercept, r_value, var_gr, var_it = linreg(xx,yy)\n \n gradients.append(gradient) # beta(t)\n intercepts.append(intercept) # Y0(t)\n mean_log_pop.append(xx_av) # < ln N >(t) center N\n mean_log_wages.append(yy_av) # < ln Y >(t) center Y\n\n res=[]\n for i in range(0,len(xx)):\n res.append( yy[i] - (intercept + gradient*xx[i]))\n\n sigma = np.std(res) # ensemble average of residuals\n mu = np.mean(res)\n year.append(yr)\n \n for jj in range(len(xx)):\n Sami[jj][ii-2]=res[jj]\n Pops[jj][ii-2]=pop[jj]\n Wag[jj][ii-2]=wages[jj]\n\n\nw, h =46, 382 \nDeltaLogW= [[0 for x in range(w)] for y in range(h)]\nDeltaLogP= [[0 for x in range(w)] for y in range(h)]\n\nw_eta=[]\nw_sigma=[]\np_eta=[]\np_sigma=[]\n\n\nfor jj in range(len(xx)): # This computes temporal growth rates and volatilities from time series for Pop and Wages \n#for jj in range(1):\n S=[]\n T=[]\n for i in range(47):\n S.append(Wag[jj][i])\n T.append(Pops[jj][i])\n vr = np.log(S) # logs of wages\n vt = np.log(T)\n \n #print(S)\n #print(vr)\n \n r=np.diff(vr) # This is the difference of the logs of WAGES\n rr=np.diff(vt) # This is the difference of the logs of POP\n\n for ii in range(len(r)):\n DeltaLogW[jj][ii]=r[ii]\n DeltaLogP[jj][ii]=rr[ii]\n\n # Compute the TEMPORAL averages \n esigma = np.std(r) # This is the standard deviation of the growth rate of WAGES : sqrt of volatility\n emu = np.mean(r)+0.5*esigma*esigma # This is the (temporal) mean returns for WAGES\n \n tsigma= np.std(rr) # This is the standard deviation of the growth rate of POP : sqrt of volatility\n tmu = np.mean(rr)+0.5*tsigma*tsigma # This is the standard deviation of the growth rate of POP : sqrt of volatility\n\n w_eta.append(emu) \n w_sigma.append(esigma)\n #if ( esigma**2>0.005 ):\n # print(city[jj],esigma**2)\n p_eta.append(tmu)\n p_sigma.append(tsigma)\n\n if (emu<0.03):\n print(jj,city[jj],'emu=',emu, tmu, 'esigma=',esigma, tsigma)\n\nprint ('')\ngamma_w=[]\ngamma_p=[]\nss=0.\nsss=[]\nfor ii in range(46): # This computes the ensemble averages (over cities) for each time.\n g = 0.\n p = 0.\n ct=0\n gg=[]\n for j in range (382):\n g+=DeltaLogW[j][ii] # This is the ensemble average of the growth rate of WAGES\n p+=DeltaLogP[j][ii] ## This is the ensemble average of the 
growth rate of POPULATION\n gg.append(DeltaLogW[j][ii])\n ct+=1\n sigmagg=np.std(gg)\n sss.append(np.std(gg)**2)\n p = p/float(ct)\n g = g/float(ct)\n\n gamma_w.append(g)\n gamma_p.append(p)\n\nss_var = np.std(sss)**2\nss=np.mean(sss)\n\n\n######## Figures\n\n\n#### Figure BM2 #####\n#fig, ax = plt.subplots()\n\nxx=[]\nyear=[]\naux2=0.\n\nfor ii in range(len(r)): # over time\n \n year.append(1969+ii)\n aux=0.\n count=0\n \n for jj in range(382): # for each city\n aux+=(Sami[jj][ii]-Sami[jj][0] )**2\n #aux+=(DeltaLogW[jj][ii]-gamma_w[ii]) - gradients[ii]*(DeltaLogP[jj][ii] -gamma_p[ii])\n aa=0.\n for iii in range(ii):\n aa+=(DeltaLogW[jj][iii]-gamma_w[iii]) - gradients[iii]*(DeltaLogP[jj][iii] -gamma_p[iii])\n #((DeltaLogW[jj][ii]-gamma_w[ii]) - gradients[ii]*(DeltaLogP[jj][ii] -gamma_p[ii]))**2\n #print(ii,aa)\n #aux+=aa**2\n count+=1\n #print(aux)\n aux2=aux/float(count)\n \n xx.append(aux2) \n\n\n#ax.axvspan(1969.91667, 1970.8333, alpha=0.3, color='grey')\n#ax.axvspan(1973.8333, 1975.167, alpha=0.3, color='grey')\n#ax.axvspan(1969+11.0, 1969+11.5, alpha=0.3, color='grey')\n#ax.axvspan(1969+12.5, 1969+13.8333, alpha=0.3, color='grey')\n#ax.axvspan(1969+21.5, 1969+22.167, alpha=0.3, color='grey')\n#ax.axvspan(1969+32.167, 1969+32.8333, alpha=0.3, color='grey')\n#ax.axvspan(1969+38.8333, 1969+40.416667, alpha=0.3, color='grey')\n\n#xxlog=np.log(xx)\n\n# global best fit\ngradient, intercept, r_value, var_gr, var_it = linreg(year,xx)\n#print( \"Gradient=\", gradient, \", 95 % CI = [\",gradient- 2.*np.sqrt(var_gr),\",\",gradient+2.*np.sqrt(var_gr),\"]\")\n#print(\"intercept=\", intercept, \", 95 % CI = [\",intercept- 2.*np.sqrt(var_it),\",\",intercept+2.*np.sqrt(var_it),\"]\")\n#print(\"R-squared\", r_value**2)\n\ntt=year\ntt.sort()\nfitx=np.arange(float(tt[0])-0.1,float(tt[-1])+0.1,0.1,dtype=float)\nfity=intercept + fitx*gradient\n#plt.plot(fitx,fity,'-', c='black', linewidth=2, alpha=0.8,label=r'$\\beta=??$, $r^2=??$, p-value $<1.e^{-20}$')\n\n\n\n# local best fits\nyear1=[]\nxx1=[]\nfor ii in range(18):\n year1.append(year[ii])\n xx1.append(xx[ii])\n\ngradient, intercept, r_value, var_gr, var_it = linreg(year1,xx1)\n#print( \"Gradient=\", gradient, \", 95 % CI = [\",gradient- 2.*np.sqrt(var_gr),\",\",gradient+2.*np.sqrt(var_gr),\"]\")\n#print(\"intercept=\", intercept, \", 95 % CI = [\",intercept- 2.*np.sqrt(var_it),\",\",intercept+2.*np.sqrt(var_it),\"]\")\n#print(\"R-squared\", r_value**2)\n\n\ntt=year1\ntt.sort()\nfitx=np.arange(float(tt[0])-5,float(tt[-1])+5,0.1,dtype=float)\nfity=intercept + fitx*gradient\n#plt.plot(fitx,fity,'-', c='red', linewidth=2, alpha=0.8,label=r'$\\beta=??$, $r^2=??$, p-value $<1.e^{-20}$')\n\n\nyear2=[]\nxx2=[]\nfor ii in range(22,len(r)):\n year2.append(year[ii])\n xx2.append(xx[ii])\n\ngradient, intercept, r_value, var_gr, var_it = linreg(year2,xx2)\n#print( \"Gradient=\", gradient, \", 95 % CI = [\",gradient- 2.*np.sqrt(var_gr),\",\",gradient+2.*np.sqrt(var_gr),\"]\")\n#print(\"intercept=\", intercept, \", 95 % CI = [\",intercept- 2.*np.sqrt(var_it),\",\",intercept+2.*np.sqrt(var_it),\"]\")\n#print(\"R-squared\", r_value**2)\n\n\ntt=year2\ntt.sort()\nfitx=np.arange(float(tt[0])-9,float(tt[-1])+5,0.1,dtype=float)\nfity=intercept + fitx*gradient\n#plt.plot(fitx,fity,'-', c='red', linewidth=2, alpha=0.8,label=r'$\\beta=??$, $r^2=??$, p-value $<1.e^{-20}$')\n\n\n#plt.plot(year,xx,'bo',ms=8, alpha=0.4)\n\n#plt.ylabel(r'$\\langle \\Delta_i^2 
\\rangle$',fontsize=20)\n#plt.xlabel('Year',fontsize=20)\n#plt.tight_layout()\n#plt.xlim(1968,2016)\n#plt.ylim(0.,0.0)\n#plt.savefig('Mean_Square_Displacement_Wages_Total.png', format='png', dpi=1200)\n#plt.show()\n\n\n\n\n\n\n\n\n#### Figure BM2 #####\n#plt.clf()\n#fig, ax = plt.subplots()\n\nfor jj in range(382): # for each city \n year=[]\n yydelta=[]\n aux2=0.\n \n for ii in range(len(r)): # over time\n aux2= Sami[jj][ii]-Sami[jj][0]\n \n yydelta.append(aux2) # this is DELTA SAMIs IN TIME\n \n year.append(1970+ii)\n# plt.plot(year,yydelta,'-',alpha=0.4)\n\nmean_trajp=[]\nmean_trajm=[]\n\nfor j in range(len(r)):\n mean_trajp.append(np.sqrt(0.00120544646369 +0.00108420769819*j))\n mean_trajm.append(-np.sqrt(0.00120544646369 +0.00108420769819*j))\n \n#plt.plot(year,mean_trajp,'r-',linewidth=3)\n#plt.plot(year,mean_trajm,'r-',linewidth=3)\n\n\n#plt.plot((1969, 2016), (0., 0.), 'k-')\n\n#plt.xlabel('Year',fontsize=20)\n#plt.ylabel(r'$t \\Delta_i(t)$',fontsize=20)\n\n#plt.xlim(1970,2015)\n#plt.ylim(-1.,1.)\n#plt.tight_layout()\n#plt.savefig('Fig5B.pdf')\n#plt.show()\n\n\n\n\n\n\n### Figure variance prediction ###\n\n#plt.clf()\n#fig, ax = plt.subplots()\n\nsigmas=[]\nav_sigma=0.\nfor jj in range(382):\n sigmas.append(w_sigma[jj]**2)\n av_sigma+=w_sigma[jj]**2\n\n \nav_sigma=av_sigma/float(len(w_sigma))\nsigsig=0.\nfor jj in range(382):\n sigsig+=((av_sigma-sigmas[jj])**2)\nsigsig=sigsig/float(len(w_sigma)-1)\nsigsig=np.sqrt(sigsig)\nprint(av_sigma, sigsig)\n#ax.axhspan(av_sigma-sigsig,av_sigma+sigsig, alpha=0.2, color='grey')\n#ax.axhspan(av_sigma,av_sigma, alpha=1.0, color='blue')\n\nprint('standard deviation',ss,np.sqrt(ss_var))\n#ax.axhspan(ss-np.sqrt(ss_var),ss+np.sqrt(ss_var), alpha=0.2, color='grey') # not right.\n#ax.axhspan(ss,ss, alpha=1.0, color='green')\n\n#ax.axhspan(gradient-np.sqrt(var_gr),gradient-np.sqrt(var_gr), alpha=0.2, color='grey')\n#ax.axhspan(gradient,gradient, alpha=1.0, color='red')\n\n#plt.plot(w_eta,sigmas,'bo',alpha=0.2)\n#plt.ylabel(r'$\\sigma^2_i$',fontsize=20)\n#plt.xlabel(r'${\\bar \\eta}_i$',fontsize=20)\n#plt.tight_layout()\n#plt.savefig('Variance_Prediction.png', format='png', dpi=1200)\n#plt.show()\n\n####### Now histograms of SAMIs to show increase in varience over time ####\n\n#1) Average of SAMIs in time, histogram and best fit as Gaussiana and as sum of Gaussians\n\n# build vector of all SAMIs\n\n#plt.clf()\nfig, ax = plt.subplots()\n\nall_res=[]\nfor i in range(len(year)):\n for jj in range (len(pop)): \n all_res.append(Sami[jj][i])\n\n#all_res=all_res\n#/np.sqrt(len(year))\n\nsigma = np.std(all_res)\nmu = np.mean(all_res)\nnum_bins=60\n\nn, bins, patches = plt.hist(all_res, num_bins, normed=1, facecolor='grey', alpha=0.5) #############\n \nbbins=[]\nfor j in range(len(n)):\n bbins.append( (bins[j]+bins[j+1])/2.)\n\nsssum, bestfactor, bestmix = TwoGaussianFit(bbins, mu, sigma, n) #### this computes the 2 Gaussian fit\n \nprint( 'mean= ',mu, 'sigma= ',sigma, 'second amplitude= ',bestmix, 'sec sigma= ',sigma/bestfactor )\n\ny = mlab.normpdf(bbins, mu, sigma)\nssigma=sigma/bestfactor\nyy= mlab.normpdf(bbins, mu, ssigma)\n \nnorm=0.\nndist=[]\nfor j in range(len(y)):\n ndist.append(y[j]+bestmix*yy[j])\n norm+=y[j]+bestmix*yy[j]\nndist=ndist/norm/(bbins[1]-bbins[0])\nplt.plot(bbins, ndist, 'r--',lw=2)\n\ndd=[]\nfor j in range(len(bbins)):\n dd.append(n[j]-ndist[j])\n\n\nplt.plot(bbins, y, 
'b-',lw=3)\nplt.xlim(-1,1)\nplt.ylim(0.,3.5)\nplt.xlabel(r'$\\xi_i(t)$',fontsize=20)\nplt.ylabel('Probability',fontsize=20)\nplt.tight_layout()\nplt.savefig('Fig2C.pdf')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.sin", "matplotlib.pyplot.plot", "numpy.random.normal", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ], [ "numpy.log", "matplotlib.pyplot.tight_layout", "numpy.sqrt", "matplotlib.pyplot.ylim", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.std", "matplotlib.pyplot.xlim", "numpy.mean", "numpy.diff", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "matplotlib.mlab.normpdf", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
warwickdatascience/podcast-animations
[ "b2b477ab6f2fe220715a95bb850c89501ed3d08b" ]
[ "rule_30/rule_30.py" ]
[ "\"\"\"Generate an animation of the cellular automaton Rule 30.\"\"\"\n\nimport json\nimport os\nimport pathlib\nimport shutil\nimport subprocess\nimport tempfile\n\nimport colour\nimport cv2\nimport imageio\nimport numpy as np\nimport scipy.signal as sg\nimport tqdm\n\n# Global parameters\nCONFIG_PATH = 'config/full.json'\nFFMPEG_PATH = '/usr/bin/ffmpeg'\n\n# Load configuration\nwith open(CONFIG_PATH) as f:\n config = json.load(f)\n VIDEO_WIDTH = config['video_width']\n VIDEO_HEIGHT = config['video_height']\n SECS = config['secs']\n FPS = config['fps']\n PIXEL_SIZE = config['pixel_size']\n OUTPUT_PATH = config['output_path']\n # Trade-off speed and temp storage requirements\n COMP_LEVEL = config['comp_level']\n COLOURS = map(colour.Color, config['colours'])\n\n# Constants\nSTATE_WIDTH = VIDEO_WIDTH // PIXEL_SIZE\nSTATE_HEIGHT = VIDEO_HEIGHT // PIXEL_SIZE\nNUM_FRAMES = SECS * FPS\n\n\nclass Rule30:\n \"\"\"A class for generating Rule 30.\"\"\"\n\n neighbours = np.array([[1, 2, 4]], np.uint8)\n kernel = np.array([0, 1, 2, 3, 4, 0, 0, 0])\n colours = np.array([\n list(map(lambda x: round(255 * x), c.rgb)) for c in COLOURS\n ], np.uint8)\n\n def __init__(self, width, height):\n \"\"\"Initialise the Rule 30 generator and set initial state.\n\n Args:\n width (int): State width\n height(int): State height\n \"\"\"\n self.width = width\n self.height = height\n\n self.state = np.zeros((self.height, self.width), np.uint8)\n self.peak_height = 1\n self.state[-1, self.width // 2] = 2\n\n self.rgb = None\n self._update_rgb()\n\n def step(self):\n \"\"\"Update the state and RGB representation.\"\"\"\n self._update_state()\n self._update_rgb()\n \n def _update_state(self):\n \"\"\"Update the state by applying Rule 30.\"\"\"\n conv_row_alive = (self.state[-1, None, :] > 0).astype(np.uint8)\n rule_index = sg.convolve2d(conv_row_alive, self.neighbours,\n mode='same', boundary='wrap')\n new_row = self.kernel[rule_index]\n self.state = np.concatenate((self.state[1:], new_row))\n\n if self.peak_height < self.height:\n self.peak_height += 1\n self.state[-self.peak_height, self.width // 2] = 2\n\n def _update_rgb(self):\n \"\"\"Convert the state to an RGB array.\"\"\"\n self.rgb = self.colours[self.state]\n\n\nclass VideoConverter:\n \"\"\"A class for converting frames of NumPy arrays to a video.\"\"\"\n\n def __init__(self, fps=30):\n \"\"\"Initialise the converter and create a temporary directory.\n\n Args:\n fps (int): Frames per second for the converted video\n \"\"\"\n self.fps = fps\n self.tmp_dir = tempfile.TemporaryDirectory()\n self.curr_frame = 0\n\n def add_frame(self, frame):\n \"\"\"Adds a new frame to the video.\n\n Args:\n frame (uint8 NumPy array of shape: (video_height, video_width, 3))\n Data of the new frame as RGB. 
All frames must have the same\n dimensions.\n \"\"\"\n frame_path = os.path.join(self.tmp_dir.name, f'{self.curr_frame}.png')\n imageio.imwrite(frame_path, frame, compress_level=COMP_LEVEL)\n self.curr_frame += 1\n\n def write(self, output_path):\n \"\"\"Converts the accumulated frames to video and writes the result.\n\n Args:\n output_path: (string) Path where to save the video file\n \"\"\"\n abs_tmp_dir_path = pathlib.Path(self.tmp_dir.name).absolute()\n abs_output_path = pathlib.Path(output_path).absolute()\n os.makedirs(os.path.dirname(abs_output_path), exist_ok=True)\n if OUTPUT_PATH[-4:] == '.mp4':\n subprocess.call([FFMPEG_PATH,\n '-framerate', f'{self.fps}',\n '-i', f'{abs_tmp_dir_path}/%d.png',\n '-vcodec', 'libx264',\n '-pix_fmt', 'yuv420p',\n # Video quality, lower is better, but zero\n # (lossless) doesn't work.\n '-crf', '1',\n '-y', # overwrite output files without asking\n abs_output_path\n ])\n elif OUTPUT_PATH[-4:] == '.gif':\n subprocess.call([FFMPEG_PATH,\n '-i', f'{abs_tmp_dir_path}/%d.png',\n '-y', # overwrite output files without asking\n abs_output_path\n ])\n else:\n raise NotImplementedError(\n \"filetype not support\"\n )\n self.tmp_dir.cleanup()\n print(f\"Video written to: {abs_output_path}\")\n\n\ndef main():\n converter = VideoConverter(fps=FPS)\n animation = Rule30(STATE_WIDTH, STATE_HEIGHT + 1)\n\n for __ in tqdm.trange(NUM_FRAMES // PIXEL_SIZE,\n desc='Generating frames'):\n small_frame = animation.rgb\n enlarged_frame = cv2.resize(small_frame,\n (VIDEO_WIDTH, VIDEO_HEIGHT + PIXEL_SIZE),\n interpolation=cv2.INTER_NEAREST)\n for i in range(PIXEL_SIZE):\n converter.add_frame(enlarged_frame[i:(-PIXEL_SIZE + i)])\n animation.step()\n\n converter.write(OUTPUT_PATH)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.concatenate", "numpy.array", "numpy.zeros", "scipy.signal.convolve2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
tiskw/random-fourier-features
[ "4a12185e44d1f9aba594f8a7569042e73675d6cd", "4a12185e44d1f9aba594f8a7569042e73675d6cd" ]
[ "rfflearn/explainer/permutation.py", "examples/least_square_regression/main_least_square_regression.py" ]
[ "#!/usr/bin/env python3\n#\n# Wrapper functions for the permutation feature importance.\n#\n##################################################### SOURCE START #####################################################\n\n\nimport numpy as np\nimport matplotlib.pyplot as mpl\nimport sklearn.inspection\n\n\n### Calculate permutation importance, and set the feature importance\n### (mean of permutation importance for each trial) as model.feature_importances_.\ndef permutation_feature_importance(model, Xs, ys, **kwargs):\n\n ### Calculate permutation importance.\n permutation_importance = sklearn.inspection.permutation_importance(model, Xs, ys, **kwargs)\n\n ### Calculate the average of permutation importance for each feature and set the average values\n ### as model.feature_importances_ for providing compatible interface with scikit-learn.\n setattr(model, \"feature_importances_\", permutation_importance.importances_mean)\n\n return permutation_importance.importances\n\n\n### Visualize permutation importance as a box diagram.\n### The input arguments are:\n### - permutation_importance: np.array with shape (num_features, num_repeats),\n### - feature_names: list with length num_features,\n### - show: True or False.\ndef permutation_plot(permutation_importances, feature_names, show = True):\n\n ### Sort faetures by the average of permutation order.\n sorted_idx = np.mean(permutation_importances, axis = 1).argsort()\n importances = permutation_importances[sorted_idx].T\n label_names = feature_names[sorted_idx]\n\n ### Plot box diagram.\n mpl.boxplot(importances, labels = label_names, vert = False)\n mpl.xlabel(\"Permutation feature importances (impact on model output)\")\n mpl.grid()\n\n if show: mpl.show()\n\n\n##################################################### SOURCE FINISH ####################################################\n# Author: Tetsuya Ishikawa <[email protected]>\n# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker\n", "#!/usr/bin/env python3\n#\n# This Python script provides an example usage of RFFRegression class which is a class\n# for least square regression using RFF. Interface of RFFRegression is quite close to\n# sklearn.linear_model.LinearRegression.\n#################################### SOURCE START ###################################\n\n\"\"\"\nOverview:\n Train Random Fourier Feature least square regression and plot results.\n\nUsage:\n main_rff_regression.py [--random_type <str>] [--kdim <int>] [--std_kernel <float>]\n [--rtype <str>] [--n_train <int>] [--n_test <int>] [--seed <int>]\n main_rff_regression.py (-h | --help)\n\nOptions:\n --rtype <str> Random matrix type (rff ot orf). [default: rff]\n --kdim <int> Dimention of RFF/ORF. [default: 8]\n --std_kernel <float> Standard deviation of RFF/ORF. [default: 0.5]\n --n_train <int> Number of training data points. [default: 21]\n --n_test <int> Number of test data points. [default: 101]\n --seed <int> Random seed. 
[default: 111]\n -h, --help Show this message.\n\"\"\"\n\nimport os\nimport sys\n\nimport docopt\nimport numpy as np\nimport matplotlib.pyplot as mpl\n\n### Main procedure\ndef main(args):\n\n ### Fix seed for random fourier feature calculation\n rfflearn.seed(111)\n\n ### Create regressor instance\n if args[\"--rtype\"] == \"rff\": reg = rfflearn.RFFRegression(dim_kernel = args[\"--kdim\"], std_kernel = args[\"--std_kernel\"])\n elif args[\"--rtype\"] == \"orf\": reg = rfflearn.ORFRegression(dim_kernel = args[\"--kdim\"], std_kernel = args[\"--std_kernel\"])\n else : raise RuntimeError(\"Error: '--rtype' must be 'rff' or 'orf'.\")\n\n ### Prepare training data\n with utils.Timer(\"Creating dataset: \"):\n Xs_train = np.linspace(0, 3, args[\"--n_train\"]).reshape((args[\"--n_train\"], 1))\n ys_train = np.sin(Xs_train**2)\n Xs_test = np.linspace(0, 3, args[\"--n_test\"]).reshape((args[\"--n_test\"], 1))\n ys_test = np.sin(Xs_test**2)\n\n ### Train regression with random fourier features\n with utils.Timer(\"Train regressor: \"):\n reg.fit(Xs_train, ys_train)\n\n ### Conduct prediction for the test data\n with utils.Timer(\"Prediction: \"):\n predict = reg.predict(Xs_test)\n\n ### Plot regression results\n mpl.figure(0)\n mpl.title(\"Regression for function y = sin(x^2) with RFF\")\n mpl.xlabel(\"X\")\n mpl.ylabel(\"Y\")\n mpl.plot(Xs_train, ys_train, \"o\")\n mpl.plot(Xs_test, ys_test, \".\")\n mpl.plot(Xs_test, predict, \"-\")\n mpl.legend([\"Training data\", \"Test data\", \"Prediction by RFF regression\"])\n mpl.grid()\n mpl.show()\n\nif __name__ == \"__main__\":\n\n ### Parse input arguments.\n args = docopt.docopt(__doc__)\n\n ### Add path to 'rfflearn/' directory.\n ### The following lines are not necessary if you copied 'rfflearn/' to the current\n ### directory or other directory which is included in the Python path.\n current_dir = os.path.dirname(__file__)\n module_path = os.path.join(current_dir, \"../../\")\n sys.path.append(module_path)\n\n import rfflearn.cpu as rfflearn\n import rfflearn.utils as utils\n\n ### Convert all arguments to an appropriate type.\n for k, v in args.items():\n try : args[k] = eval(str(v))\n except: args[k] = str(v)\n\n ### Run main procedure.\n main(args)\n\n#################################### SOURCE FINISH ##################################\n# Author: Tetsuya Ishikawa <[email protected]>\n# vim: expandtab tabstop=4 shiftwidth=4 fdm=marker\n" ]
[ [ "matplotlib.pyplot.boxplot", "numpy.mean", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.figure", "numpy.sin", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
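This record's example script fits y = sin(x^2) with rfflearn's RFFRegression. As a compact illustration of the underlying random Fourier feature idea, here is a numpy-only sketch following the standard Rahimi-Recht construction with the script's defaults (kdim = 8, std_kernel = 0.5). Whether rfflearn applies `std_kernel` to the random projection exactly this way is an assumption, and all names below are illustrative rather than the library's API.

```python
import numpy as np

rng = np.random.default_rng(111)
kdim, std_kernel = 8, 0.5  # mirror the script's defaults (assumed semantics)

# Data as in the example: y = sin(x^2) sampled on [0, 3].
Xs_train = np.linspace(0, 3, 21).reshape(-1, 1)
ys_train = np.sin(Xs_train**2).ravel()
Xs_test = np.linspace(0, 3, 101).reshape(-1, 1)

# Random Fourier features: z(x) = sqrt(2/kdim) * cos(x @ W + b) is an explicit
# feature map whose inner products approximate an RBF (Gaussian) kernel.
W = std_kernel * rng.standard_normal((Xs_train.shape[1], kdim))
b = rng.uniform(0.0, 2.0 * np.pi, size=kdim)

def features(X):
    return np.sqrt(2.0 / kdim) * np.cos(X @ W + b)

# "Least square regression" then reduces to ordinary least squares
# in the randomized feature space.
coef, *_ = np.linalg.lstsq(features(Xs_train), ys_train, rcond=None)
predict = features(Xs_test) @ coef
print(predict[:5])
```

The appeal of the construction is that training stays linear (a single least-squares solve) while the cosine features emulate a nonlinear kernel; the ORF variant in the script's options differs only in how the rows of W are drawn.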
lukasbindreiter/lingvo
[ "d6f2e6901fadc8440a9e6222ac54b68a8b6faf02", "d6f2e6901fadc8440a9e6222ac54b68a8b6faf02" ]
[ "lingvo/core/quant_utils.py", "lingvo/tasks/asr/decoder_test.py" ]
[ "# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for model quantization.\"\"\"\n\nimport enum\nimport lingvo.compat as tf\nfrom lingvo.core import base_layer\nfrom lingvo.core import hyperparams\nfrom lingvo.core import py_utils\nfrom lingvo.core import summary_utils\nimport numpy as np\n\n\nclass QuantizableLayer(base_layer.BaseLayer):\n \"\"\"A layer that supports various forms of quantization.\n\n It is always safe to extend QuantizableLayer instead of BaseLayer (i.e. at\n the base of layer inheritance hierarchies) if any layer in the hierarchy\n may be quantized. Unless if configured/used, all quantization behavior\n is disabled by default.\n\n Most quantization strategies employed at training time fall into the\n \"fake quantization\" category, where we add various constraints in the\n forward propagation to quantify and simulate the effect of quantization.\n Within that, we have two major approaches:\n\n - Active clipping: Usually via a schedule, tensors are actively\n clipped to fall into ranges that we know apriori that the model should\n be able to deal with.\n - Passive tracking and simulation: Passively track the min/max ranges\n of tensors and insert special ops at training and eval time that\n constrain to those ranges.\n\n The tensors of interest for both approaches are top-level inputs (or\n embeddings), outputs of arithmetic operations (add, mul, tanh, etc) and\n weights. While the actual process of quantizing can be quite complex and\n involve an end to end view of the system, from a modeling perspective, it\n can be thought of as providing tags/decorators to arithmetic inputs/outputs.\n It would be appropriate to think of these as casts which alter the way that\n the arithmetic operation is tracked and quantize (if Python/Tensorflow were\n a more strongly typed environment, they would indeed represent types in the\n type system but given the loose typing, it is just an honor system).\n\n The \"decorators\" are:\n\n - QWeight: Tags a tensor (typically a var) as a weight quantized type.\n - QR* (QRTanh, QRSigmoid, QRSoftmax, etc): Tags a tensor as the result\n of a fixed activation function with a known output range (the range\n is implied in the name).\n - QRPadding: Tags a tensor as containing a padding value (as we define\n them as 0..1). While such values are numeric, they generally exist with\n very different ranges from the rest of the graph and should not be\n arithmetically combined with tensors that may have a different/variable\n range.\n - QTensor: Tags a tensor as a generic quantized intermediate value.\n These are also tagged with a layer-unique name. 
All QTensors with the\n same name will be considered the same from a numerical range/precision\n perspective.\n\n Tagging things in this way allows us to, via hyperparameters, associate\n one or more quantization domains (QDomain) with the layer that will\n actually perform the necessary tracking and transformations needed at\n training and inference time to ensure that the layer can operate in low\n bit inference engines that only have quantized numeric representations.\n See the SampleQuantizedProjectionLayer in the unit test for an example layer\n that has had these tags applied.\n\n As a note on terminology, domain/QDomain here refers to a distinct set of\n quantization rules to apply to a subset of tensors. Most layers will only\n have one QDomain (default). The concept exists for layers which have been\n specially created to operate in more than one quantized precision (i.e. an\n RNN cell that uses 8bit quantization for inputs/outputs and 16bit\n quantization for internal state arithmetic). Such uses should be rare.\n\n\n **Convenience functions:**\n\n The layer adds a number of convenience functions to the layer's 'fns'\n function library. These mirror similarly named functions in TensorFlow but\n automatically add the necessary annotations. All such functions take the\n following named parameters:\n\n - qt: Name of QTensor (setup with TrackQTensor) for dynamic range tracking.\n - qmin/qmax/qdomain: Constant min/max range plus optional QDomain name to\n resolve against. Typically, only qmin/qmax are used.\n\n Functions that have a natural output range will have default values for\n qmin/qmax so that they just work. Functions that do not have a natural\n output range must have either qt or qmin/qmax specified manually.\n\n Natural range functions\n\n - qtanh\n - qsigmoid\n - qsoftmax\n - qrelu6\n - qrandom_uniform\n\n Dynamic range functions:\n\n - qadd\n - qsubtract\n - qmultiply\n - qmatmul (defers to `.py_utils.Matmul` and only accepts rank-2 tensors)\n - qbatchmatmul (defers to `tf.matmul` directly)\n - qconv1d\n - qlog\n - qlogsoftmax\n - qrelu\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('qdomain', hyperparams.Params(),\n 'Container for quantization domains.')\n p.qdomain.Define('default', None, 'Default quantization domain.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n\n self._tracked_tensors = dict() # tracked t_name -> (QDomain)\n self._aqt_weights = dict() # aqt w_name -> (Qdomain)\n self._qstate = None # t_name -> Tensor\n\n # Instantiate quantization domains.\n self._qdomains = dict() # Dict of qdname -> QDomain or None\n for qdname in dir(p.qdomain):\n qdparams = p.qdomain.Get(qdname)\n if qdparams is None:\n continue\n assert issubclass(\n qdparams.cls,\n QDomain), ('Expected quantized domain %s to extend QDomain' % qdname)\n qdchild_name = 'qdomain_' + qdname\n self.CreateChild(qdchild_name, qdparams)\n self._qdomains[qdname] = self.children[qdchild_name]\n self._AddQuantizationFunctions()\n\n def _CreateChildrenVariables(self):\n # Backwards compatibility: child.InstantiateVariables() in custom scope.\n p = self.params\n with tf.variable_scope(p.name + '/q'):\n for qdomain in self._qdomains.values():\n qdomain.InstantiateVariables()\n super()._CreateChildrenVariables()\n\n def QRTanh(self, t, domain='actf'):\n \"\"\"Quantizes the output of a tanh (-1.0, 1.0).\"\"\"\n qd = self.GetQDomain(domain)\n return qd.QuantizeNaturalRange(t, -1.0, 1.0) if qd else t\n\n def QRSigmoid(self, t, 
domain='actf'):\n \"\"\"Quantizes the output of a sigmoid (0, 1.0).\"\"\"\n qd = self.GetQDomain(domain)\n return qd.QuantizeNaturalRange(t, 0.0, 1.0) if qd else t\n\n def QRSoftmax(self, t, domain='softmax', narrow_to_asym_bit_depth=False):\n \"\"\"Quantizes the output of a softmax (0, 1.0 - 1.0/2^-bits).\"\"\"\n qd = self.GetQDomain(domain)\n # Override based on TFLite softmax support.\n softmax_max = 1.0\n if qd is not None and narrow_to_asym_bit_depth:\n softmax_max = (2**qd.bits - 1) / (2**qd.bits)\n return qd.QuantizeNaturalRange(t, 0.0, softmax_max) if qd else t\n\n def QRRelu(self, t, domain='relu'):\n \"\"\"Quantizes the output of a relu (0, 1.0).\"\"\"\n qd = self.GetQDomain(domain)\n return qd.QuantizeNaturalRange(t, 0.0, 1.0) if qd else t\n\n def QRRelu6(self, t, domain='relu6'):\n \"\"\"Quantizes the output of a relu6 (0, 6.0).\"\"\"\n qd = self.GetQDomain(domain)\n return qd.QuantizeNaturalRange(t, 0.0, 6.0) if qd else t\n\n def QRPadding(self, t, domain='padding'):\n \"\"\"Quantizes the padding.\"\"\"\n qd = self.GetQDomain(domain)\n return qd.QuantizeConstantRange(t, 0.0, 1.0) if qd else t\n\n def TrackQTensor(self, *t_names, **kwargs):\n r\"\"\"Creates one or more QTensors for later use.\n\n Any tensor that will later be quantized must be created first, preferably\n in _CreateLayerVariables().\n\n Along with a list of tensor names to create, they can be associated with\n a 'domain'. Most layers are simple enough to only have a single quantization\n domain (QDomain), typically 'default'. However, additional QDomains can\n be defined as parameters to control fine grained aspects of quantization.\n\n If no explicit domain is passed, then the domain ('tensor\\_' + t_name) is\n tried. If that is not defined, then 'default'.\n\n Args:\n *t_names: Positional parameters are taken to be QTensor names to create.\n **kwargs: Can contain an explicit 'domain'. Written this way due to\n python2 limitations.\n \"\"\"\n domain_override = kwargs['domain'] if 'domain' in kwargs else None\n for t_name in t_names:\n domain = domain_override\n if domain is None:\n domain = 'tensor_' + t_name\n qd = self.GetQDomain(domain)\n self._tracked_tensors[t_name] = qd\n if qd:\n qd.CreateTensor(t_name)\n\n def CreateAqtWeight(self, w_name, shape, feature_axis, domain='weight'):\n \"\"\"Creates quantized weights for later use.\n\n Weights that will later be quantized must be created first, preferably\n in _CreateLayerVariables().\n\n Args:\n w_name: Name of the quantized weight to create.\n shape: Expected shape of the weight tensor.\n feature_axis: axis corresponding to output channel/feature for weights.\n domain: Custom domain to match (defaults to 'weight').\n \"\"\"\n qd = self.GetQDomain(domain)\n self._aqt_weights[w_name] = qd\n if qd:\n qd.CreateTensorWithShape(w_name, shape, feature_axis)\n\n def QTensor(self, t_name, t, eval_only=False):\n \"\"\"Quantizes a general tensor input/output in one step.\n\n t_name must have been previously created via TrackQTensor.\n\n Args:\n t_name: Previously created QTensor t_name to quantize to.\n t: Tensor to quantize.\n eval_only: Whether to only apply quantization pressure at eval time.\n\n Returns:\n The tensor, quantized.\n \"\"\"\n return self.QTensorMulti(t_name, t, eval_only=eval_only)[0]\n\n def QTensorMulti(self, t_name, *ts, **kwargs):\n \"\"\"Quantizes multiple tensors simultaneously.\n\n t_name must have been previously created via TrackQTensor.\n\n This is different from multiple calls to QTensor because each of the\n tensors will contribute to the min/max of the same constraint.\n Typically used for tensors that are being added together.\n\n Args:\n t_name: Previously created QTensor t_name to quantize to.\n *ts: Tensors to quantize.\n **kwargs: Additional kwargs as per QTensor.\n Returns:\n Tuple of quantized tensors.\n \"\"\"\n assert t_name in self._tracked_tensors, (\n ('Call to QTensor without first calling TrackQTensor: %s '\n '(all known = %r)') % (t_name, list(self._tracked_tensors.keys())))\n eval_only = kwargs['eval_only'] if 'eval_only' in kwargs else False\n qd = self._tracked_tensors[t_name]\n if not qd:\n return ts\n return qd.QuantizeTensors(t_name, ts, eval_only=eval_only)\n\n def GetQTensorRange(self, t_name, ts):\n \"\"\"Returns the range for a quantized tensor.\n\n t_name must have been previously created via TrackQTensor and ts should be\n previously quantized.\n\n Args:\n t_name: Previously created QTensor t_name to fetch range from.\n ts: Tensor to retrieve range from.\n\n Returns:\n The (min, max) range of the quantized tensor.\n \"\"\"\n qd = self._tracked_tensors[t_name]\n return qd.GetTensorRange(t_name, ts)\n\n def QWeight(self, w, domain='weight'):\n \"\"\"Quantizes a weight.\n\n Args:\n w: The weight tensor.\n domain: Custom domain to match (defaults to 'weight' or 'default').\n Returns:\n The quantized weight.\n \"\"\"\n qd = self.GetQDomain(domain)\n return qd.QuantizeWeight(w) if qd else w\n\n # TODO(shivaniagrawal): This helper function is not being used anywhere. We\n # are keeping it here since it is a fast and easy way to assert model quality\n # after AQT weights quantization. But note that using it is likely to be less\n # performant than using ToAqt and FromAqt around matmul\n # (compare existing code).\n def FqWeight(self, w_name, w, feature_axis, expected_scale_shape=None):\n \"\"\"AQT Quantized weight FQ style.\n\n This is analogous to QWeight; either FqWeight or QWeight should be identity\n for all domains. AqtQDomain additionally supports per channel quantization.\n\n w_name must have been previously created via CreateAqtWeight.\n\n Args:\n w_name: Previously created w_name QWeight to quantize weight.\n w: The weight tensor.\n feature_axis: axis corresponding to output channel/feature for weights.\n expected_scale_shape: Optional shape to verify if scale shape is as\n expected. 
Defaults to None.\n\n Returns:\n Quantized weights.\n \"\"\"\n assert w_name in self._aqt_weights, (\n ('Call to FqWeight without first calling CreateAqtWeight: %s '\n '(all known = %r)') % (w_name, list(self._aqt_weights.keys())))\n qd = self._aqt_weights[w_name]\n if not qd:\n return w\n return qd.FqWeight(\n w_name,\n w,\n feature_axis=feature_axis,\n expected_scale_shape=expected_scale_shape)\n\n def ToAqtWeight(self, w_name, w, feature_axis, expected_scale_shape=None):\n \"\"\"Quantized integer weight AQT style.\n\n This only scales, rounds and clips; the resulting quantized weight would be\n either integer or integer emulated in float.\n\n w_name must have been previously created via CreateAqtWeight.\n\n Args:\n w_name: Previously created w_name QWeight to quantize weight.\n w: The weight tensor.\n feature_axis: axis corresponding to output channel/feature for weights.\n expected_scale_shape: Optional shape to verify if scale shape is as expected.\n Defaults to None.\n\n Returns:\n Quantized weights.\n \"\"\"\n assert w_name in self._aqt_weights, (\n ('Call to ToAqtWeight without first calling CreateAqtWeight: %s '\n '(all known = %r)') % (w_name, list(self._aqt_weights.keys())))\n qd = self._aqt_weights[w_name]\n if not qd:\n return w\n return qd.ToAqtWeight(\n w_name,\n w,\n feature_axis=feature_axis,\n expected_scale_shape=expected_scale_shape)\n\n def FromAqtWeight(self, w_name, out):\n \"\"\"Rescales the output corresponding to AQT style quantized matmul's weight.\n\n Uses the same scale used by `ToAqtWeight` and applies its inverse to rescale.\n\n w_name must have been previously created via CreateAqtWeight.\n\n Args:\n w_name: Previously created w_name QWeight to quantize weight.\n out: The tensor to rescale.\n\n Returns:\n Rescaled output.\n \"\"\"\n assert w_name in self._aqt_weights, (\n ('Call to FromAqtWeight without first calling CreateAqtWeight: %s '\n '(all known = %r)') % (w_name, list(self._aqt_weights.keys())))\n qd = self._aqt_weights[w_name]\n return qd.FromAqtWeight(w_name, out) if qd else out\n\n def ToAqtActActInputs(self,\n act_lhs,\n act_rhs,\n act_lhs_distribution,\n act_rhs_distribution,\n domain=None):\n \"\"\"Quantizes activations for (act * act) matmul AQT style.\n\n This only scales, rounds and clips; the resulting quantized acts would be\n either integer or integer emulated in float.\n\n Args:\n act_lhs: Left hand side activation.\n act_rhs: Right hand side activation.\n act_lhs_distribution: Distribution of act_lhs; of type InputDistribution.\n act_rhs_distribution: Distribution of act_rhs; of type InputDistribution.\n domain: Custom domain to match (defaults to 'default').\n\n Returns:\n Quantized activations corresponding to act_lhs and act_rhs.\n \"\"\"\n qd = self.GetQDomain(domain)\n if not qd:\n return act_lhs, act_rhs\n\n return qd.ToAqtActActInputs(act_lhs, act_rhs, act_lhs_distribution,\n act_rhs_distribution)\n\n def FromAqtActActMatmul(self, output, domain=None):\n \"\"\"Rescales the output of (act*act) matmul for AQT style quantized acts.\n\n Args:\n output: The (act * act) matmul output tensor to rescale.\n domain: Custom domain to match (defaults to 'default').\n\n Returns:\n Rescaled output.\n \"\"\"\n qd = self.GetQDomain(domain)\n if not qd:\n return output\n\n return qd.FromAqtActActMatmul(output)\n\n def GetQDomain(self, domain):\n \"\"\"Gets the QDomain matching a given domain name.\n\n Args:\n domain: User specified domain name.\n\n Returns:\n The requested QDomain, the 'default' QDomain or None.\n \"\"\"\n qd = 
self._qdomains.get(domain)\n if qd:\n return qd\n qd = self._qdomains.get('default')\n return qd\n\n def _AddQuantizationFunctions(self):\n \"\"\"Adds standard quantization functions against the given layer.\"\"\"\n\n def WrapOp(fnname, f, default_qmin=None, default_qmax=None):\n \"\"\"Adds a wrapper op to the layer's fns.\"\"\"\n\n def Wrapped(*args, **kwargs):\n \"\"\"Wraps a native op.\"\"\"\n # Validate and pop args 'qt', 'qmin', 'qmax' and 'qdomain'.\n qt = kwargs.get('qt')\n if qt is not None:\n del kwargs['qt']\n qmin = kwargs.get('qmin')\n if qmin is not None:\n del kwargs['qmin']\n qmax = kwargs.get('qmax')\n if qmax is not None:\n del kwargs['qmax']\n qdomain = kwargs.get('qdomain')\n if qdomain is not None:\n del kwargs['qdomain']\n narrow_to_asym_bit_depth = kwargs.get('narrow_to_asym_bit_depth')\n if narrow_to_asym_bit_depth is not None:\n del kwargs['narrow_to_asym_bit_depth']\n if qmin is None:\n qmin = default_qmin\n if qmax is None:\n qmax = default_qmax\n assert qt is not None or (qmin is not None and qmax is not None), (\n ('Quantized function \"%s\" requires either qt (QTensor name) or '\n 'qmin/qmax to be set.') % fnname)\n\n # Provide a better default name if none provided.\n if 'name' not in kwargs and qt is not None:\n kwargs['name'] = '%s_%s' % (fnname, qt)\n\n # Invoke original.\n y = f(*args, **kwargs)\n\n # Handle the output.\n if qt is not None:\n y = self.QTensor(qt, y)\n else:\n qd = self.GetQDomain(qdomain)\n if qd:\n if narrow_to_asym_bit_depth:\n qrange = qmax - qmin\n qmax = qmin + qrange * (2**qd.bits - 1) / (2**qd.bits)\n y = qd.QuantizeNaturalRange(y, qmin, qmax)\n return y\n\n self.AddFunction(fnname, Wrapped)\n\n # Supported quantized functions.\n WrapOp('qadd', tf.add)\n WrapOp('qsubtract', tf.subtract)\n WrapOp('qmultiply', tf.multiply)\n WrapOp('qmatmul', py_utils.Matmul)\n WrapOp('qbatchmatmul', tf.matmul)\n WrapOp('qconv1d', tf.nn.conv1d)\n WrapOp('qtanh', tf.tanh, default_qmin=-1.0, default_qmax=1.0)\n WrapOp('qsigmoid', tf.sigmoid, default_qmin=0.0, default_qmax=1.0)\n WrapOp('qsoftmax', tf.nn.softmax, default_qmin=0.0, default_qmax=1.0)\n WrapOp('qlog', tf.math.log)\n WrapOp('qlogsoftmax', tf.nn.log_softmax)\n WrapOp('qrelu', tf.nn.relu)\n WrapOp('qrelu6', tf.nn.relu6, default_qmin=0.0, default_qmax=6.0)\n WrapOp(\n 'qrandom_uniform',\n tf.random.uniform,\n default_qmin=0.0,\n default_qmax=1.0)\n\n # Convenience for quantizing weights.\n self.AddFunction('qweight', self.QWeight)\n\n\nclass BaseClippingCapSchedule(base_layer.BaseLayer):\n \"\"\"Base class for clipping cap schedules.\"\"\"\n\n @property\n def is_quantized(self):\n return False\n\n def GetEndRange(self):\n \"\"\"Public method to get the final range as a constant.\n\n Note that this returns the \"ideal\" end range (i.e. -1..1) as opposed to\n the actual range, which has its upper bound slightly adjusted based on\n the bit depth of the quantized type. In this sense, this value is a lie,\n but it is a consistent lie that can be corrected for downstream by the\n inference engine once it has inferred the actual quantized types being\n used.\n\n Note that this also assumes the default start/end caps. 
Some internal\n parts may use altered caps or bit depths.\n\n Returns:\n Tuple of (min, max) for the final range.\n \"\"\"\n raise NotImplementedError('Abstract Method: GetEndRange')\n\n def GetQuantizedEndRange(self):\n \"\"\"Gets the quantized ending range.\n\n Unlike GetEndRange(), this takes quantization effects into account.\n The default implementation just returns self.GetEndRange(). Subclasses\n can include additional keyword arguments, tightly coupling them to callers\n of specific types.\n\n Returns:\n Tuple of (min, max) for the final range.\n \"\"\"\n assert not self.is_quantized\n return self.GetEndRange()\n\n def ApplyConstantClip(self, x, min_value, max_value):\n \"\"\"Applies a constant clip with the clipping op for the implementation.\n\n This is a special case which allows applying a custom clipping range to\n constants that are not used arithmetically. This exists to support padding.\n\n Args:\n x: Tensor to clip.\n min_value: Minimum value.\n max_value: Maximum value.\n Returns:\n Tensor clipped.\n \"\"\"\n raise NotImplementedError('Abstract method: ApplyConstantClip')\n\n def GetState(self, theta):\n \"\"\"Gets a state tensor that can be used to calculate clipping.\n\n The state will be a float32 tensor that is safe to pass to TF functions.\n\n Args:\n theta: Layer theta.\n Returns:\n An opaque tensor to be passed to ApplyClippingWithState().\n \"\"\"\n raise NotImplementedError('Abstract method: GetState')\n\n def ApplyClipping(self, theta, x, **kwargs):\n \"\"\"Applies clipping to x.\n\n Args:\n theta: Layer theta.\n x: Input tensor to clip.\n **kwargs: Additional implementation specific kwargs.\n Returns:\n Clipped (or identity) x.\n \"\"\"\n return self.ApplyClippingWithState(self.GetState(theta), x, **kwargs)\n\n def ApplyClippingWithState(self, state, x):\n \"\"\"Applies clipping to x.\n\n Args:\n state: A previously obtained value of GetState().\n x: Input tensor to clip.\n Returns:\n Clipped (or identity) x.\n \"\"\"\n raise NotImplementedError('Abstract Method: ApplyClippingWithState')\n\n\nclass IdentityClippingCapSchedule(BaseClippingCapSchedule):\n \"\"\"Dummy cc schedule (useful in some cases instead of None).\"\"\"\n\n def GetEndRange(self):\n np_dtype = self.params.dtype.as_numpy_dtype\n np_info = np.finfo(np_dtype)\n return (np_info.min, np_info.max)\n\n def ApplyConstantClip(self, x, min_value, max_value):\n return x\n\n def GetState(self, theta):\n return tf.zeros([1], tf.float32)\n\n def ApplyClippingWithState(self, state, x):\n return x\n\n\nclass LinearClippingCapSchedule(BaseClippingCapSchedule):\n \"\"\"Class for linear clipping cap decay.\"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('start_step', 0,\n 'We start gradually narrowing clipping cap from start_step.')\n p.Define('end_step', 15000,\n 'We reach end_cap by end_step.')\n p.Define('start_cap', 8.0,\n 'We gradually narrow the clipping range over the course of '\n 'training. This is the clipping range we apply when training '\n 'starts.')\n p.Define('end_cap', 1.0,\n 'We gradually narrow the clipping range over the course of '\n 'training. 
This is the clipping range we apply towards the end '\n 'of training.')\n p.name = 'CCSchedule'\n return p\n\n @property\n def is_quantized(self):\n return False\n\n def ApplyConstantClip(self, x, min_value, max_value):\n return tf.clip_by_value(x, min_value, max_value)\n\n def GetState(self, theta):\n return self._Value()\n\n def ApplyClippingWithState(self, state, x):\n \"\"\"Applies clipping to x.\n\n Args:\n state: Clipping state.\n x: Input tensor to clip.\n Returns:\n Clipped (or identity) x.\n \"\"\"\n cap = tf.cast(state, x.dtype)\n return tf.clip_by_value(x, -cap, cap)\n\n def GetEndRange(self):\n \"\"\"Returns the range of values that are clipped towards the end of training.\n\n This is always a constant and is used by downstream systems.\n\n Returns:\n Tuple of (min, max).\n \"\"\"\n return (-self.params.end_cap, self.params.end_cap)\n\n def _Value(self):\n \"\"\"Returns the current clipping cap.\"\"\"\n p = self.params\n start_step = tf.cast(p.start_step, tf.float32)\n end_step = tf.cast(p.end_step, tf.float32)\n current_step = tf.cast(py_utils.GetGlobalStep(), tf.float32)\n steps_ratio = (\n tf.minimum(end_step - start_step, current_step - start_step)/\n (end_step - start_step))\n rmax_tensor = (\n steps_ratio * p.end_cap + (1.0 - steps_ratio) * p.start_cap)\n return tf.cond(\n tf.less(current_step,\n p.start_step), lambda: tf.cast(p.start_cap, tf.float32),\n lambda: tf.cast(rmax_tensor, tf.float32))\n\n\nclass FakeQuantizationSchedule(BaseClippingCapSchedule):\n \"\"\"Manages application of fake quantization via a schedule.\n\n This implementation is a general-purpose clipping cap schedule but also\n works with the Fake Quantization approach used by mobile inference engines.\n It is tightly coupled to the FakeQuantizedLSTMCell. See more exhaustive\n documentation and links there.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.name = 'FQSchedule'\n p.Define('clip_start_step', 0,\n 'We start gradually narrowing clipping cap from start_step.')\n p.Define('clip_end_step', 15000, 'We reach end_cap by end_step.')\n p.Define('quant_start_step', 15000,\n 'Step at which we begin to apply quantization.')\n p.Define('start_cap', 8.0, 'Default clipping/quant start cap.')\n p.Define('end_cap', 1.0, 'Default clipping/quant end cap.')\n p.Define('bits', 8, 'Default quantized bit depth.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n # We may relax this constraint at some point to allow gradual quantization\n # but enforce for now as it is easy to mess up and we have not evaluated\n # how it would work otherwise.\n assert p.quant_start_step >= p.clip_end_step, (\n 'quant_start_step must be >= clip_end_step')\n\n @property\n def is_quantized(self):\n return True\n\n @property\n def bits(self):\n p = self.params\n return p.bits\n\n def GetEndRange(self):\n \"\"\"Public method to get the final range as a constant.\n\n Note that this returns the \"ideal\" end range (i.e. -1..1) as opposed to\n the actual range, which has its upper bound slightly adjusted based on\n the bit depth of the quantized type. In this sense, this value is a lie,\n but it is a consistent lie that can be corrected for downstream by the\n inference engine once it has inferred the actual quantized types being\n used.\n\n Note that this also assumes the default start/end caps. 
Some internal\n parts may use altered caps or bit depths.\n\n Returns:\n Tuple of (min, max) for the final range.\n \"\"\"\n p = self.params\n return (-p.end_cap, p.end_cap)\n\n def GetQuantizedEndRange(self, end_cap=None, bits=None):\n \"\"\"Gets the quantized ending range.\n\n Unlike GetEndRange(), this takes quantization effects into account.\n\n Args:\n end_cap: Override end_cap value.\n bits: Override bits value.\n Returns:\n Tuple of (min, max) for the final range.\n \"\"\"\n p = self.params\n if end_cap is None:\n end_cap = p.end_cap\n if bits is None:\n bits = p.bits\n return self._GetQuantizedRangeForCap(end_cap, bits)\n\n def ApplyConstantClip(self, x, min_value, max_value):\n return tf.quantization.fake_quant_with_min_max_vars(\n x, min_value, max_value, num_bits=self.params.bits)\n\n def GetState(self, theta):\n \"\"\"Gets the state from theta.\"\"\"\n p = self.params\n if p.is_inference:\n # State is not used for inference. Just return dummy.\n return tf.zeros([1], tf.float32)\n else:\n # Calculations/vars need to be float but these can be ints in the params.\n clip_end_step = tf.cast(p.clip_end_step, tf.float32)\n clip_start_step = tf.cast(p.clip_start_step, tf.float32)\n quant_start_step = tf.cast(p.quant_start_step, tf.float32)\n global_step = tf.cast(py_utils.GetGlobalStep(), tf.float32)\n\n # Will be negative if before clipping starts.\n clip_ratio = (\n tf.minimum(clip_end_step - clip_start_step,\n global_step - clip_start_step) /\n tf.maximum(1.0, clip_end_step - clip_start_step))\n # Currently fq is either on (1.0) or off (-1.0). Progressive quantization\n # may later occupy 0..1.0.\n fq_ratio = tf.where(global_step < quant_start_step, -1.0, 1.0)\n\n return tf.stack([clip_ratio, fq_ratio])\n\n def _GetQuantizedRangeForCap(self, current_cap, bits):\n \"\"\"Gets the range for the given cap and number of bits.\n\n Args:\n current_cap: Cap to compute against.\n bits: Number of bits (8, 16, etc).\n Returns:\n If current_cap is a python float, the result will be a float. If a Tensor\n scalar, then a Tensor scalar.\n \"\"\"\n dt_max = 2**(bits - 1) # i.e. 
8bit = 128, 16bit = 32768\n return -current_cap, current_cap * (dt_max - 1) / dt_max\n\n def _GetCurrentMinMax(self,\n state,\n start_cap,\n end_cap,\n bits,\n fixate_to_end_state=False):\n \"\"\"Gets the current min/max for the bit depth and caps.\n\n Args:\n state: Clipping state.\n start_cap: Starting cap.\n end_cap: Ending cap once clipping saturates.\n bits: Number of bits of the quantized datatype.\n fixate_to_end_state: Whether to fixate the cap to the end state.\n Returns:\n (min_value, max_value) as python scalars or 0D Tensors (\n if not fixate_to_end_state).\n \"\"\"\n if fixate_to_end_state:\n current_cap = end_cap\n else:\n clip_ratio = state[0] if not fixate_to_end_state else 1.0\n current_cap = clip_ratio * end_cap + (1.0 - clip_ratio) * start_cap\n return self._GetQuantizedRangeForCap(current_cap, bits)\n\n def ApplyClippingWithState(self,\n state,\n x,\n start_cap=None,\n end_cap=None,\n bits=None):\n \"\"\"Applies clipping.\n\n The start_cap, end_cap and bits can be set explicitly and take the default\n if None.\n\n Args:\n state: Clipping state.\n x: Tensor to clip.\n start_cap: Clipping value at the start of the ramp.\n end_cap: Clipping value at the end of the ramp.\n bits: Number of bits to quantize to.\n Returns:\n x with clipping applied.\n \"\"\"\n p = self.params\n if start_cap is None:\n start_cap = p.start_cap\n if end_cap is None:\n end_cap = p.end_cap\n if bits is None:\n bits = p.bits\n if p.is_inference:\n # For inference, we assume that both clipping and quantization have\n # saturated and just output a saturated quant op.\n min_value, max_value = self._GetCurrentMinMax(\n state, start_cap, end_cap, bits, fixate_to_end_state=True)\n # Note that the inference version uses the *_args variant, which requires\n # constants for min/max. The _GetCurrentMinMax will return (python)\n # constants if fixating. This is fragile but works around a Toco bug\n # if trying to run on the *_vars form because it can't seem to read\n # 0D tensors. This form has the benefit of blowing up at export time\n # if the min/max aren't constant.\n return _CopyShape(\n x,\n tf.quantization.fake_quant_with_min_max_args(\n x, min_value, max_value, num_bits=bits))\n\n # Non-inference.\n def Clipped():\n clip_ratio = state[0]\n min_value, max_value = self._GetCurrentMinMax(state, start_cap, end_cap,\n bits)\n min_value = tf.stop_gradient(min_value)\n max_value = tf.stop_gradient(max_value)\n return tf.where(clip_ratio >= 0.0,\n (lambda: tf.clip_by_value(x, min_value, max_value))(),\n (lambda: x)())\n\n def Quantized():\n min_value, max_value = self._GetCurrentMinMax(state, start_cap, end_cap,\n bits)\n min_value = tf.stop_gradient(min_value)\n max_value = tf.stop_gradient(max_value)\n return tf.quantization.fake_quant_with_min_max_vars(\n x, min_value, max_value, num_bits=bits)\n\n # Quantization will implicitly clip, so if we are in the quant phase, just\n # do that. 
Otherwise, clip (which will return identity if not in that\n # phase yet).\n fq_ratio = state[1]\n # return _CopyShape(x, Clipped())\n return _CopyShape(x, tf.where(fq_ratio <= 0.0, Clipped(), Quantized()))\n\n\nclass QDomain(base_layer.BaseLayer):\n \"\"\"Base class for a quantization domain layer.\n\n This implementation doubles as a no-op quantization domain.\n \"\"\"\n\n @property\n def bits(self):\n \"\"\"Retrieves the bits used by this quantization layer.\n\n Returns:\n The number of bits available to this qdomain or None if unquantized.\n \"\"\"\n return None\n\n def QuantizeWeight(self, w):\n \"\"\"Quantizes a weight.\n\n Args:\n w: Weight tensor to quantize.\n Returns:\n Quantized weight.\n \"\"\"\n return w\n\n def FqWeight(self, w_name, w, feature_axis, expected_scale_shape):\n \"\"\"AQT Quantized weight FQ style .\n\n Args:\n w_name: weight name.\n w: The weight tensor.\n feature_axis: axis corresponding to output channel/feature for weights.\n expected_scale_shape: Optional shape to verify if scale shape is expected.\n\n Returns:\n Quantized weights.\n \"\"\"\n del feature_axis, expected_scale_shape, w_name\n return w\n\n def ToAqtWeight(self, w_name, w, feature_axis, expected_scale_shape):\n \"\"\"Quantized weight AQT style.\n\n Refer to quantizable_layer.ToAqtWeight.\n\n Args:\n w_name: weight name.\n w: The weight tensor.\n feature_axis: axis corresponding to output channel/feature for weights.\n expected_scale_shape: Optional shape to verify if scale shape is expected.\n\n Returns:\n Quantized weights.\n \"\"\"\n del feature_axis, expected_scale_shape, w_name\n return w\n\n def FromAqtWeight(self, w_name, out):\n \"\"\"Rescales the output corresponding to AQT quantized matmuls' weight.\n\n Refer to quantizable_layer.FromAqtWeight.\n\n Args:\n w_name: weight name.\n out: The tensor to rescale.\n\n Returns:\n Rescaled output.\n \"\"\"\n del w_name\n return out\n\n def ToAqtActActInputs(self, act_lhs, act_rhs, act_lhs_distribution,\n act_rhs_distribution):\n \"\"\"Quantizes activations for (act * act) matmul AQT style.\n\n This only scales, rounds and clips; resulting quantized acts would be\n either integer ot integer emulated in float.\n\n Args:\n act_lhs: Left hand side activation.\n act_rhs: Right hand side activation.\n act_lhs_distribution: Distribution of act_lhs; of type InputDistribution.\n act_rhs_distribution: Distribution of act_rhs; of type InputDistribution.\n\n Returns:\n Quantized activations corresponding to act_lhs and act_rhs.\n \"\"\"\n del act_lhs_distribution, act_rhs_distribution\n return act_lhs, act_rhs\n\n def FromAqtActActMatmul(self, output):\n \"\"\"Rescales output of dynamic matmul (act * act).\n\n Args:\n output: output, corresponds to tf.matmul(act_lhs, act_rhs)\n\n Returns:\n Rescaled output.\n \"\"\"\n\n return output\n\n def QuantizeConstantRange(self, t, min_value, max_value):\n \"\"\"Quantizes a true-constant range that is not used for arithmetic.\n\n This supports special values like padding that should have a precise\n range that we do not deviate from.\n\n Args:\n t: Tensor to quantize.\n min_value: Min of the range.\n max_value: Max of the range.\n\n Returns:\n Quantized tensor.\n \"\"\"\n return t\n\n def QuantizeNaturalRange(self, t, min_value, max_value):\n \"\"\"Quantizes a tensor with a known, natural range.\n\n Args:\n t: Tensor to quantize.\n min_value: Min value of the range.\n max_value: Max value of the range.\n Returns:\n Quantized tensor.\n \"\"\"\n return t\n\n def CreateTensor(self, t_name):\n \"\"\"Creates a QTensor with 
t_name.\n\n Args:\n t_name: Unique name (within layer) for this tensor.\n \"\"\"\n pass\n\n def CreateTensorWithShape(self, t_name, shape, feature_axis):\n \"\"\"Creates a QTensor with t_name and given shape.\n\n Args:\n t_name: Unique name (within layer) for this tensor.\n shape: Expected shape of the tensor.\n feature_axis: axis corresponding to output channel/feature for weights.\n \"\"\"\n pass\n\n def QuantizeTensors(self, t_name, ts, eval_only=False):\n \"\"\"Quantizes a tensor with t_name previously created with CreateTensor.\n\n If applicable, each of the passed tensors contributes to a shared\n range.\n\n Args:\n t_name: Tensor name.\n ts: List of tensors to quantize.\n eval_only: Whether to only apply quantization pressure at eval time.\n Returns:\n Quantized tensors.\n \"\"\"\n return ts\n\n def GetTensorRange(self, t_name, ts):\n \"\"\"Retrieves the range of a tensor given the t_name used by CreateTensor.\n\n Note, this computes the batch range across the list of tensors at training\n time but fetches the stored tensor over time. This depends on\n QuantizeTensors updating the appropriate value.\n\n Args:\n t_name: Tensor name.\n ts: Tensor to determine the range for.\n\n Returns:\n A min-max pair that represents the tensor range.\n \"\"\"\n raise NotImplementedError('Abstract method: NormalizeTensors')\n\n\nclass SymmetricScheduledClipQDomain(QDomain):\n \"\"\"A quantization domain that does symmetric scheduled clipping.\n\n This contains a BaseClippingCapSchedule which handles the actual clipping. It\n defaults to a FakeQuantizationSchedule.\n\n This clipping domain will aid in quantizing layers that are known to tolerate\n operation within known ranges (such as LSTM cells). The clipping range will\n converge over a range of steps and is setup to match ideal, symmetric ranges\n for quantized types.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('cc_schedule', FakeQuantizationSchedule.Params(),\n 'Quantization clipping schedule.')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n p = self.params\n\n self.CreateChild('cc_schedule', p.cc_schedule)\n\n @property\n def bits(self):\n return self.cc_schedule.bits\n\n def QuantizeWeight(self, w):\n return self.cc_schedule.ApplyClipping(self.theta.cc_schedule, w)\n\n def QuantizeNaturalRange(self, t, min_value, max_value):\n # Note: We apply the scheduled clip here, completely overriding the\n # known natural range. This is intentional and assumes that when this\n # layer is used for symmetric clipping, it is applied uniformly to all\n # active elements.\n return self.cc_schedule.ApplyClipping(self.theta.cc_schedule, t)\n\n def QuantizeConstantRange(self, t, min_value, max_value):\n # Constant ranges, such as padding are handled separately. They are merely\n # constrained to the given range and assumed to be quantizable as-is.\n # This is used for padding.\n return tf.clip_by_value(t, min_value, max_value)\n\n def QuantizeTensors(self, t_name, ts, eval_only=False):\n if eval_only and not self.do_eval:\n return ts\n else:\n return [\n self.cc_schedule.ApplyClipping(self.theta.cc_schedule, t) for t in ts\n ]\n\n\nclass _CountedMinMaxAccumulator(base_layer.Accumulator):\n \"\"\"Accumulator for a counted min/max.\n\n Represented as a tensor of shape [count, min, max]. 
Every update\n increases the count and expands the min/max (initially zeros).\n \"\"\"\n\n def __init__(self, dtype):\n super().__init__()\n self.dtype = dtype\n\n def DefaultValue(self):\n return tf.zeros([3], dtype=self.dtype, name='qstate_zero')\n\n def Update(self, new_value):\n state0 = self.GetValue()\n state1 = tf.stack([\n state0[0] + new_value[0],\n tf.minimum(state0[1], new_value[1]),\n tf.maximum(state0[2], new_value[2]),\n ])\n self.SetValue(state1)\n\n\nclass PassiveAsymQDomain(QDomain):\n \"\"\"A quantization domain that does passive, asymmetric quantization.\n\n See: https://arxiv.org/abs/1712.05877\n\n This quantization domain will adjust to min/max ranges during training\n time, recording them into vars via an exponential moving average and then\n applying them at eval/inference time.\n \"\"\"\n\n @classmethod\n def Params(cls):\n p = super().Params()\n p.Define('bits', 8, 'Default quantized bit depth.')\n p.Define('ema_decay', 0.99, 'Moving average decay.')\n p.Define('default_min', -1.0,\n 'Default minimum value (so initial graphs are valid).')\n p.Define('default_max', 1.0,\n 'Default maximum value (so initial graphs are valid).')\n p.Define('quantize_weight_epsilon', 0.0,\n 'Default epsilon for weight quantization to prevent zero range.')\n p.Define(\n 'delay_start_steps', 0,\n 'Delays applying quantization at training time until after '\n 'this many steps. 0 = start immediately. -1 = start never. '\n 'This is often needed to allow the model to reach some level '\n 'of convergence prior to applying quantization. Only affects '\n 'training (not eval/inference).')\n return p\n\n def __init__(self, params):\n super().__init__(params)\n\n self._t_names = set() # set of known t_name (from CreateTensor)\n self._qvars = py_utils.NestedMap() # var_name -> tf.Variable\n\n def _CreateLayerVariables(self):\n # Save a scope for lazily created variables.\n with tf.variable_scope('q'):\n self._qvars_scope = tf.get_variable_scope()\n\n def _MaybeFakeQuant(self, inputs, min_v, max_v, num_bits):\n p = self.params\n\n def Apply():\n return tf.quantization.fake_quant_with_min_max_vars(\n inputs, min_v, max_v, num_bits=num_bits)\n\n if p.delay_start_steps != 0 and not self.do_eval:\n if p.delay_start_steps == -1:\n return inputs\n return tf.where(py_utils.GetGlobalStep() >= p.delay_start_steps, Apply(),\n inputs)\n else:\n return Apply()\n\n @property\n def bits(self):\n p = self.params\n return p.bits\n\n def QuantizeWeight(self, w):\n p = self.params\n w_min = tf.reduce_min(w)\n w_max = tf.reduce_max(w)\n # NOTE: We force a small, non-zero range because otherwise, zero weights\n # can cause downstream inference engines to blow up.\n w_min = tf.minimum(w_min, -p.quantize_weight_epsilon)\n w_max = tf.maximum(w_max, p.quantize_weight_epsilon)\n quant_w = self._MaybeFakeQuant(w, w_min, w_max, num_bits=p.bits)\n if self.do_eval:\n return quant_w\n else:\n # If quantizing during training, skip quantization if it produces\n # NANs. 
Sometimes early in the training process, things are unstable\n # and ranges can produce numerical instability that makes it\n # impossible to perform a fake_quant.\n quant_w_has_nans = tf.math.is_nan(quant_w)\n return tf.where(quant_w_has_nans, w, quant_w)\n\n def QuantizeNaturalRange(self, t, min_value, max_value):\n p = self.params\n return self._MaybeFakeQuant(t, min_value, max_value, num_bits=p.bits)\n\n def QuantizeConstantRange(self, t, min_value, max_value):\n p = self.params\n return self._MaybeFakeQuant(t, min_value, max_value, num_bits=p.bits)\n\n def CreateTensor(self, t_name):\n p = self.params\n assert t_name not in self._t_names, (\n 'QTensor already registered: %s' % t_name)\n self._t_names.add(t_name)\n\n # Create accumulator\n accumulator_name = self._GetAccumulatorNameForTensor(t_name)\n self.RegisterAccumulator(accumulator_name,\n _CountedMinMaxAccumulator(p.dtype))\n # Register vars.\n min_pc = py_utils.WeightParams((),\n py_utils.WeightInit.Constant(p.default_min),\n p.dtype)\n max_pc = py_utils.WeightParams((),\n py_utils.WeightInit.Constant(p.default_max),\n p.dtype)\n self._CreateQStateVar(t_name, 'min', min_pc)\n self._CreateQStateVar(t_name, 'max', max_pc)\n\n def QuantizeTensors(self, t_name, ts, eval_only=False):\n p = self.params\n # Always straddle a real zero point.\n if self.do_eval:\n # At eval/inference time, use the memorized range.\n # Important: Don't capture these variables in training mode so as to\n # avoid extra/unnecessary captures.\n min_var = self._GetQStateVar(t_name, 'min')\n max_var = self._GetQStateVar(t_name, 'max')\n return [\n self._MaybeFakeQuant(t, min_var, max_var, num_bits=p.bits) for t in ts\n ]\n else:\n # At training time, use the batch calculated min/max.\n accumulator_name = self._GetAccumulatorNameForTensor(t_name)\n # Calculate min/max for all tensors.\n batch_min = 0.0\n batch_max = 0.0\n for t in ts:\n batch_min = tf.minimum(tf.reduce_min(t), batch_min)\n batch_max = tf.maximum(tf.reduce_max(t), batch_max)\n\n # New state.\n state1 = tf.stack([1.0, batch_min, batch_max])\n self.accumulators[accumulator_name].Update(state1)\n\n # Results.\n ts_out = []\n for i, t in enumerate(ts):\n if eval_only:\n # If only quantizing at eval time, still record ranges as above\n # but don't quantize.\n quant_t = t\n else:\n # If quantizing during training, skip quantization if it produces\n # NANs. 
Sometimes early in the training process, things are unstable\n # and ranges can produce numerical instability that makes it\n # impossible to perform a fake_quant.\n quant_t = self._MaybeFakeQuant(\n t, batch_min, batch_max, num_bits=p.bits)\n # TODO(laurenzo): Plumb quant_t_has_nans through state and report.\n quant_t_has_nans = tf.math.is_nan(quant_t)\n quant_t = tf.where(quant_t_has_nans, t, quant_t)\n ts_out.append(quant_t)\n summary_utils.histogram(\n '%s/%s_%d' % (self._qvars_scope.name, t_name, i), t)\n return ts_out\n\n def GetTensorRange(self, t_name, ts):\n # Always straddle a real zero point.\n if self.do_eval:\n # At eval/inference time, use the memorized range.\n # Important: Don't capture these variables in training mode so as to\n # avoid extra/unnecessary captures.\n min_var = tf.stop_gradient(self._GetQStateVar(t_name, 'min'))\n max_var = tf.stop_gradient(self._GetQStateVar(t_name, 'max'))\n return (min_var, max_var)\n # Calculate min/max for all tensors.\n batch_min = tf.minimum(tf.reduce_min(ts), 0.0)\n batch_max = tf.maximum(tf.reduce_max(ts), 0.0)\n return (tf.stop_gradient(batch_min), tf.stop_gradient(batch_max))\n\n def PostTrainingStepUpdate(self):\n ops = [super().PostTrainingStepUpdate()]\n for t_name in self._t_names:\n ops.extend(self._RecordTensor(t_name))\n self._SummarizeTensor(t_name)\n return tf.group(ops)\n\n def _CreateQStateVar(self, t_name, suffix, params):\n name = t_name + '_' + suffix\n assert name not in self._qvars, 'QState var already exists: %s' % name\n var_name = self._qvars_scope.name + '/' + name\n with tf.variable_scope(py_utils.GetGlobalVariableScope()):\n v = py_utils.CreateVariable(var_name, params, trainable=False)\n self._qvars[name] = v\n return v\n\n def _GetAccumulatorNameForTensor(self, t_name):\n return 'qtensor_' + t_name\n\n def _GetQStateVar(self, t_name, suffix):\n v = self._qvars[t_name + '_' + suffix]\n return v\n\n def _SummarizeTensor(self, t_name):\n min_var = self._GetQStateVar(t_name, 'min')\n max_var = self._GetQStateVar(t_name, 'max')\n # foo/q/somet_min:0 -> foo/q/somet_min\n summary_name_min = min_var.name.split(':')[0]\n summary_name_max = max_var.name.split(':')[0]\n summary_utils.scalar(summary_name_min, min_var)\n summary_utils.scalar(summary_name_max, max_var)\n\n def _RecordTensor(self, t_name):\n p = self.params\n if self.do_eval:\n return []\n\n accumulator_name = self._GetAccumulatorNameForTensor(t_name)\n accumulator = self.accumulators[accumulator_name]\n min_var = self._GetQStateVar(t_name, 'min')\n max_var = self._GetQStateVar(t_name, 'max')\n\n # Unpack state tensor.\n current_value = accumulator.GetValue()\n count = current_value[0]\n min_value = current_value[1]\n max_value = current_value[2]\n accumulator.Reset()\n\n def Ema(variable, value):\n return (1.0 - p.ema_decay) * (variable - value)\n\n # Note that small floating point issues can cause ranges that naturally\n # begin or end at zero to move slightly past, causing hard failures\n # downstream (checks that all ranges straddle zero). 
We therefore repeat\n # the straddling constraint here.\n return [\n tf.assign(\n min_var,\n tf.minimum(\n 0.,\n min_var - tf.where(count > 0., Ema(min_var, min_value), 0.))),\n tf.assign(\n max_var,\n tf.maximum(\n 0.,\n max_var - tf.where(count > 0., Ema(max_var, max_value), 0.))),\n ]\n\n\ndef _CopyShape(from_t, to_t):\n if isinstance(from_t, tf.Tensor) and isinstance(to_t, tf.Tensor):\n to_t.set_shape(from_t.shape)\n return to_t\n\n\nclass InputDistribution(enum.Enum):\n \"\"\"Distribution type for the inputs for AqtQdomain.\n\n Symmetric distribution is for signed inputs, here we quantize the inputs using\n symmetric range around 0 i.e. in range [-max, max]. Weights are signed.\n Positive distribution is for unsigned distribution, here we quantize the\n inputs in range [0, max_val]\n \"\"\"\n SYMMETRIC = enum.auto()\n POSITIVE = enum.auto()\n", "# Lint as: python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for speech decoder.\"\"\"\n\nimport lingvo.compat as tf\nfrom lingvo.core import cluster_factory\nfrom lingvo.core import layers as lingvo_layers\nfrom lingvo.core import py_utils\nfrom lingvo.core import symbolic\nfrom lingvo.core import test_utils\nfrom lingvo.core.ops.hyps_pb2 import Hypothesis\nfrom lingvo.tasks.asr import decoder\nimport numpy as np\n\nfrom google.protobuf import text_format\n\nFLAGS = tf.flags.FLAGS\n\n\ndef _DecoderParams(vn_config, num_classes=32, num_rnn_layers=1):\n \"\"\"Create a small decoder for testing.\"\"\"\n p = decoder.AsrDecoder.Params()\n p.random_seed = 12345\n\n p.name = 'decoder'\n uniform_init = py_utils.WeightInit.Uniform(0.1, seed=12345)\n\n # Set up embedding params.\n p.emb.vocab_size = num_classes\n p.emb.max_num_shards = 1\n p.emb.params_init = uniform_init\n\n # Set up decoder RNN layers.\n p.rnn_layers = num_rnn_layers\n rnn_params = p.rnn_cell_tpl\n rnn_params.params_init = uniform_init\n\n # Set up attention.\n p.attention.hidden_dim = 16\n p.attention.params_init = uniform_init\n\n # Set up final softmax layer.\n p.softmax.num_classes = num_classes\n p.softmax.params_init = uniform_init\n\n # Set up variational noise params.\n p.vn = vn_config\n p.vn.scale = tf.constant(0.1)\n\n p.target_seq_len = 5\n p.source_dim = 8\n p.emb_dim = 2\n p.rnn_cell_dim = 4\n\n return p\n\n\ndef _CreateSourceAndTargets(params):\n \"\"\"Creates encoder outputs and targets from params for the decoder.\"\"\"\n src_seq_len = 5\n src_enc = tf.random.normal([src_seq_len, 2, 8],\n seed=982774838,\n dtype=py_utils.FPropDtype(params))\n src_enc_padding = tf.constant(\n [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],\n dtype=py_utils.FPropDtype(params))\n encoder_outputs = py_utils.NestedMap(encoded=src_enc, padding=src_enc_padding)\n # shape=[4, 5]\n target_ids = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 15], [5, 6, 7, 8],\n [10, 5, 2, 5]],\n dtype=tf.int32))\n # shape=[4, 5]\n target_labels = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 
2, 3, 4], [10, 11, 12, 13], [5, 7, 8, 10],\n [10, 5, 2, 4]],\n dtype=tf.int32))\n # shape=[4, 5]\n target_paddings = tf.transpose(\n tf.constant([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0],\n [1, 1, 1, 0]],\n dtype=py_utils.FPropDtype(params)))\n target_transcripts = tf.constant(['abcd', 'bcde', 'klmp', 'fghi', 'kfcf'])\n target_weights = 1.0 - target_paddings\n # ids/labels/weights/paddings are all in [batch, time] shape.\n targets = py_utils.NestedMap({\n 'ids': target_ids,\n 'labels': target_labels,\n 'weights': target_weights,\n 'paddings': target_paddings,\n 'transcripts': target_transcripts,\n })\n return encoder_outputs, targets\n\n\nclass DecoderTest(test_utils.TestCase):\n\n def _getDecoderFPropMetrics(self, params):\n \"\"\"Creates decoder from params and computes metrics with random inputs.\"\"\"\n dec = params.Instantiate()\n encoder_outputs, targets = _CreateSourceAndTargets(params)\n decoder_outputs = dec.FPropDefaultTheta(encoder_outputs, targets)\n return decoder_outputs.metrics, decoder_outputs.per_sequence['loss']\n\n def _testDecoderFPropHelper(self, params):\n metrics, per_sequence_loss = self._getDecoderFPropMetrics(params)\n return metrics['loss'], per_sequence_loss\n\n def _testDecoderFPropFloatHelper(self,\n func_inline=False,\n num_decoder_layers=1,\n target_seq_len=5,\n residual_start=0):\n \"\"\"Computes decoder from params and computes loss with random inputs.\"\"\"\n cluster = cluster_factory.ForTestingWorker(add_summary=True)\n config = tf.config_pb2.ConfigProto(\n graph_options=tf.GraphOptions(\n optimizer_options=tf.OptimizerOptions(\n do_function_inlining=func_inline)))\n with cluster, self.session(use_gpu=False, config=config):\n tf.random.set_seed(8372749040)\n vn_config = py_utils.VariationalNoiseParams(None, False, False)\n p = _DecoderParams(vn_config)\n p.rnn_layers = num_decoder_layers\n p.residual_start = residual_start\n p.target_seq_len = target_seq_len\n dec = p.Instantiate()\n src_seq_len = 5\n src_enc = tf.random.normal([src_seq_len, 2, 8], seed=9283748)\n src_enc_padding = tf.constant(\n [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],\n dtype=tf.float32)\n encoder_outputs = py_utils.NestedMap(\n encoded=src_enc, padding=src_enc_padding)\n target_ids = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 15],\n [5, 6, 7, 8], [10, 5, 2, 5]],\n dtype=tf.int32))\n target_labels = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 13],\n [5, 7, 8, 10], [10, 5, 2, 4]],\n dtype=tf.int32))\n target_paddings = tf.transpose(\n tf.constant([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0],\n [1, 1, 1, 1]],\n dtype=tf.float32))\n target_transcripts = tf.constant(['abcd', 'bcde', 'klmp', 'fghi', 'kfcf'])\n target_weights = 1.0 - target_paddings\n targets = py_utils.NestedMap({\n 'ids': target_ids,\n 'labels': target_labels,\n 'weights': target_weights,\n 'paddings': target_paddings,\n 'transcripts': target_transcripts,\n })\n metrics = dec.FPropDefaultTheta(encoder_outputs, targets).metrics\n loss = metrics['loss'][0]\n correct_predicts = metrics['fraction_of_correct_next_step_preds'][0]\n summaries = tf.summary.merge(tf.get_collection(tf.GraphKeys.SUMMARIES))\n\n self.evaluate(tf.global_variables_initializer())\n loss_v, _ = self.evaluate([loss, correct_predicts])\n\n summaries.eval()\n\n return loss_v\n\n # Actual tests follow.\n\n def testDecoderConstruction(self):\n \"\"\"Test that decoder can be constructed from params.\"\"\"\n p = _DecoderParams(\n 
vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n _ = decoder.AsrDecoder(p)\n\n def testDecoderFProp(self):\n \"\"\"Create decoder with default params, and verify that FProp runs.\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n\n metrics, per_sequence_loss = self._getDecoderFPropMetrics(params=p)\n self.assertIn('fraction_of_correct_next_step_preds', metrics)\n self.evaluate(tf.global_variables_initializer())\n metrics_val, per_sequence_loss_val = self.evaluate(\n [metrics, per_sequence_loss])\n tf.logging.info('metrics=%s, per_sequence_loss=%s', metrics_val,\n per_sequence_loss_val)\n\n self.assertEqual(metrics_val['loss'], metrics_val['log_pplx'])\n # Target batch size is 4. Therefore, we should expect 4 here.\n self.assertEqual(per_sequence_loss_val.shape, (4,))\n\n def testDecoderFPropWithMeanSeqLoss(self):\n \"\"\"Create and fprop a decoder with different dims per layer.\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n p.token_normalized_per_seq_loss = True\n p.per_token_avg_loss = False\n\n metrics, per_sequence_loss = self._getDecoderFPropMetrics(params=p)\n self.evaluate(tf.global_variables_initializer())\n metrics_val, per_sequence_loss_val = self.evaluate(\n [metrics, per_sequence_loss])\n tf.logging.info('metrics=%s, per_sequence_loss=%s', metrics_val,\n per_sequence_loss_val)\n\n self.assertNotEqual(metrics_val['loss'][0], metrics_val['log_pplx'][0])\n self.assertAllClose(metrics_val['loss'], (3.484608, 4.0))\n self.assertAllClose(metrics_val['log_pplx'], (3.496482, 15.0))\n # Target batch size is 4. Therefore, we should expect 4 here.\n self.assertEqual(per_sequence_loss_val.shape, (4,))\n\n def testDecoderFPropWithProjection(self):\n \"\"\"Create decoder with projection layers, and verify that FProp runs.\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n rnn_cell_tpl = p.rnn_cell_tpl\n p.rnn_cell_tpl = [\n rnn_cell_tpl.Copy().Set(\n num_output_nodes=i + 2, num_hidden_nodes=i + 5)\n for i in range(p.rnn_layers)\n ]\n p.rnn_cell_dim = -1\n p.rnn_cell_hidden_dim = -1\n\n loss, per_sequence_loss = self._testDecoderFPropHelper(params=p)\n self.evaluate(tf.global_variables_initializer())\n loss_val, per_sequence_loss_val = self.evaluate([loss, per_sequence_loss])\n\n print('loss = ', loss_val, 'per sequence loss = ', per_sequence_loss_val)\n # Target batch size is 4. Therefore, we should expect 4 here.\n self.assertEqual(per_sequence_loss_val.shape, (4,))\n\n def testDecoderFPropWithPerLayerDims(self):\n \"\"\"Create and fprop a decoder with different dims per layer.\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n p.rnn_cell_hidden_dim = 6\n\n loss, per_sequence_loss = self._testDecoderFPropHelper(params=p)\n self.evaluate(tf.global_variables_initializer())\n loss_val, per_sequence_loss_val = self.evaluate([loss, per_sequence_loss])\n\n print('loss = ', loss_val, 'per sequence loss = ', per_sequence_loss_val)\n # Target batch size is 4. 
Therefore, we should expect 4 here.\n self.assertEqual(per_sequence_loss_val.shape, (4,))\n\n def testDecoderFPropDtype(self):\n \"\"\"Create decoder with different fprop_type, and verify that FProp runs.\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n p.fprop_dtype = tf.float64\n\n loss, per_sequence_loss = self._testDecoderFPropHelper(params=p)\n self.evaluate(tf.global_variables_initializer())\n loss_val, per_sequence_loss_val = self.evaluate([loss, per_sequence_loss])\n\n print('loss = ', loss_val, 'per sequence loss = ', per_sequence_loss_val)\n # Target batch size is 4. Therefore, we should expect 4 here.\n self.assertEqual(per_sequence_loss_val.shape, (4,))\n\n def testDecoderFPropDeterministicAttentionDropout(self):\n \"\"\"Verify that attention dropout is deterministic given fixed seeds.\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n p = _DecoderParams(\n py_utils.VariationalNoiseParams(None, True, False, seed=1792))\n\n p.use_while_loop_based_unrolling = False\n p.attention.atten_dropout_prob = 0.5\n p.attention.atten_dropout_deterministic = True\n\n loss, per_sequence_loss = self._testDecoderFPropHelper(params=p)\n global_step = py_utils.GetGlobalStep()\n self.evaluate(tf.global_variables_initializer())\n loss_val, per_sequence_loss_val, global_steps_val = self.evaluate(\n [loss, per_sequence_loss, global_step])\n\n print('loss = ', loss_val, 'per sequence loss = ', per_sequence_loss_val)\n self.assertAllClose([3.587372, 15.0], loss_val)\n self.assertAllClose([14.171288, 9.965696, 10.221684, 19.451914],\n per_sequence_loss_val)\n self.assertEqual(0, global_steps_val)\n\n # Run another step to test global_step and time_step are incremented\n # correctly.\n self.evaluate(tf.assign_add(global_step, 1))\n loss_val, per_sequence_loss_val, global_steps_val = self.evaluate(\n [loss, per_sequence_loss, global_step])\n\n print('loss = ', loss_val, 'per sequence loss = ', per_sequence_loss_val)\n self.assertAllClose([3.626164, 15.0], loss_val)\n self.assertAllClose([14.70993, 10.572938, 10.516836, 18.592758],\n per_sequence_loss_val)\n self.assertEqual(1, global_steps_val)\n\n def testLabelSmoothing(self):\n \"\"\"Verify that loss computation with label smoothing is as expected..\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n\n p = _DecoderParams(vn_config=py_utils.VariationalNoiseParams(None))\n p.label_smoothing = lingvo_layers.LocalizedLabelSmoother.Params()\n p.label_smoothing.offsets = [-2, -1, 1, 2]\n p.label_smoothing.weights = [0.015, 0.035, 0.035, 0.015]\n\n loss, _ = self._testDecoderFPropHelper(params=p)\n self.evaluate(tf.global_variables_initializer())\n loss_val = self.evaluate(loss[0])\n\n print('loss = ', loss_val)\n test_utils.CompareToGoldenSingleFloat(self, 3.471763, loss_val)\n\n def testDecoderFPropFloatNoInline(self):\n actual_value = self._testDecoderFPropFloatHelper(func_inline=False)\n test_utils.CompareToGoldenSingleFloat(self, 3.458980, actual_value)\n\n def testDecoderFPropFloatNoInlinePadTargetsToLongerLength(self):\n actual_value = self._testDecoderFPropFloatHelper(\n func_inline=False, target_seq_len=10)\n test_utils.CompareToGoldenSingleFloat(self, 3.458980, actual_value)\n\n def testDecoderFPropFloatInline(self):\n actual_value = self._testDecoderFPropFloatHelper(func_inline=True)\n test_utils.CompareToGoldenSingleFloat(self, 3.458980, actual_value)\n\n def 
testDecoderFPropFloatNoInline2Layers(self):\n actual_value = self._testDecoderFPropFloatHelper(\n func_inline=False, num_decoder_layers=2)\n test_utils.CompareToGoldenSingleFloat(self, 3.457761, actual_value)\n\n def testDecoderFPropFloatInline2Layers(self):\n actual_value = self._testDecoderFPropFloatHelper(\n func_inline=True, num_decoder_layers=2)\n test_utils.CompareToGoldenSingleFloat(self, 3.457761, actual_value)\n\n def testDecoderFPropFloat2LayersResidual(self):\n actual_value = self._testDecoderFPropFloatHelper(\n num_decoder_layers=2, residual_start=2)\n test_utils.CompareToGoldenSingleFloat(self, 3.458294, actual_value)\n\n def testDecoderFPropDouble(self):\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n np.random.seed(827374)\n\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(None, False, False))\n p.dtype = tf.float64\n\n dec = decoder.AsrDecoder(p)\n src_seq_len = 5\n src_enc = tf.constant(\n np.random.uniform(size=(src_seq_len, 2, 8)), tf.float64)\n src_enc_padding = tf.constant(\n [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],\n dtype=tf.float64)\n target_ids = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 15],\n [5, 6, 7, 8], [10, 5, 2, 5]],\n dtype=tf.int32))\n target_labels = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 13],\n [5, 7, 8, 10], [10, 5, 2, 4]],\n dtype=tf.int32))\n target_paddings = tf.transpose(\n tf.constant([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0],\n [1, 1, 1, 1]],\n dtype=tf.float64))\n target_transcripts = tf.constant(['abcd', 'bcde', 'klmp', 'fghi', 'kfcf'])\n target_weights = 1.0 - target_paddings\n targets = py_utils.NestedMap({\n 'ids': target_ids,\n 'labels': target_labels,\n 'weights': target_weights,\n 'paddings': target_paddings,\n 'transcripts': target_transcripts,\n })\n encoder_outputs = py_utils.NestedMap(\n encoded=src_enc, padding=src_enc_padding)\n metrics = dec.FPropDefaultTheta(encoder_outputs, targets).metrics\n loss = metrics['loss'][0]\n\n self.evaluate(tf.global_variables_initializer())\n\n test_utils.CompareToGoldenSingleFloat(self, 3.467679, loss.eval())\n # Second run to make sure the function is deterministic.\n test_utils.CompareToGoldenSingleFloat(self, 3.467679, loss.eval())\n\n def _testDecoderFPropGradientCheckerHelper(self, func_inline=False):\n config = tf.config_pb2.ConfigProto(\n graph_options=tf.GraphOptions(\n optimizer_options=tf.OptimizerOptions(\n do_function_inlining=func_inline)))\n with self.session(use_gpu=False, config=config) as sess:\n tf.random.set_seed(8372749040)\n np.random.seed(274854)\n vn_config = py_utils.VariationalNoiseParams(None, False, False)\n p = _DecoderParams(vn_config)\n p.dtype = tf.float64\n\n dec = p.Instantiate()\n src_seq_len = 5\n src_enc = tf.constant(\n np.random.uniform(size=(src_seq_len, 2, 8)), tf.float64)\n src_enc_padding = tf.constant(\n [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],\n dtype=tf.float64)\n encoder_outputs = py_utils.NestedMap(\n encoded=src_enc, padding=src_enc_padding)\n target_ids = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 15],\n [5, 6, 7, 8], [10, 5, 2, 5]],\n dtype=tf.int32))\n target_labels = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 13],\n [5, 7, 8, 10], [10, 5, 2, 4]],\n dtype=tf.int32))\n target_paddings = tf.transpose(\n tf.constant([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0],\n [1, 1, 1, 1]],\n dtype=tf.float64))\n target_transcripts = 
tf.constant(['abcd', 'bcde', 'klmp', 'fghi', 'kfcf'])\n target_weights = 1.0 - target_paddings\n\n targets = py_utils.NestedMap({\n 'ids': target_ids,\n 'labels': target_labels,\n 'weights': target_weights,\n 'paddings': target_paddings,\n 'transcripts': target_transcripts,\n })\n metrics = dec.FPropDefaultTheta(encoder_outputs, targets).metrics\n loss = metrics['loss'][0]\n all_vars = tf.trainable_variables()\n grads = tf.gradients(loss, all_vars)\n\n def DenseGrad(var, grad):\n if isinstance(grad, tf.Tensor):\n return grad\n elif isinstance(grad, tf.IndexedSlices):\n return tf.math.unsorted_segment_sum(grad.values, grad.indices,\n tf.shape(var)[0])\n\n dense_grads = [DenseGrad(x, y) for (x, y) in zip(all_vars, grads)]\n\n self.evaluate(tf.global_variables_initializer())\n\n test_utils.CompareToGoldenSingleFloat(self, 3.458078, loss.eval())\n # Second run to make sure the function is deterministic.\n test_utils.CompareToGoldenSingleFloat(self, 3.458078, loss.eval())\n\n symbolic_grads = [x.eval() for x in dense_grads if x is not None]\n numerical_grads = []\n for v in all_vars:\n numerical_grads.append(test_utils.ComputeNumericGradient(sess, loss, v))\n\n for x, y in zip(symbolic_grads, numerical_grads):\n self.assertAllClose(x, y)\n\n def testDecoderFPropGradientCheckerNoInline(self):\n self._testDecoderFPropGradientCheckerHelper(func_inline=False)\n\n def testDecoderFPropGradientCheckerInline(self):\n self._testDecoderFPropGradientCheckerHelper(func_inline=True)\n\n def _testDecoderBeamSearchDecodeHelperWithOutput(self,\n params,\n src_seq_len=None,\n src_enc_padding=None):\n config = tf.config_pb2.ConfigProto(\n graph_options=tf.GraphOptions(\n optimizer_options=tf.OptimizerOptions(do_function_inlining=False)))\n p = params\n with self.session(use_gpu=False, config=config), self.SetEval(True):\n tf.random.set_seed(837274904)\n np.random.seed(837575)\n p.beam_search.num_hyps_per_beam = 4\n p.dtype = tf.float32\n p.target_seq_len = 5\n\n dec = p.Instantiate()\n if src_seq_len is None:\n src_seq_len = 5\n src_enc = tf.constant(\n np.random.uniform(size=(src_seq_len, 2, 8)), tf.float32)\n if src_enc_padding is None:\n src_enc_padding = tf.constant(\n [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],\n dtype=tf.float32)\n\n encoder_outputs = py_utils.NestedMap(\n encoded=src_enc, padding=src_enc_padding)\n done_hyps = dec.BeamSearchDecode(encoder_outputs).done_hyps\n self.evaluate(tf.global_variables_initializer())\n\n softmax_wts = self.evaluate(dec.vars.softmax)\n print('softmax wts = ', softmax_wts)\n\n done_hyps_serialized = self.evaluate([done_hyps])[0]\n hyp = Hypothesis()\n print('done hyps shape = ', done_hyps_serialized.shape)\n for i in range(5):\n for j in range(8):\n print(i, j, len(done_hyps_serialized[i, j]))\n hyp.ParseFromString(done_hyps_serialized[2, 5])\n print('hyp = ', hyp)\n return hyp\n\n def _VerifyHypothesesMatch(self, hyp1, hyp2):\n tf.logging.info('hyp1 = %s', hyp1)\n tf.logging.info('hyp2 = %s', hyp2)\n self.assertEqual(hyp1.beam_id, hyp2.beam_id)\n self.assertEqual(list(hyp1.ids), list(hyp2.ids))\n self.assertAllClose(hyp1.scores, hyp2.scores)\n self.assertEqual(len(hyp1.atten_vecs), len(hyp2.atten_vecs))\n for av1, av2 in zip(hyp1.atten_vecs, hyp2.atten_vecs):\n self.assertAllClose(av1.prob, av2.prob)\n\n def testDecoderBeamSearchDecode(self):\n np.random.seed(837575)\n\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(None, False, False),\n num_classes=8)\n p.beam_search.num_hyps_per_beam = 4\n p.dtype = tf.float32\n p.target_seq_len 
= 5\n\n expected_str = \"\"\"\n beam_id: 1\n ids: 0\n ids: 6\n ids: 2\n scores: -2.021608\n scores: -2.000098\n scores: -2.036338\n atten_vecs {\n prob: 0.330158\n prob: 0.342596\n prob: 0.327246\n prob: 0.0\n prob: 0.0\n }\n atten_vecs {\n prob: 0.330158\n prob: 0.342597\n prob: 0.327245\n prob: 0.0\n prob: 0.0\n }\n atten_vecs {\n prob: 0.330158\n prob: 0.342597\n prob: 0.327245\n prob: 0.0\n prob: 0.0\n }\n \"\"\"\n expected_hyp = Hypothesis()\n text_format.Parse(expected_str, expected_hyp)\n\n decoded_hyp = self._testDecoderBeamSearchDecodeHelperWithOutput(params=p)\n self._VerifyHypothesesMatch(expected_hyp, decoded_hyp)\n\n def testDecoderSampleTargetSequences(self):\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(None, False, False),\n num_classes=8)\n p.target_seq_len = 5\n p.random_seed = 1\n config = tf.config_pb2.ConfigProto(\n graph_options=tf.GraphOptions(\n optimizer_options=tf.OptimizerOptions(do_function_inlining=False)))\n with self.session(use_gpu=False, config=config):\n tf.random.set_seed(8372740)\n np.random.seed(35315)\n dec = p.Instantiate()\n source_sequence_length = 5\n batch_size = 4\n source_encodings = tf.constant(\n np.random.normal(\n size=[source_sequence_length, batch_size, p.source_dim]),\n dtype=tf.float32)\n source_encoding_padding = tf.constant(\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 1.0, 1.0],\n [0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]],\n dtype=tf.float32)\n encoder_outputs = py_utils.NestedMap(\n encoded=source_encodings, padding=source_encoding_padding)\n sampled_sequences = dec.SampleTargetSequences(\n dec.theta, encoder_outputs, random_seed=tf.cast(123, tf.int32))\n self.assertAllEqual([batch_size, p.target_seq_len],\n sampled_sequences.ids.shape)\n self.evaluate(tf.global_variables_initializer())\n decoder_output = self.evaluate(sampled_sequences)\n print('ids=%s' % np.array_repr(decoder_output.ids))\n lens = np.sum(1 - decoder_output.paddings, axis=1)\n print('lens=%s' % lens)\n # pyformat: disable\n # pylint: disable=bad-whitespace,bad-continuation\n expected_ids = [[6, 2, 2, 2, 2],\n [0, 0, 7, 5, 1],\n [6, 1, 5, 1, 5],\n [6, 7, 7, 4, 4]]\n # pylint: enable=bad-whitespace,bad-continuation\n # pyformat: enable\n expected_lens = [2, 5, 5, 5]\n self.assertAllEqual(expected_lens, lens)\n self.assertAllEqual(expected_ids, decoder_output.ids)\n\n # Sample again with the same random seed.\n decoder_output2 = self.evaluate(\n dec.SampleTargetSequences(\n dec.theta, encoder_outputs, random_seed=tf.cast(123, tf.int32)))\n # Get the same output.\n self.assertAllEqual(decoder_output.ids, decoder_output2.ids)\n self.assertAllEqual(decoder_output.paddings, decoder_output2.paddings)\n\n # Sample again with a different random seed.\n decoder_output3 = self.evaluate(\n dec.SampleTargetSequences(\n dec.theta, encoder_outputs, random_seed=tf.cast(123456,\n tf.int32)))\n # Get different sequences.\n self.assertNotAllClose(expected_ids, decoder_output3.ids)\n\n def testDecoderFPropWithSymbolicShape(self):\n \"\"\"Create decoder with default params, and verify that FProp runs.\"\"\"\n with self.session():\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n p.rnn_cell_dim = symbolic.Symbol('rnn_cell_dim')\n\n with symbolic.SymbolToValueMap(symbolic.STATIC_VALUES,\n {p.rnn_cell_dim: 6}):\n loss, per_sequence_loss = self._testDecoderFPropHelper(params=p)\n self.evaluate(tf.global_variables_initializer())\n loss_val, per_sequence_loss_val = self.evaluate(\n [loss, 
per_sequence_loss])\n\n print('loss = ', loss_val, 'per sequence loss = ', per_sequence_loss_val)\n # Target batch size is 4. Therefore, we should expect 4 here.\n self.assertEqual(per_sequence_loss_val.shape, (4,))\n\n def testUpdateTargetVocabSize(self):\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n p.label_smoothing = lingvo_layers.LocalizedLabelSmoother.Params()\n p.label_smoothing.num_classes = p.softmax.num_classes\n\n vocab_size = 1024\n self.assertNotEqual(p.emb.vocab_size, vocab_size)\n self.assertNotEqual(p.softmax.num_classes, vocab_size)\n self.assertNotEqual(p.fusion.lm.vocab_size, vocab_size)\n self.assertNotEqual(p.label_smoothing.num_classes, vocab_size)\n p = p.cls.UpdateTargetVocabSize(p, vocab_size)\n dec = p.Instantiate()\n self.assertEqual(vocab_size, dec.params.emb.vocab_size)\n self.assertEqual(vocab_size, dec.params.softmax.num_classes)\n self.assertEqual(vocab_size, p.fusion.lm.vocab_size)\n self.assertEqual(vocab_size, p.label_smoothing.num_classes)\n\n def testDecoderFPropWithAdapters(self):\n \"\"\"Create decoder with adapters, and verify that FProp runs.\"\"\"\n with self.session(use_gpu=False):\n tf.random.set_seed(8372749040)\n\n params = _DecoderParams(\n num_rnn_layers=2,\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n params.rnn_cell_dim = 3\n params.adapter_layer_tpl.Set(\n bottleneck_dim=4,\n num_tasks=16,\n projection_params_init=py_utils.WeightInit.Gaussian(0.01))\n params.adapter_task_id_field = 'domain_ids'\n\n dec = params.Instantiate()\n src_seq_len = 5\n src_enc = tf.random.normal([src_seq_len, 2, 8],\n seed=982774838,\n dtype=py_utils.FPropDtype(params))\n src_enc_padding = tf.constant(\n [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 1.0], [1.0, 1.0]],\n dtype=py_utils.FPropDtype(params))\n domain_ids = tf.constant(np.random.randint(low=0, high=16, size=[2]))\n encoder_outputs = py_utils.NestedMap(\n encoded=src_enc, padding=src_enc_padding, domain_ids=domain_ids)\n # shape=[4, 5]\n target_ids = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 15],\n [5, 6, 7, 8], [10, 5, 2, 5]],\n dtype=tf.int32))\n # shape=[4, 5]\n target_labels = tf.transpose(\n tf.constant([[0, 1, 2, 3], [1, 2, 3, 4], [10, 11, 12, 13],\n [5, 7, 8, 10], [10, 5, 2, 4]],\n dtype=tf.int32))\n # shape=[4, 5]\n target_paddings = tf.transpose(\n tf.constant([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0],\n [1, 1, 1, 0]],\n dtype=py_utils.FPropDtype(params)))\n target_transcripts = tf.constant(['abcd', 'bcde', 'klmp', 'fghi', 'kfcf'])\n target_weights = 1.0 - target_paddings\n # ids/labels/weights/paddings are all in [batch, time] shape.\n targets = py_utils.NestedMap({\n 'ids': target_ids,\n 'labels': target_labels,\n 'weights': target_weights,\n 'paddings': target_paddings,\n 'transcripts': target_transcripts,\n })\n decoder_outputs = dec.FPropDefaultTheta(encoder_outputs, targets)\n metrics = decoder_outputs.metrics\n per_sequence_loss = decoder_outputs.per_sequence['loss']\n\n self.assertIn('fraction_of_correct_next_step_preds', metrics)\n self.evaluate(tf.global_variables_initializer())\n metrics_val, per_sequence_loss_val = self.evaluate(\n [metrics, per_sequence_loss])\n tf.logging.info('metrics=%s, per_sequence_loss=%s', metrics_val,\n per_sequence_loss_val)\n\n self.assertEqual(metrics_val['loss'], metrics_val['log_pplx'])\n # Target batch size is 4. 
Therefore, we should expect 4 here.\n self.assertEqual(per_sequence_loss_val.shape, (4,))\n\n\nclass DecoderWithConfidenceTest(test_utils.TestCase):\n\n def _testComputePredictionsHelper(self,\n use_while_loop_based_unrolling=False,\n confidence_module=False):\n \"\"\"Create decoder and confidence prediction, and verify that FProp runs.\"\"\"\n with self.session():\n p = _DecoderParams(\n vn_config=py_utils.VariationalNoiseParams(\n None, True, False, seed=12345))\n p.use_while_loop_based_unrolling = use_while_loop_based_unrolling\n if confidence_module:\n p.confidence = lingvo_layers.FeedForwardNet.Params()\n p.confidence.hidden_layer_dims = [8, 1]\n p.confidence.activation = ['RELU', 'NONE']\n\n dec = p.Instantiate()\n encoder_outputs, targets = _CreateSourceAndTargets(p)\n predictions = dec.ComputePredictions(dec.theta, encoder_outputs, targets)\n\n self.evaluate(tf.global_variables_initializer())\n predictions_val = self.evaluate(predictions)\n self.assertAllEqual(predictions_val['logits'].shape, [4, 5, 32])\n self.assertAllEqual(predictions_val['softmax_input'].shape, [5, 4, 12])\n if p.confidence is not None:\n self.assertAllEqual(predictions_val['confidence_logits'].shape, [4, 5])\n\n def testComputePredictionsDynamic(self):\n self._testComputePredictionsHelper(use_while_loop_based_unrolling=True)\n\n def testComputePredictionsFunctional(self):\n self._testComputePredictionsHelper(use_while_loop_based_unrolling=False)\n\n def testComputePredictionsDynamicWithConfidence(self):\n self._testComputePredictionsHelper(\n use_while_loop_based_unrolling=True, confidence_module=True)\n\n def testComputePredictionsFunctionalWithConfidence(self):\n self._testComputePredictionsHelper(\n use_while_loop_based_unrolling=False, confidence_module=True)\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "numpy.finfo" ], [ "numpy.random.seed", "numpy.array_repr", "numpy.random.normal", "numpy.random.uniform", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RitwickGhosh/DeepSORT-YOLOv4-TensorRT-OpenVINO
[ "104e0433c56cebb7db503e23aaec9e7f1d7fdd3a" ]
[ "yolo_v4.py" ]
[ "import numpy as np\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\n_BATCH_NORM_DECAY = 0.9\n_BATCH_NORM_EPSILON = 1e-05\n_LEAKY_RELU = 0.1\n\n_ANCHORS = [(12, 16), (19, 36), (40, 28),\n (36, 75), (76, 55), (72, 146),\n (142, 110), (192, 243), (459, 401)]\[email protected]_arg_scope\ndef _fixed_padding(inputs, kernel_size, *args, mode='CONSTANT', **kwargs):\n \"\"\"\n Pads the input along the spatial dimensions independently of input size.\n Args:\n inputs: A tensor of size [batch, channels, height_in, width_in] or\n [batch, height_in, width_in, channels] depending on data_format.\n kernel_size: The kernel to be used in the conv2d or max_pool2d operation.\n Should be a positive integer.\n data_format: The input format ('NHWC' or 'NCHW').\n mode: The mode for tf.pad.\n Returns:\n A tensor with the same format as the input with the data either intact\n (if kernel_size == 1) or padded (if kernel_size > 1).\n \"\"\"\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if kwargs['data_format'] == 'NCHW':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end],\n [pad_beg, pad_end]],\n mode=mode)\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]], mode=mode)\n return padded_inputs\n\n\n\ndef _conv2d_fixed_padding(inputs, filters, kernel_size, strides=1):\n if strides > 1:\n inputs = _fixed_padding(inputs, kernel_size)\n inputs = slim.conv2d(inputs, filters, kernel_size, stride=strides,\n padding=('SAME' if strides == 1 else 'VALID'))\n return inputs\n\n\ndef _yolo_res_Block(inputs,in_channels,res_num,data_format,double_ch=False):\n out_channels = in_channels\n if double_ch:\n out_channels = in_channels * 2\n net = _conv2d_fixed_padding(inputs,in_channels*2,kernel_size=3,strides=2)\n route = _conv2d_fixed_padding(net,out_channels,kernel_size=1)\n net = _conv2d_fixed_padding(net,out_channels,kernel_size=1)\n\n for _ in range(res_num):\n tmp=net\n net = _conv2d_fixed_padding(net,in_channels,kernel_size=1)\n net = _conv2d_fixed_padding(net,out_channels,kernel_size=3)\n #shortcut\n net = tmp+net\n\n net=_conv2d_fixed_padding(net,out_channels,kernel_size=1)\n\n #concat\n net=tf.concat([net,route],axis=1 if data_format == 'NCHW' else 3)\n net=_conv2d_fixed_padding(net,in_channels*2,kernel_size=1)\n return net\n\ndef _yolo_conv_block(net,in_channels,a,b):\n for _ in range(a):\n out_channels=in_channels/2\n net = _conv2d_fixed_padding(net,out_channels,kernel_size=1)\n net = _conv2d_fixed_padding(net,in_channels,kernel_size=3)\n\n out_channels=in_channels\n for _ in range(b):\n out_channels=out_channels/2\n net = _conv2d_fixed_padding(net,out_channels,kernel_size=1)\n\n return net\n\n\ndef _spp_block(inputs, data_format='NCHW'):\n return tf.concat([slim.max_pool2d(inputs, 13, 1, 'SAME'),\n slim.max_pool2d(inputs, 9, 1, 'SAME'),\n slim.max_pool2d(inputs, 5, 1, 'SAME'),\n inputs],\n axis=1 if data_format == 'NCHW' else 3)\n\n\ndef _upsample(inputs, out_shape, data_format='NCHW'):\n # tf.image.resize_nearest_neighbor accepts input in format NHWC\n if data_format == 'NCHW':\n inputs = tf.transpose(inputs, [0, 2, 3, 1])\n\n if data_format == 'NCHW':\n new_height = out_shape[2]\n new_width = out_shape[3]\n else:\n new_height = out_shape[1]\n new_width = out_shape[2]\n\n inputs = tf.image.resize_nearest_neighbor(inputs, (new_height, new_width))\n\n # back to NCHW if needed\n if data_format == 'NCHW':\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n inputs = tf.identity(inputs, name='upsampled')\n 
return inputs\n\n\ndef csp_darknet53(inputs,data_format,batch_norm_params):\n \"\"\"\n Builds CSPDarknet-53 model.\n \"\"\"\n with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params,\n biases_initializer=None,\n activation_fn=lambda x:x* tf.math.tanh(tf.math.softplus(x))):\n net = _conv2d_fixed_padding(inputs,32,kernel_size=3)\n #downsample\n #res1\n net=_yolo_res_Block(net,32,1,data_format,double_ch=True)\n #res2\n net = _yolo_res_Block(net,64,2,data_format)\n #res8\n net = _yolo_res_Block(net,128,8,data_format)\n\n #features of 54 layer\n up_route_54=net\n #res8\n net = _yolo_res_Block(net,256,8,data_format)\n #features of 85 layer\n up_route_85=net\n #res4\n net=_yolo_res_Block(net,512,4,data_format)\n\n with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params,\n biases_initializer=None,\n activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=_LEAKY_RELU)):\n ########\n net = _yolo_conv_block(net,1024,1,1)\n\n net=_spp_block(net,data_format=data_format)\n\n net=_conv2d_fixed_padding(net,512,kernel_size=1)\n net = _conv2d_fixed_padding(net, 1024, kernel_size=3)\n net = _conv2d_fixed_padding(net, 512, kernel_size=1)\n\n #features of 116 layer\n route_3=net\n\n net = _conv2d_fixed_padding(net,256,kernel_size=1)\n upsample_size = up_route_85.get_shape().as_list()\n net = _upsample(net, upsample_size, data_format)\n route= _conv2d_fixed_padding(up_route_85,256,kernel_size=1)\n\n net = tf.concat([route,net], axis=1 if data_format == 'NCHW' else 3)\n net = _yolo_conv_block(net,512,2,1)\n #features of 126 layer\n route_2=net\n\n net = _conv2d_fixed_padding(net,128,kernel_size=1)\n upsample_size = up_route_54.get_shape().as_list()\n net = _upsample(net, upsample_size, data_format)\n route= _conv2d_fixed_padding(up_route_54,128,kernel_size=1)\n net = tf.concat([route,net], axis=1 if data_format == 'NCHW' else 3)\n net = _yolo_conv_block(net,256,2,1)\n #features of 136 layer\n route_1 = net\n\n return route_1, route_2, route_3\n\ndef _get_size(shape, data_format):\n if len(shape) == 4:\n shape = shape[1:]\n return shape[1:3] if data_format == 'NCHW' else shape[0:2]\n\n\ndef _detection_layer(inputs, num_classes, anchors, img_size, data_format):\n num_anchors = len(anchors)\n predictions = slim.conv2d(inputs, num_anchors * (5 + num_classes), 1,\n stride=1, normalizer_fn=None,\n activation_fn=None,\n biases_initializer=tf.zeros_initializer())\n\n shape = predictions.get_shape().as_list()\n grid_size = _get_size(shape, data_format)\n dim = grid_size[0] * grid_size[1]\n bbox_attrs = 5 + num_classes\n\n if data_format == 'NCHW':\n predictions = tf.reshape(\n predictions, [-1, num_anchors * bbox_attrs, dim])\n predictions = tf.transpose(predictions, [0, 2, 1])\n\n predictions = tf.reshape(predictions, [-1, num_anchors * dim, bbox_attrs])\n\n stride = (img_size[0] // grid_size[0], img_size[1] // grid_size[1])\n\n anchors = [(a[0] / stride[0], a[1] / stride[1]) for a in anchors]\n\n box_centers, box_sizes, confidence, classes = tf.split(\n predictions, [2, 2, 1, num_classes], axis=-1)\n\n box_centers = tf.nn.sigmoid(box_centers)\n confidence = tf.nn.sigmoid(confidence)\n\n grid_x = tf.range(grid_size[0], dtype=tf.float32)\n grid_y = tf.range(grid_size[1], dtype=tf.float32)\n a, b = tf.meshgrid(grid_x, grid_y)\n\n x_offset = tf.reshape(a, (-1, 1))\n y_offset = tf.reshape(b, (-1, 1))\n\n x_y_offset = tf.concat([x_offset, y_offset], axis=-1)\n x_y_offset = 
tf.reshape(tf.tile(x_y_offset, [1, num_anchors]), [1, -1, 2])\n\n box_centers = box_centers + x_y_offset\n box_centers = box_centers * stride\n\n anchors = tf.tile(anchors, [dim, 1])\n box_sizes = tf.exp(box_sizes) * anchors\n box_sizes = box_sizes * stride\n\n detections = tf.concat([box_centers, box_sizes, confidence], axis=-1)\n\n classes = tf.nn.sigmoid(classes)\n predictions = tf.concat([detections, classes], axis=-1)\n return predictions\n\n\n\n\ndef yolo_v4(inputs, num_classes, is_training=False, data_format='NCHW', reuse=False):\n \"\"\"\n Creates YOLO v4 model.\n :param inputs: a 4-D tensor of size [batch_size, height, width, channels].\n Dimension batch_size may be undefined. The channel order is RGB.\n :param num_classes: number of predicted classes.\n :param is_training: whether is training or not.\n :param data_format: data format NCHW or NHWC.\n :param reuse: whether or not the network and its variables should be reused.\n :param with_spp: whether or not is using spp layer.\n :return:\n \"\"\"\n\n # it will be needed later on\n img_size = inputs.get_shape().as_list()[1:3]\n\n # transpose the inputs to NCHW\n if data_format == 'NCHW':\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n # normalize values to range [0..1]\n inputs = inputs / 255\n\n # set batch norm params\n batch_norm_params = {\n 'decay': _BATCH_NORM_DECAY,\n 'epsilon': _BATCH_NORM_EPSILON,\n 'scale': True,\n 'is_training': is_training,\n 'fused': None, # Use fused batch norm if possible.\n }\n\n # Set activation_fn and parameters for conv2d, batch_norm.\n with slim.arg_scope([slim.conv2d, slim.batch_norm, _fixed_padding], data_format=data_format, reuse=reuse):\n\n #weights_regularizer=slim.l2_regularizer(weight_decay)\n #weights_initializer=tf.truncated_normal_initializer(0.0, 0.01)\n with tf.variable_scope('cspdarknet-53'):\n route_1, route_2, route_3 = csp_darknet53(inputs,data_format,batch_norm_params)\n\n with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params,\n biases_initializer=None,\n activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=_LEAKY_RELU)):\n with tf.variable_scope('yolo-v4'):\n #features of y1\n net = _conv2d_fixed_padding(route_1,256,kernel_size=3)\n detect_1 = _detection_layer(\n net, num_classes, _ANCHORS[0:3], img_size, data_format)\n detect_1 = tf.identity(detect_1, name='detect_1')\n\n #features of y2\n net = _conv2d_fixed_padding(route_1, 256, kernel_size=3,strides=2)\n net=tf.concat([net,route_2], axis=1 if data_format == 'NCHW' else 3)\n net=_yolo_conv_block(net,512,2,1)\n route_147 =net\n net = _conv2d_fixed_padding(net,512,kernel_size=3)\n detect_2 = _detection_layer(\n net, num_classes, _ANCHORS[3:6], img_size, data_format)\n detect_2 = tf.identity(detect_2, name='detect_2')\n\n # features of y3\n net=_conv2d_fixed_padding(route_147,512,strides=2,kernel_size=3)\n net = tf.concat([net, route_3], axis=1 if data_format == 'NCHW' else 3)\n net = _yolo_conv_block(net,1024,3,0)\n detect_3 = _detection_layer(\n net, num_classes, _ANCHORS[6:9], img_size, data_format)\n detect_3 = tf.identity(detect_3, name='detect_3')\n\n detections = tf.concat([detect_1, detect_2, detect_3], axis=1)\n detections = tf.identity(detections, name='detections')\n return detections\n" ]
[ [ "tensorflow.concat", "tensorflow.nn.sigmoid", "tensorflow.image.resize_nearest_neighbor", "tensorflow.range", "tensorflow.transpose", "tensorflow.zeros_initializer", "tensorflow.reshape", "tensorflow.identity", "tensorflow.exp", "tensorflow.math.softplus", "tensorflow.pad", "tensorflow.meshgrid", "tensorflow.variable_scope", "tensorflow.split", "tensorflow.tile", "tensorflow.nn.leaky_relu" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
zeddee/info_retrieve
[ "b54f80e1124183221e509c88413a2afc6ff4fae6" ]
[ "src/utils.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\n\ndef question_cleaner(df_query):\n kb=([int(xx) for xx in (df_query[3].iloc[0]).split(' ')])\n gt = [int(xx) for xx in (df_query[2].iloc[0]).split(' ')]\n ct=0\n negg=0\n withans=[]\n for ii in range(len(df_query)):\n kb=[int(xx) for xx in (df_query[3].iloc[ii]).split(' ')]\n gt = [int(xx) for xx in (df_query[2].iloc[ii]).split(' ')]\n if bool(set(gt) & set(kb)):\n withans.append(ii)\n else:\n negg+=1\n print('total:{}, removed:{}, remainder:{}'.format(len(df_query), negg, len(withans)))\n return df_query.iloc[withans]\n\ndef display_qn_and_ans(df_query, df_doc, index=0):\n kb=[int(xx) for xx in (df_query[3].iloc[index]).split(' ')]\n gt = [int(xx) for xx in (df_query[2].iloc[index]).split(' ')]\n print('Question is: {}'.format(df_query['text'].iloc[index]))\n print('Answer index: ', gt)\n print('Answers: ', df_doc.loc[gt, 'text'].values)\n\ndef read_txt(path):\n \"\"\"Used with split_txt() to read and split kb into clauses\"\"\"\n with open(path, 'r', encoding=\"utf-8\") as f:\n text = f.readlines()\n return text\n\ndef clean_txt(text):\n \"\"\"Strips formatting\"\"\"\n text=[x.replace('\\n', '. ') for x in text] # not sure how newlines are tokenized\n text=[x.replace('.. ', '. ').rstrip() for x in text] # remove artifact\n return text\n\ndef split_txt(text, qa=False):\n \"\"\"Splits a text document into clauses based on whitespaces. \n Additionally, reads a faq document by assuming that the first line is a question \n between each whitespaced group\n \"\"\"\n condition_terms = []\n stringg=''\n for tex in text:\n if (tex=='\\n'):\n if (stringg != ''):\n condition_terms.append(stringg)\n stringg=''\n else: pass\n else: stringg+=tex\n if qa:\n condition_context = [x.split('\\n')[0] for x in condition_terms]\n condition_terms = ['\\n'.join(x.split('\\n')[1:]) for x in condition_terms]\n return condition_terms, condition_context\n else: return condition_terms\n\ndef read_kb_csv(csv_path, meta_col='meta', answer_col='answer', query_col='question', answer_str_col='answer', cutoff=None):\n \"\"\"Only read organization meta, not personal. index=196\"\"\"\n df = pd.read_csv(csv_path)\n if cutoff:\n df = df.iloc[:cutoff]\n df['kb'] = df[meta_col]+df[answer_col]\n df.rename(columns={query_col:'queries', answer_str_col:'answer_str'}, inplace=True)\n # df[answer_col] = [[x] for x in df.index]\n # df['kb']=df['kb'].str.replace('\\n', '. ').replace('.. ', '. ')\n return list(df['kb']), list(df['queries'])\n\ndef aiap_qna(question, answer_array, aiap_qa, model, k=1):\n similarity_score=cosine_similarity(answer_array, model.predict([question], type='query'))\n sortargs=np.flip(similarity_score.argsort(axis=0))\n sortargs=[x[0] for x in sortargs]\n sorted_ans=[]\n for indx in range(k):\n sorted_ans.append(aiap_qa[sortargs[indx]])\n return sorted_ans, sortargs, similarity_score\n\ndef aiap_qna_quickscore(aiap_context, answer_array, aiap_qa, model, k=1):\n \"\"\"Quickly scores the model against the aiap qna dataset. 
\n This function works because the order of questions and answers are synched in the list.\n \"\"\"\n score=0\n for ii, qn in enumerate(aiap_context):\n _, sortargs, simscore = aiap_qna(qn, answer_array, aiap_qa, model, k)\n # print(qn, aiap_qa[sortargs[0]], simscore)\n if bool(set([ii]) & set(sortargs[:k])):\n score+=1\n return score/len(aiap_context)\n\ndef ranker(model, question_vectors, df_query, df_doc):\n \"\"\"for model evaluation on InsuranceQA datset\"\"\"\n predictions=[]\n gts=[]\n for ii, question_vector in enumerate(question_vectors):\n kb=[int(xx) for xx in (df_query[3].iloc[ii]).split(' ')]\n gt = [int(xx) for xx in (df_query[2].iloc[ii]).split(' ')]\n doc_vectors = model.predict(df_doc.loc[kb]['text'].tolist())\n cossim = cosine_similarity(doc_vectors, question_vector.reshape(1, -1))\n sortargs=np.flip(cossim.argsort(axis=0))\n returnedans = [kb[jj[0]] for jj in sortargs]\n predictions.append(returnedans)\n gts.append(gt)\n return predictions, gts\n \ndef scorer(predictions, gts, k=3):\n \"\"\"For model evaluation on InsuranceQA datset. Returns score@k.\"\"\"\n score=0\n total=0\n for gt, prediction in zip(gts, predictions):\n if bool(set(gt) & set(prediction[:k])):\n score+=1\n total+=1\n return score/total\n\ndef make_pred(row, gr, query_col_name='queries', top_k=3):\n \"\"\"Make line by line predictions, returns top 3 index of kb.\"\"\"\n txt, ind = gr.make_query(row['queries'], top_k=top_k, index=True)\n return ind\n\ndef make_iscorr(row, prediction_col_name='predictions', answer_col_name='answer'):\n \"\"\"Calculates accuracy @3.\"\"\"\n if bool(set(row[answer_col_name]) & set(row[prediction_col_name])):\n return 1\n else: return 0\n \ndef make_closewrong(row, prediction_col_name='predictions', answer_col_name='answer'):\n \"\"\"Find index of wrong answer with highest similarity score aka hardmining.\"\"\"\n try: return [x for x in row[prediction_col_name] if x not in row[answer_col_name]][0]\n except: return 1 #Just return the most common class as the negative eg.\n \ndef make_finetune(row, gr, kb_name='default_kb', query_col_name='queries', answer_col_name='answer', closewrong_col_name='closewrong'):\n \"\"\"Stochastic finetuning sample by sample.\"\"\"\n loss = gr.finetune([row[query_col_name]], [gr.text[kb_name][row[answer_col_name][0]]], [gr.text[kb_name][row[answer_col_name][0]]], [gr.text[kb_name][row[closewrong_col_name]]], [gr.text[kb_name][row[closewrong_col_name]]])\n print(loss)\n \ndef make_contrastive_finetune(row, gr, kb_name='default_kb', query_col_name='queries', answer_col_name='answer', closewrong_col_name='closewrong'):\n \"\"\"Stochastic finetuning for contrastive loss.\"\"\"\n loss = gr.finetune(question=[row[query_col_name]], answer=[gr.text[kb_name][row[answer_col_name][0]]], context=[gr.text[kb_name][row[answer_col_name][0]]], label=[1])\n print('1: ', loss)\n loss = gr.finetune(question=[row[query_col_name]], answer=[gr.text[kb_name][row[closewrong_col_name]]], context=[gr.text[kb_name][row[closewrong_col_name]]], label=[0])\n print('0: ', loss)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
mcrimi/opfunu
[ "1ff3ecf4b7818a0edd5d92ce5475839fa9477da1", "1ff3ecf4b7818a0edd5d92ce5475839fa9477da1" ]
[ "opfunu/cec/cec2005/F19.py", "examples/test_cec2013.py" ]
[ "#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 19:26, 20/04/2020 %\n# %\n# Email: [email protected] %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n#-------------------------------------------------------------------------------------------------------%\n\nfrom opfunu.cec.cec2005.root import Root\nfrom numpy import sum, dot, sqrt, array, cos, pi, exp, e, ones, max\n\n\nclass Model(Root):\n def __init__(self, f_name=\"Rotated Hybrid Composition Function 2 with narrow basin global optimum\",\n f_shift_data_file=\"data_hybrid_func2\",\n f_ext='.txt', f_bias=10, f_matrix=None):\n Root.__init__(self, f_name, f_shift_data_file, f_ext, f_bias)\n self.f_matrix = f_matrix\n\n def __f12__(self, solution=None):\n return -20 * exp(-0.2 * sqrt(sum(solution ** 2) / len(solution))) - exp(sum(cos(2 * pi * solution)) / len(solution)) + 20 + e\n\n def __f34__(self, solution=None):\n return sum(solution ** 2 - 10 * cos(2 * pi * solution) + 10)\n\n def __f56__(self, solution=None):\n return sum(solution ** 2)\n\n def __f78__(self, solution=None, a=0.5, b=3, k_max=20):\n result = 0.0\n for i in range(len(solution)):\n result += sum([a ** k * cos(2 * pi * b ** k * (solution + 0.5)) for k in range(0, k_max)])\n return result - len(solution) * sum([a ** k * cos(2 * pi * b ** k * 0.5) for k in range(0, k_max)])\n\n def __f910__(self, solution=None):\n result = sum(solution ** 2) / 4000\n temp = 1.0\n for i in range(len(solution)):\n temp *= cos(solution[i] / sqrt(i + 1))\n return result - temp + 1\n\n def __fi__(self, solution=None, idx=None):\n if idx == 0 or idx == 1:\n return self.__f12__(solution)\n elif idx == 2 or idx == 3:\n return self.__f34__(solution)\n elif idx == 4 or idx == 5:\n return self.__f56__(solution)\n elif idx == 6 or idx == 7:\n return self.__f78__(solution)\n else:\n return self.__f910__(solution)\n\n def _main__(self, solution=None):\n problem_size = len(solution)\n if problem_size > 100:\n print(\"CEC 2005 not support for problem size > 100\")\n return 1\n if problem_size == 10 or problem_size == 30 or problem_size == 50:\n self.f_matrix = \"hybrid_func2_M_D\" + str(problem_size)\n else:\n print(\"CEC 2005 F19 function only support problem size 10, 30, 50\")\n return 1\n num_funcs = 10\n C = 2000\n xichma = array([0.1, 2, 1.5, 1.5, 1, 1, 1.5, 1.5, 2, 2])\n lamda = array([0.1 * 5 / 32, 5.0 / 32, 2 * 1, 1, 2 * 5.0 / 100, 5.0 / 100, 2.0 * 10, 10, 2 * 5.0 / 60, 5.0 / 60])\n bias = array([0, 100, 200, 300, 400, 500, 600, 700, 800, 900])\n y = 5 * ones(problem_size)\n shift_data = self.load_matrix_data(self.f_shift_data_file)\n shift_data = shift_data[:, :problem_size]\n matrix = self.load_matrix_data(self.f_matrix)\n\n weights = ones(num_funcs)\n fits = ones(num_funcs)\n for i in range(0, num_funcs):\n w_i = exp(-sum((solution - shift_data[i]) ** 2) / (2 * problem_size * xichma[i] ** 2))\n z = dot((solution - shift_data[i]) / lamda[i], matrix[i * problem_size:(i + 1) * problem_size, :])\n fit_i = self.__fi__(z, i)\n f_maxi = self.__fi__(dot((y / lamda[i]), matrix[i * problem_size:(i + 1) * problem_size, :]), i)\n fit_i = C * fit_i / f_maxi\n\n weights[i] = w_i\n fits[i] = fit_i\n\n sw = sum(weights)\n maxw = max(weights)\n\n for i in range(0, num_funcs):\n if weights[i] != maxw:\n weights[i] = weights[i] * (1 - maxw ** 10)\n weights[i] = weights[i] / sw\n\n fx = sum(dot(weights, (fits + bias)))\n return fx + 
self.f_bias\n", "#!/usr/bin/env python\n# ------------------------------------------------------------------------------------------------------%\n# Created by \"Thieu Nguyen\" at 22:21, 25/04/2020 %\n# %\n# Email: [email protected] %\n# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %\n# Github: https://github.com/thieunguyen5991 %\n#-------------------------------------------------------------------------------------------------------%\n\nimport numpy as np\nfrom opfunu.cec.cec2013.unconstraint import Model\n\nproblem_size = 100\nsolution = np.random.uniform(-1, 1, problem_size)\nfunc = Model(problem_size)\n\nprint(func.F28(solution))\n" ]
[ [ "numpy.dot", "numpy.sqrt", "numpy.cos", "numpy.ones", "numpy.max", "numpy.array", "numpy.sum" ], [ "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bagustris/deep_mlp_ser
[ "079bc6414287dbfb23a52e1e1869b91584eb037e" ]
[ "code/lstm_mixed_loso.py" ]
[ "# CSL Paper: Dimensional speech emotion recognition from acoustic and text\n# Changelog:\n# 2019-09-01: initial version\n# 2019-10-06: optimizer MTL parameters with linear search (in progress)\n# 2012-12-25: modified fot ser_iemocap_loso_hfs.py\n# feature is either std+mean or std+mean+silence (uncomment line 44)\n\nimport numpy as np\nimport pickle\nimport pandas as pd\n\nimport keras.backend as K\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, CuDNNLSTM, Flatten, \\\n Embedding, Dropout, BatchNormalization, \\\n RNN, concatenate, Activation\n\nfrom keras.callbacks import EarlyStopping\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence\n\nimport random as rn\nimport tensorflow as tf\n\nrn.seed(123)\nnp.random.seed(99)\ntf.set_random_seed(1234)\n\n# load feature and labels\nfeat_iemocap = np.load('/home/s1820002/spro2020/data/feat_ws_3.npy')\nvad_iemocap = np.load('/home/s1820002/IEMOCAP-Emotion-Detection/y_egemaps.npy')\n\nfeat_improv_train = np.load('/home/s1820002/deepMLP/data/feat_hfs_gemaps_msp_train.npy')\nfeat_improv_test = np.load('/home/s1820002/deepMLP/data/feat_hfs_gemaps_msp_test.npy')\n\nfeat_improv = np.vstack([feat_improv_train, feat_improv_test])\n\nlist_path = '/home/s1820002/msp-improv/helper/improv_data.csv'\nlist_file = pd.read_csv(list_path, index_col=None)\nlist_sorted = list_file.sort_values(by=['wavfile'])\nvad_list = [list_sorted['v'], list_sorted['a'], list_sorted['d']]\nvad_improv = np.array(vad_list).T\n\n# for LSTM input shape (batch, steps, features/channel)\nfeat = np.vstack([feat_iemocap, feat_improv])\nvad = np.vstack([vad_iemocap, vad_improv])\n\nfeat = feat.reshape(feat.shape[0], 1, feat.shape[1])\n\n# remove outlier, < 1, > 5\nvad = np.where(vad==5.5, 5.0, vad)\nvad = np.where(vad==0.5, 1.0, vad)\n\n# standardization\nscaled_feature = True\n\n# set Dropout\ndo = 0.3\n\nif scaled_feature == True:\n scaler = StandardScaler()\n scaler = scaler.fit(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))\n scaled_feat = scaler.transform(feat.reshape(feat.shape[0]*feat.shape[1], feat.shape[2]))\n scaled_feat = scaled_feat.reshape(feat.shape[0], feat.shape[1], feat.shape[2])\n feat = scaled_feat\nelse:\n feat = feat\n\nscaled_vad = True\n\n# standardization\nif scaled_vad:\n scaler = MinMaxScaler(feature_range=(-1, 1))\n scaler = scaler.fit(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))\n scaled_vad = scaler.transform(vad) #.reshape(vad.shape[0]*vad.shape[1], vad.shape[2]))\n vad = scaled_vad \nelse:\n vad = vad\n\n# Concordance correlation coefficient (CCC)-based loss function - using non-inductive statistics\ndef ccc(gold, pred):\n gold = K.squeeze(gold, axis=-1)\n pred = K.squeeze(pred, axis=-1)\n gold_mean = K.mean(gold, axis=-1, keepdims=True)\n pred_mean = K.mean(pred, axis=-1, keepdims=True)\n covariance = (gold-gold_mean)*(pred-pred_mean)\n gold_var = K.mean(K.square(gold-gold_mean), axis=-1, keepdims=True)\n pred_var = K.mean(K.square(pred-pred_mean), axis=-1, keepdims=True)\n ccc = K.constant(2.) * covariance / (gold_var + pred_var + K.square(gold_mean - pred_mean) + K.common.epsilon())\n return ccc\n\n\ndef ccc_loss(gold, pred): \n # input (num_batches, seq_len, 1)\n ccc_loss = K.constant(1.) 
- ccc(gold, pred)\n return ccc_loss\n\n\n# API model, if use RNN, first two rnn layer must return_sequences=True\ndef api_model(alpha, beta, gamma):\n # speech network\n input_speech = Input(shape=(feat.shape[1], feat.shape[2]), name='speech_input')\n net_speech = BatchNormalization()(input_speech)\n net_speech = CuDNNLSTM(256, return_sequences=True)(net_speech)\n net_speech = CuDNNLSTM(128, return_sequences=True)(net_speech)\n net_speech = CuDNNLSTM(64, return_sequences=True)(net_speech)\n net_speech = CuDNNLSTM(32, return_sequences=True)(net_speech)\n net_speech = CuDNNLSTM(16, return_sequences=True)(net_speech)\n model_speech = Flatten()(net_speech)\n #model_speech = Dropout(0.1)(net_speech)\n\n target_names = ('v', 'a', 'd')\n model_combined = [Dense(1, name=name)(model_speech) for name in target_names]\n\n model = Model(input_speech, model_combined) \n #model.compile(loss=ccc_loss, optimizer='rmsprop', metrics=[ccc])\n model.compile(loss=ccc_loss, \n loss_weights={'v': alpha, 'a': beta, 'd': gamma},\n optimizer='adam', metrics=[ccc])\n return model\n\n#def main(alpha, beta, gamma):\nmodel = api_model(0.1, 0.5, 0.4)\nmodel.summary()\n\nidx_train = np.hstack([np.arange(0, 7869), np.arange(10039, len(feat_improv_train))])\nidx_test = np.hstack([np.arange(7869,10039), np.arange(10039 + \n len(feat_improv_train), 18387)])\n\n# 7869 first data of session 5 (for LOSO), 8000 for SD\nearlystop = EarlyStopping(monitor='val_loss', mode='min', patience=10,\n restore_best_weights=True)\nhist = model.fit(feat[idx_train], vad[idx_train].T.tolist(), batch_size=200, #best:8\n validation_split=0.2, epochs=180, verbose=1, shuffle=True, \n callbacks=[earlystop])\nmetrik = model.evaluate(feat[idx_test], vad[idx_test].T.tolist())\nprint(metrik)\n\n\n# save prediction, comment to avoid overwriting\n#predict = model.predict(feat[6296:], batch_size=200)\n#np.save('../data/predict_lstm_iemocap_sd', \n# np.array(predict).reshape(3, 3743).T)\n\n\n" ]
[ [ "pandas.read_csv", "numpy.random.seed", "numpy.arange", "sklearn.preprocessing.MinMaxScaler", "tensorflow.set_random_seed", "numpy.load", "sklearn.preprocessing.StandardScaler", "numpy.array", "numpy.where", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
bond005/impartial_text_cls
[ "d7503c55388625b988098b307350b1f899e48e3c" ]
[ "tests/test_utils.py" ]
[ "# Copyright 2019 Ivan Bondarenko\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\nimport sys\nimport unittest\n\nimport numpy as np\n\ntry:\n from impartial_text_cls.utils import read_dstc2_data, read_snips2017_data, str_to_layers, read_csv\n from impartial_text_cls.utils import parse_hidden_layers_description\nexcept:\n sys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n from impartial_text_cls.utils import read_dstc2_data, read_snips2017_data, str_to_layers, read_csv\n from impartial_text_cls.utils import parse_hidden_layers_description\n\n\nclass TestUtils(unittest.TestCase):\n def test_read_dstc2_data_positive01(self):\n file_name = os.path.join(os.path.dirname(__file__), 'test_dataset.tar.gz')\n loaded_texts, loaded_labels, loaded_classes_list = read_dstc2_data(file_name)\n true_texts = np.array(\n [\n 'moderately priced north part of town',\n 'yes',\n 'what is the address and phone number',\n 'thank you good bye',\n 'expensive',\n 'south',\n 'dont care',\n 'what is the address',\n 'thank you good bye',\n 'hello welcome',\n 'south',\n 'would you like something',\n 'steak house',\n 'indian',\n 'and whats the phone number',\n 'thank you good bye',\n 'i need a cheap restaurant serving italian food',\n 'i dont care',\n 'could i get the address',\n 'thank you bye'\n ],\n dtype=object\n )\n true_labels = np.array(\n [\n {5, 3},\n 0,\n {7, 8},\n {9, 1},\n 5,\n 3,\n 6,\n 7,\n {9, 1},\n 2,\n 3,\n -1,\n 4,\n 4,\n 8,\n {9, 1},\n {4, 5},\n 6,\n 7,\n {9, 1}\n ],\n dtype=object\n )\n true_classes_list = ['affirm', 'bye', 'hello', 'inform_area', 'inform_food', 'inform_pricerange', 'inform_this',\n 'request_addr', 'request_phone', 'thankyou']\n self.assertIsInstance(loaded_classes_list, list)\n self.assertEqual(true_classes_list, loaded_classes_list)\n self.assertIsInstance(loaded_texts, np.ndarray)\n self.assertIsInstance(loaded_labels, np.ndarray)\n self.assertEqual(true_texts.shape, loaded_texts.shape)\n self.assertEqual(true_labels.shape, loaded_labels.shape)\n self.assertEqual(true_texts.tolist(), loaded_texts.tolist())\n self.assertEqual(true_labels.tolist(), loaded_labels.tolist())\n\n def test_read_dstc2_data_positive02(self):\n file_name = os.path.join(os.path.dirname(__file__), 'test_dataset.tar.gz')\n true_classes_list = ['affirm', 'hello', 'inform_area', 'inform_food', 'inform_pricerange', 'request_addr',\n 'request_phone', 'thankyou']\n loaded_texts, loaded_labels, loaded_classes_list = read_dstc2_data(file_name, true_classes_list)\n true_texts = np.array(\n [\n 'moderately priced north part of town',\n 'yes',\n 'what is the address and phone number',\n 'thank you good bye',\n 'expensive',\n 'south',\n 'dont care',\n 'what is the address',\n 'thank you good bye',\n 'hello welcome',\n 'south',\n 'would you like something',\n 'steak house',\n 'indian',\n 'and whats the phone number',\n 'thank you good bye',\n 'i need a cheap restaurant serving italian food',\n 'i dont care',\n 'could i get the address',\n 'thank you bye'\n ],\n dtype=object\n )\n 
true_labels = np.array(\n [\n {4, 2},\n 0,\n {5, 6},\n 7,\n 4,\n 2,\n -1,\n 5,\n 7,\n 1,\n 2,\n -1,\n 3,\n 3,\n 6,\n 7,\n {3, 4},\n -1,\n 5,\n 7\n ],\n dtype=object\n )\n self.assertIsInstance(loaded_classes_list, list)\n self.assertEqual(true_classes_list, loaded_classes_list)\n self.assertIsInstance(loaded_texts, np.ndarray)\n self.assertIsInstance(loaded_labels, np.ndarray)\n self.assertEqual(true_texts.shape, loaded_texts.shape)\n self.assertEqual(true_labels.shape, loaded_labels.shape)\n self.assertEqual(true_texts.tolist(), loaded_texts.tolist())\n self.assertEqual(true_labels.tolist(), loaded_labels.tolist())\n\n def test_str_to_layers_positive01(self):\n src = '100-50'\n true_res = [100, 50]\n calc_res = str_to_layers(src)\n self.assertEqual(true_res, calc_res)\n\n def test_str_to_layers_positive02(self):\n src = '100'\n true_res = [100]\n calc_res = str_to_layers(src)\n self.assertEqual(true_res, calc_res)\n\n def test_str_to_layers_negative01(self):\n src = '100-a-50'\n true_err_msg = re.escape('`100-a-50` is wrong description of layer sizes!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n _ = str_to_layers(src)\n\n def test_str_to_layers_negative02(self):\n src = ''\n true_err_msg = re.escape('`` is wrong description of layer sizes!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n _ = str_to_layers(src)\n\n def test_str_to_layers_negative03(self):\n src = '100-0-50'\n true_err_msg = re.escape('`100-0-50` is wrong description of layer sizes!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n _ = str_to_layers(src)\n\n def test_read_snips2017_data(self):\n true_train_texts = [\n 'Add another song to the Cita Romántica playlist.',\n 'add clem burke in my playlist Pre-Party R&B Jams',\n 'Add Live from Aragon Ballroom to Trapeo',\n 'book The Middle East restaurant in IN for noon',\n 'Book a table at T-Rex distant from Halsey St.',\n 'I\\'d like to eat at a taverna that serves chili con carne for a party of 10',\n 'What will the weather be this year in Horseshoe Lake State Fish and Wildlife Area?',\n 'Will it be sunny one hundred thirty five days from now in Monterey Bay National Marine Sanctuary',\n 'Is it supposed to rain nearby my current location at 0 o\\'clock?',\n 'I need to hear the song Aspro Mavro from Bill Szymczyk on Youtube',\n 'play Yo Ho from the new york pops on Youtube',\n 'Play some seventies music by Janne Puurtinen on Youtube.',\n 'rate The Lotus and the Storm zero of 6',\n 'Rate The Fall-Down Artist 5 stars.',\n 'Rate the current novel one points',\n 'find the soundtrack titled This Side of Paradise',\n 'find a book called The Mad Magician',\n 'find the picture Louder Than Bombs',\n 'What are the movie schedule at Malco Theatres',\n 'I want to get the movie schedule',\n 'Show me movie time for I Am Sorry at my movie house'\n ]\n true_train_labels = [\n 'AddToPlaylist',\n 'AddToPlaylist',\n 'AddToPlaylist',\n 'BookRestaurant',\n 'BookRestaurant',\n 'BookRestaurant',\n 'GetWeather',\n 'GetWeather',\n 'GetWeather',\n 'PlayMusic',\n 'PlayMusic',\n 'PlayMusic',\n 'RateBook',\n 'RateBook',\n 'RateBook',\n 'SearchCreativeWork',\n 'SearchCreativeWork',\n 'SearchCreativeWork',\n 'SearchScreeningEvent',\n 'SearchScreeningEvent',\n 'SearchScreeningEvent'\n ]\n true_val_texts = [\n 'add Stani, stani Ibar vodo songs in my playlist música libre',\n 'add this album to my Blues playlist',\n 'Book a reservation for seven people at a bakery in Osage City',\n 'Book spot for three at Maid-Rite Sandwich Shop in Antigua and Barbuda',\n 'How\\'s the 
weather in Munchique National Natural Park',\n 'Tell me the weather forecast for France',\n 'I\\'d like to hear music that\\'s popular from Trick-trick on the Slacker service',\n 'Play Making Out by Alexander Rosenbaum off Google Music.',\n 'Rate All That Remains a five',\n 'Give this album 4 points',\n 'Please help me find the Bloom: Remix Album song.',\n 'Find me the soundtrack called Enter the Chicken',\n 'Find movie times for Landmark Theatres.',\n 'What are the movie times for Amco Entertainment'\n ]\n true_val_labels = [\n 'AddToPlaylist',\n 'AddToPlaylist',\n 'BookRestaurant',\n 'BookRestaurant',\n 'GetWeather',\n 'GetWeather',\n 'PlayMusic',\n 'PlayMusic',\n 'RateBook',\n 'RateBook',\n 'SearchCreativeWork',\n 'SearchCreativeWork',\n 'SearchScreeningEvent',\n 'SearchScreeningEvent'\n ]\n true_test_texts = [\n 'I\\'d like to have this track onto my Classical Relaxations playlist.',\n 'Book a reservation for my babies and I',\n 'What will the weather be faraway from here?',\n 'can you put on Like A Hurricane by Paul Landers',\n 'rate this album four out of 6 stars',\n 'Wish to find the movie the Heart Beat',\n 'Is Babar: King of the Elephants playing'\n ]\n true_test_labels = [\n 'AddToPlaylist',\n 'BookRestaurant',\n 'GetWeather',\n 'PlayMusic',\n 'RateBook',\n 'SearchCreativeWork',\n 'SearchScreeningEvent'\n ]\n loaded_train_data, loaded_val_data, loaded_test_data = read_snips2017_data(\n os.path.join(os.path.dirname(__file__), 'test_snips2017')\n )\n self.assertIsInstance(loaded_train_data, tuple)\n self.assertIsInstance(loaded_val_data, tuple)\n self.assertIsInstance(loaded_test_data, tuple)\n self.assertEqual(len(loaded_train_data), 2)\n self.assertEqual(len(loaded_val_data), 2)\n self.assertEqual(len(loaded_test_data), 2)\n self.assertIsInstance(loaded_train_data[0], list)\n self.assertIsInstance(loaded_train_data[1], list)\n self.assertEqual(len(loaded_train_data[0]), len(loaded_train_data[1]))\n self.assertIsInstance(loaded_val_data[0], list)\n self.assertIsInstance(loaded_val_data[1], list)\n self.assertEqual(len(loaded_val_data[0]), len(loaded_val_data[1]))\n self.assertIsInstance(loaded_test_data[0], list)\n self.assertIsInstance(loaded_test_data[1], list)\n self.assertEqual(len(loaded_test_data[0]), len(loaded_test_data[1]))\n self.assertEqual(true_train_texts, loaded_train_data[0])\n self.assertEqual(true_train_labels, loaded_train_data[1])\n self.assertEqual(true_val_texts, loaded_val_data[0])\n self.assertEqual(true_val_labels, loaded_val_data[1])\n self.assertEqual(true_test_texts, loaded_test_data[0])\n self.assertEqual(true_test_labels, loaded_test_data[1])\n\n def test_read_csv_positive01(self):\n file_name = os.path.join(os.path.dirname(__file__), 'csv_for_testing1.csv')\n true_classes = ['Криминал', 'Медицина', 'Политика', 'Спорт']\n true_texts = [\n 'Названы регионы России с самой высокой смертностью от рака.',\n 'Испанские клубы открестились от Неймара.',\n 'Семилетняя беженка погибла после задержания на границе США.',\n 'Главная реформа Обамы признана неконституционной.',\n 'Бывший чемпион UFC не выдержал кровопролития и сдался.',\n 'Охранника магазина зарезали из-за трех бутылок водки.',\n 'Лукашенко пожаловался Путину на украинских «отмороженных нацменов».'\n ]\n true_labels = [1, 3, 0, 2, 3, 0, 2]\n loaded_texts, loaded_labels, loaded_classes = read_csv(file_name)\n self.assertIsInstance(loaded_classes, list)\n self.assertEqual(true_classes, loaded_classes)\n self.assertIsInstance(loaded_texts, np.ndarray)\n self.assertIsInstance(loaded_labels, np.ndarray)\n 
self.assertEqual(loaded_texts.shape, (len(true_texts),))\n self.assertEqual(loaded_labels.shape, (len(true_labels),))\n self.assertEqual(true_texts, loaded_texts.tolist())\n self.assertEqual(true_labels, loaded_labels.tolist())\n\n def test_read_csv_positive02(self):\n file_name = os.path.join(os.path.dirname(__file__), 'csv_for_testing2.csv')\n true_classes = ['Криминал', 'Культура', 'Наука', 'Общество', 'Политика', 'Экономика']\n true_texts = [\n 'Германия разрешила третий пол.',\n 'Лукашенко объяснил поставки оружия Азербайджану.',\n 'Шакиру уличили в неуплате налогов.',\n 'Люди в масках ворвались на собрание врачей в Киеве и отказались их выпускать.',\n 'Подсчитаны расходы на российскую сверхтяжелую ракету для освоения Луны.',\n 'Популярные актеры дебютируют в короткометражных хоррорах.',\n 'Россия ответила на высылку дипломата из Словакии.',\n 'Российские нефтяники заработали триллионы на фоне бензинового кризиса.',\n 'Google выбрал своего «Человека года».'\n ]\n true_labels = [3, {4, 5}, {0, 1}, 0, {2, 5}, 1, 4, 5, 3]\n loaded_texts, loaded_labels, loaded_classes = read_csv(file_name)\n self.assertIsInstance(loaded_classes, list)\n self.assertEqual(true_classes, loaded_classes)\n self.assertIsInstance(loaded_texts, np.ndarray)\n self.assertIsInstance(loaded_labels, np.ndarray)\n self.assertEqual(loaded_texts.shape, (len(true_texts),))\n self.assertEqual(loaded_labels.shape, (len(true_labels),))\n self.assertEqual(true_texts, loaded_texts.tolist())\n self.assertEqual(true_labels, loaded_labels.tolist())\n\n def test_read_csv_positive03(self):\n file_name = os.path.join(os.path.dirname(__file__), 'csv_for_testing2.csv')\n true_classes = ['Криминал', 'Культура', 'Общество', 'Политика', 'Экономика']\n true_texts = [\n 'Германия разрешила третий пол.',\n 'Лукашенко объяснил поставки оружия Азербайджану.',\n 'Шакиру уличили в неуплате налогов.',\n 'Люди в масках ворвались на собрание врачей в Киеве и отказались их выпускать.',\n 'Подсчитаны расходы на российскую сверхтяжелую ракету для освоения Луны.',\n 'Популярные актеры дебютируют в короткометражных хоррорах.',\n 'Россия ответила на высылку дипломата из Словакии.',\n 'Российские нефтяники заработали триллионы на фоне бензинового кризиса.',\n 'Google выбрал своего «Человека года».'\n ]\n true_labels = [2, {3, 4}, {0, 1}, 0, 4, 1, 3, 4, 2]\n loaded_texts, loaded_labels, loaded_classes = read_csv(file_name, 1)\n self.assertIsInstance(loaded_classes, list)\n self.assertEqual(true_classes, loaded_classes)\n self.assertIsInstance(loaded_texts, np.ndarray)\n self.assertIsInstance(loaded_labels, np.ndarray)\n self.assertEqual(loaded_texts.shape, (len(true_texts),))\n self.assertEqual(loaded_labels.shape, (len(true_labels),))\n self.assertEqual(true_texts, loaded_texts.tolist())\n self.assertEqual(true_labels, loaded_labels.tolist())\n\n def test_read_csv_positive04(self):\n file_name = os.path.join(os.path.dirname(__file__), 'csv_for_testing1.csv')\n true_classes = ['Криминал', 'Политика', 'Спорт']\n true_texts = [\n 'Испанские клубы открестились от Неймара.',\n 'Семилетняя беженка погибла после задержания на границе США.',\n 'Главная реформа Обамы признана неконституционной.',\n 'Бывший чемпион UFC не выдержал кровопролития и сдался.',\n 'Охранника магазина зарезали из-за трех бутылок водки.',\n 'Лукашенко пожаловался Путину на украинских «отмороженных нацменов».'\n ]\n true_labels = [2, 0, 1, 2, 0, 1]\n loaded_texts, loaded_labels, loaded_classes = read_csv(file_name, 1)\n self.assertIsInstance(loaded_classes, list)\n 
self.assertEqual(true_classes, loaded_classes)\n self.assertIsInstance(loaded_texts, np.ndarray)\n self.assertIsInstance(loaded_labels, np.ndarray)\n self.assertEqual(loaded_texts.shape, (len(true_texts),))\n self.assertEqual(loaded_labels.shape, (len(true_labels),))\n self.assertEqual(true_texts, loaded_texts.tolist())\n self.assertEqual(true_labels, loaded_labels.tolist())\n\n def test_parse_hidden_layers_description_positive01(self):\n src = '100'\n true_res = (100, 1)\n self.assertEqual(true_res, parse_hidden_layers_description(src))\n\n def test_parse_hidden_layers_description_positive02(self):\n src = '100:3'\n true_res = (100, 3)\n self.assertEqual(true_res, parse_hidden_layers_description(src))\n\n def test_parse_hidden_layers_description_positive03(self):\n src = '100:0'\n true_res = (0, 0)\n self.assertEqual(true_res, parse_hidden_layers_description(src))\n\n def test_parse_hidden_layers_description_positive04(self):\n src = '0:2'\n true_res = (0, 0)\n self.assertEqual(true_res, parse_hidden_layers_description(src))\n\n def test_parse_hidden_layers_description_positive05(self):\n src = '0:0'\n true_res = (0, 0)\n self.assertEqual(true_res, parse_hidden_layers_description(src))\n\n def test_parse_hidden_layers_description_positive06(self):\n src = ''\n true_res = (0, 0)\n self.assertEqual(true_res, parse_hidden_layers_description(src))\n\n def test_parse_hidden_layers_description_positive07(self):\n src = None\n true_res = (0, 0)\n self.assertEqual(true_res, parse_hidden_layers_description(src))\n\n def test_parse_hidden_layers_description_negative01(self):\n src = ':'\n true_err_msg = re.escape('Description of hidden layers is empty!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n _ = parse_hidden_layers_description(src)\n\n def test_parse_hidden_layers_description_negative02(self):\n src = '100:3:2'\n true_err_msg = re.escape('`100:3:2` is wrong description of hidden layers!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n _ = parse_hidden_layers_description(src)\n\n def test_parse_hidden_layers_description_negative03(self):\n src = '100:2a'\n true_err_msg = re.escape('`100:2a` is wrong description of hidden layers!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n _ = parse_hidden_layers_description(src)\n\n def test_parse_hidden_layers_description_negative04(self):\n src = '100:-1'\n true_err_msg = re.escape('`100:-1` is wrong description of hidden layers!')\n with self.assertRaisesRegex(ValueError, true_err_msg):\n _ = parse_hidden_layers_description(src)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TUW-GEO/qa4sm-reader
[ "148b034e883a64b88bdae84ad1f859a1ff483e73" ]
[ "src/qa4sm_reader/plotter.py" ]
[ "# -*- coding: utf-8 -*-\nimport warnings\nfrom pathlib import Path\nimport seaborn as sns\nimport pandas as pd\nfrom typing import Union\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom qa4sm_reader.img import QA4SMImg\nimport qa4sm_reader.globals as globals\nfrom qa4sm_reader import plotting_methods as plm\n\nfrom warnings import warn\n\n\nclass QA4SMPlotter():\n \"\"\"\n Class to create image files of plots from the validation results in a QA4SMImage\n \"\"\"\n def __init__(self, image, out_dir:str=None):\n \"\"\"\n Create box plots from results in a qa4sm output file.\n\n Parameters\n ----------\n image : QA4SMImg\n The results object.\n out_dir : str, optional (default: None)\n Path to output generated plot. If None, defaults to the current working directory.\n \"\"\"\n self.img = image\n self.out_dir = self.get_dir(out_dir=out_dir)\n\n self.ref = image.datasets.ref\n\n try:\n self.img.vars\n except:\n warn(\"The initialized QA4SMImg object has not been loaded. 'load_data' needs to be \"\n \"set to 'True' in the initialization of the Image.\")\n\n def get_dir(self, out_dir:str) -> Path:\n \"\"\"Use output path if specified, otherwise same directory as the one storing the netCDF file\"\"\"\n if out_dir:\n out_dir = Path(out_dir) # use directory if specified\n if not out_dir.exists():\n out_dir.mkdir() # make if not existing\n else:\n out_dir = self.img.filepath.parent # use default otherwise\n\n return out_dir\n\n def _standard_filename(self, out_name:str, out_type:str='png') -> Path:\n \"\"\"\n Standardized behaviour for filenames: if provided name has extension, it is kept; otherwise, it is saved as\n .png to self.out_dir\n\n Parameters\n ----------\n out_name : str\n output filename (with or without extension)\n out_type : str, optional\n contains file extensions to be plotted. If None, uses 'png'\n\n Returns\n -------\n outname: pathlib.Path\n correct path of the file\n \"\"\"\n out_name = Path(out_name)\n # provide output directory\n out_path = self.out_dir.joinpath(out_name)\n\n # provide output file type\n if not out_path.suffix:\n if out_type[0] != '.':\n out_type = '.' + out_type\n out_path = out_path.with_suffix(out_type)\n\n return out_path\n\n @staticmethod\n def _box_caption(Var, tc:bool=False) -> str:\n \"\"\"\n Create the dataset part of the box (axis) caption\n\n Parameters\n ----------\n Var: MetricVar\n variable for a metric\n tc: bool, default is False\n True if TC. 
Then, caption starts with \"Other Data:\"\n\n Returns\n -------\n capt: str\n box caption\n \"\"\"\n ref_meta, mds_meta, other_meta = Var.get_varmeta()\n ds_parts = []\n id, meta = mds_meta\n if tc:\n id, meta = other_meta\n ds_parts.append('{}-{}\\n({})\\nVariable: {} [{}]'.format(\n id,\n meta['pretty_name'],\n meta['pretty_version'],\n meta['pretty_variable'],\n meta['mu'])\n )\n capt = '\\n and \\n'.join(ds_parts)\n\n if tc:\n capt = 'Other Data:\\n' + capt\n\n return capt\n\n @staticmethod\n def _get_parts_name(Var, type='boxplot_basic') -> list:\n \"\"\"\n Create parts for title according to the type of plot\n\n Parameters\n ----------\n Var: MetricVar\n variable for a metric\n type: str\n type of plot\n\n Returns\n -------\n parts: list\n list of parts for title\n \"\"\"\n parts = []\n ref, mds, other = [meta for meta in Var.get_varmeta()]\n if type == 'boxplot_basic':\n parts.append(ref[0])\n parts.extend([ref[1]['pretty_name'], ref[1]['pretty_version']])\n\n elif type in ['boxplot_tc', 'mapplot_basic', 'mapplot_tc']:\n parts.append(mds[0])\n parts.extend([mds[1]['pretty_name'], mds[1]['pretty_version']])\n parts.append(ref[0])\n parts.extend([ref[1]['pretty_name'], ref[1]['pretty_version']])\n\n if type == 'mapplot_tc':\n parts.append(other[0])\n parts.extend([other[1]['pretty_name'], other[1]['pretty_version']])\n\n return parts\n\n @staticmethod\n def _titles_lut(type:str) -> str:\n \"\"\"\n Lookup table for plot titles\n\n Parameters\n ----------\n type: str\n type of plot\n \"\"\"\n titles = {\n 'boxplot_basic': 'Intercomparison of {} \\nwith {}-{} ({}) as the reference\\n ',\n 'boxplot_tc': 'Intercomparison of {} \\nfor {}-{} ({}) \\nwith {}-{} ({}) as the reference\\n ',\n 'mapplot_basic': '{} for {}-{} ({}) with {}-{} ({}) as the reference',\n 'mapplot_tc': '{} for {}-{} ({}) with {}-{} ({}) and {}-{} ({}) as the references',\n 'metadata': 'Intercomparison of {} by {}\\nwith reference: {}',\n }\n\n try:\n return titles[type]\n\n except KeyError as e:\n message = \"type '{}' is not in the lookup table\".format(type)\n warn(message)\n\n @staticmethod\n def _filenames_lut(type:str) -> str:\n \"\"\"\n Lookup table for file names\n\n Parameters\n ----------\n type: str\n type of plot\n \"\"\"\n # we stick to old naming convention\n names = {\n 'boxplot_basic': 'boxplot_{}',\n 'mapplot_common': 'overview_{}',\n 'boxplot_tc': 'boxplot_{}_for_{}-{}',\n 'mapplot_double': 'overview_{}-{}_and_{}-{}_{}',\n 'mapplot_tc': 'overview_{}-{}_and_{}-{}_and_{}-{}_{}_for_{}-{}',\n 'metadata': 'boxplot_{}_metadata_{}',\n }\n\n try:\n return names[type]\n\n except KeyError as e:\n message = \"type '{}' is not in the lookup table\".format(type)\n warn(message)\n\n def create_title(self, Var, type:str) -> str:\n \"\"\"\n Create title of the plot\n\n Parameters\n ----------\n Var: MetricVar\n variable for a metric\n type: str\n type of plot\n \"\"\"\n parts = [globals._metric_name[Var.metric]]\n parts.extend(self._get_parts_name(Var=Var, type=type))\n title = self._titles_lut(type=type).format(*parts)\n\n return title\n\n def create_filename(self, Var, type:str) -> str:\n \"\"\"\n Create name of the file\n\n Parameters\n ----------\n Var: MetricVar\n variable for a metric\n type: str\n type of plot\n \"\"\"\n name = self._filenames_lut(type=type)\n ref_meta, mds_meta, other_meta = Var.get_varmeta()\n # fetch parts of the name for the variable\n if not type in [\"mapplot_tc\", \"mapplot_double\"]:\n parts = [Var.metric]\n if mds_meta:\n parts.extend([mds_meta[0], mds_meta[1]['short_name']])\n else:\n 
parts = [ref_meta[0], ref_meta[1]['short_name']]\n if type == \"mapplot_tc\":\n # necessary to respect old naming convention\n for dss in Var.other_dss:\n parts.extend([dss[0], dss[1]['short_name']])\n parts.extend([Var.metric, mds_meta[0], mds_meta[1]['short_name']])\n parts.extend([mds_meta[0], mds_meta[1]['short_name'], Var.metric])\n\n name = name.format(*parts)\n\n return name\n\n def _yield_values(\n self,\n metric: str,\n tc: bool = False,\n stats: bool = True,\n mean_ci: bool = True,\n ) -> tuple:\n \"\"\"\n Get iterable with pandas dataframes for all variables of a metric to plot\n\n Parameters\n ----------\n metric: str\n metric name\n tc: bool, default is False\n True if TC. Then, caption starts with \"Other Data:\"\n stats: bool\n If true, append the statistics to the caption\n mean_ci: bool\n If True, 'Mean CI: {value}' is added to the caption\n\n Yield\n -----\n df: pd.DataFrame\n dataframe with variable values and caption name\n Var: QA4SMMetricVariable\n variable corresponding to the dataframe\n ci: pd.DataFrame\n dataframe with \"upper\" and \"lower\" CI\n \"\"\"\n Vars = self.img._iter_vars(type=\"metric\", filter_parms={\"metric\":metric})\n\n for n, Var in enumerate(Vars):\n values = Var.values[Var.varname]\n # changes if it's a common-type Var\n if Var.g == 0:\n box_cap_ds = 'All datasets'\n else:\n box_cap_ds = self._box_caption(Var, tc=tc)\n # setting in global for caption stats\n if globals.boxplot_printnumbers:\n box_cap = '{}'.format(box_cap_ds)\n if stats:\n box_stats = plm._box_stats(values)\n box_cap = box_cap + \"\\n{}\".format(box_stats)\n else:\n box_cap = box_cap_ds\n df = values.to_frame(box_cap)\n\n ci = self.img.get_cis(Var)\n if ci: # could be that variable doesn't have CIs - empty list\n ci = pd.concat(ci, axis=1)\n label = \"\"\n if mean_ci:\n # get the mean CI range\n diff = ci[\"upper\"] - ci[\"lower\"]\n ci_range = float(diff.mean())\n label = \"\\nMean CI range: {:.3g}\".format(ci_range)\n df.columns = [\n df.columns[0] + label\n ]\n else:\n ci = None\n # values are all Nan or NaNf - not plotted\n df_arr = df.to_numpy()\n if np.isnan(df_arr).all() or df_arr.size == 0:\n continue\n\n yield df, Var, ci\n\n def _boxplot_definition(\n self,\n metric:str,\n df:pd.DataFrame,\n type:str,\n ci=None,\n offset=0.07,\n Var=None,\n **kwargs\n ) -> tuple:\n \"\"\"\n Define parameters of plot\n\n Parameters\n ----------\n df: pd.DataFrame\n dataframe to plot\n type: str\n one of _titles_lut\n ci : list\n list of Dataframes containing \"upper\" and \"lower\" CIs\n xticks: list\n caption to each boxplot (or triplet thereof)\n offset: float\n offset of boxplots\n Var: QA4SMMetricVariable, optional. Default is None\n Specified in case mds meta is needed\n \"\"\"\n # plot label\n parts = [globals._metric_name[metric]]\n parts.append(globals._metric_description[metric].format(\n globals._metric_units[self.ref['short_name']]))\n label = \"{}{}\".format(*parts)\n # generate plot\n figwidth = globals.boxplot_width * (len(df.columns) + 1)\n # otherwise it's too narrow\n if metric == \"n_obs\":\n figwidth = figwidth + 0.2\n figsize = [figwidth, globals.boxplot_height]\n fig, ax = plm.boxplot(\n df=df,\n ci=ci,\n label=label,\n figsize=figsize,\n dpi=globals.dpi\n )\n if not Var:\n # when we only need reference dataset from variables (i.e. 
is the same):\n            for Var in self.img._iter_vars(type=\"metric\", filter_parms={\"metric\":metric}):\n                Var = Var\n                break\n        if not type==\"metadata\":\n            title = self.create_title(Var, type=type)\n            ax.set_title(title, pad=globals.title_pad)\n        # add watermark\n        if self.img.has_CIs:\n            offset = 0.08 # offset smaller as CI variables have a larger caption\n        if Var.g == 0:\n            offset = 0.03 # offset larger as common metrics have a shorter caption\n        if globals.watermark_pos not in [None, False]:\n            plm.make_watermark(fig, offset=offset)\n\n        return fig, ax\n\n    def _save_plot(self, out_name:str, out_types:str='png') -> list:\n        \"\"\"\n        Save plot with name to self.out_dir\n\n        Parameters\n        ----------\n        out_name: str\n            name of output file\n        out_types: str or list\n            extensions which the files should be saved in\n\n        Returns\n        -------\n        fnames: list\n            list of file names with all the extensions\n        \"\"\"\n        fnames = []\n        if isinstance(out_types, str):\n            out_types = [out_types]\n        for ext in out_types:\n            fname = self._standard_filename(out_name, out_type=ext)\n            if fname.exists():\n                warn('Overwriting file {}'.format(fname.name))\n            try:\n                plt.savefig(fname, dpi='figure', bbox_inches='tight')\n            except ValueError:\n                continue\n            fnames.append(fname.absolute())\n\n        return fnames\n\n    def boxplot_basic(\n        self, metric:str,\n        out_name:str=None,\n        out_types:str='png',\n        save_files:bool=False,\n        **plotting_kwargs\n    ) -> Union[list, None]:\n        \"\"\"\n        Creates a boxplot for common and double metrics. Saves a figure and returns Matplotlib fig and ax objects for\n        further processing.\n\n        Parameters\n        ----------\n        metric : str\n            metric that is collected from the file for all datasets and combined\n            into one plot.\n        out_name: str\n            name of output file\n        out_types: str or list\n            extensions which the files should be saved in\n        save_files: bool, optional. Default is False\n            whether to save the file in the output directory\n        plotting_kwargs: arguments for _boxplot_definition function\n\n        Returns\n        -------\n        fnames: list\n            list of file names with all the extensions\n        \"\"\"\n        fnames, values = [], []\n        ci = []\n        # we take the last iterated value for Var and use it for the file name\n        for df, Var, var_ci in self._yield_values(metric=metric):\n            values.append(df)\n            if var_ci is not None:\n                ci.append(var_ci)\n\n        # handle empty results\n        if not values:\n            return None\n        # put all Variables in the same dataframe\n        values = pd.concat(values)\n        # create plot\n        fig, ax = self._boxplot_definition(\n            metric=metric,\n            df=values,\n            type='boxplot_basic',\n            ci=ci,\n            **plotting_kwargs\n        )\n        if not out_name:\n            out_name = self.create_filename(Var, type='boxplot_basic')\n        # save or return plotting objects\n        if save_files:\n            fnames = self._save_plot(out_name, out_types=out_types)\n            plt.close('all')\n\n            return fnames\n\n        else:\n            return fig, ax\n\n    def boxplot_tc(\n        self, metric:str,\n        out_name:str=None,\n        out_types:str='png',\n        save_files:bool=False,\n        **plotting_kwargs\n    ) -> list:\n        \"\"\"\n        Creates a boxplot for TC metrics. Saves a figure and returns Matplotlib fig and ax objects for\n        further processing.\n\n        Parameters\n        ----------\n        metric : str\n            metric that is collected from the file for all datasets and combined\n            into one plot.\n        out_name: str\n            name of output file\n        out_types: str or list\n            extensions which the files should be saved in\n        save_files: bool, optional. 
Default is False\n            whether to save the file in the output directory\n        plotting_kwargs: arguments for _boxplot_definition function\n\n        Returns\n        -------\n        fnames: list\n            list of file names with all the extensions\n        \"\"\"\n        fnames = []\n        # group Vars and CIs relative to the same dataset\n        metric_tc, ci = {}, {}\n        for df, Var, var_ci in self._yield_values(metric=metric, tc=True):\n            id, names = Var.metric_ds\n            if var_ci is not None:\n                if id in ci.keys():\n                    ci[id].append(var_ci)\n                else:\n                    ci[id] = [var_ci]\n            if id in metric_tc.keys():\n                metric_tc[id][0].append(df)\n            else:\n                metric_tc[id] = [df], Var\n\n        for id, values in metric_tc.items():\n            dfs, Var = values\n            df = pd.concat(dfs)\n            # values are all Nan or NaNf - not plotted\n            if np.isnan(df.to_numpy()).all():\n                continue\n            # necessary if statement to prevent key error when no CIs are in the netCDF\n            if ci:\n                ci_id = ci[id]\n            else:\n                ci_id = None\n            # create plot\n            fig, ax = self._boxplot_definition(\n                metric=metric,\n                df=df,\n                ci=ci_id,\n                type='boxplot_tc',\n                Var=Var,\n                **plotting_kwargs\n            )\n            # save. Below workaround to avoid same names\n            if not out_name:\n                save_name = self.create_filename(Var, type='boxplot_tc')\n            else:\n                save_name = out_name\n            # save or return plotting objects\n            if save_files:\n                fns = self._save_plot(save_name, out_types=out_types)\n                fnames.extend(fns)\n                plt.close('all')\n\n        if save_files:\n            return fnames\n\n    def mapplot_var(\n        self, Var,\n        out_name:str=None,\n        out_types:str='png',\n        save_files:bool=False,\n        **plotting_kwargs\n    ) -> list:\n        \"\"\"\n        Plots values to a map, using the values as color. Plots a scatterplot for\n        ISMN and an image plot for other input values.\n\n        Parameters\n        ----------\n        Var : QA4SMMetricVariable\n            Var in the image to make the map for.\n        out_name: str\n            name of output file\n        out_types: str or list\n            extensions which the files should be saved in\n        save_files: bool, optional. 
Default is False\n            whether to save the file in the output directory\n        plotting_kwargs: arguments for mapplot function\n\n        Returns\n        -------\n        fnames: list\n            list of file names with all the extensions\n        \"\"\"\n        ref_meta, mds_meta, other_meta = Var.get_varmeta()\n        metric = Var.metric\n        ref_grid_stepsize = self.img.ref_dataset_grid_stepsize\n        # create mapplot\n        fig, ax = plm.mapplot(df=Var.values[Var.varname],\n                              metric=metric,\n                              ref_short=ref_meta[1]['short_name'],\n                              ref_grid_stepsize=ref_grid_stepsize,\n                              plot_extent=None,  # if None, extent is automatically adjusted (as opposed to img.extent)\n                              **plotting_kwargs)\n\n        # title and plot settings depend on the metric group\n        if Var.g == 0:\n            title = \"{} between all datasets\".format(globals._metric_name[metric])\n            out_name = self.create_filename(Var, type='mapplot_common')\n        elif Var.g == 2:\n            title = self.create_title(Var=Var, type='mapplot_basic')\n            out_name = self.create_filename(Var, type='mapplot_double')\n        else:\n            title = self.create_title(Var=Var, type='mapplot_tc')\n            out_name = self.create_filename(Var, type='mapplot_tc')\n\n        # use title for plot, make watermark\n        ax.set_title(title, pad=globals.title_pad)\n        if globals.watermark_pos not in [None, False]:\n            plm.make_watermark(fig, globals.watermark_pos, for_map=True, offset=0.04)\n\n        # save file or just return the image\n        if save_files:\n            fnames = self._save_plot(out_name, out_types=out_types)\n\n            return fnames\n\n        else:\n            return fig, ax\n\n    def mapplot_metric(\n        self, metric:str,\n        out_types:str='png',\n        save_files:bool=False,\n        **plotting_kwargs\n    ) -> list:\n        \"\"\"\n        Mapplot for all variables for a given metric in the loaded file.\n\n        Parameters\n        ----------\n        metric : str\n            Name of a metric. File is searched for variables for that metric.\n        out_name: str\n            name of output file\n        out_types: str or list\n            extensions which the files should be saved in\n        save_files: bool, optional. Default is False\n            whether to save the file in the output directory\n        plotting_kwargs: arguments for mapplot function\n\n        Returns\n        -------\n        fnames : list\n            List of files that were created\n        \"\"\"\n        fnames = []\n        for Var in self.img._iter_vars(type=\"metric\", filter_parms={\"metric\":metric}):\n            if not (np.isnan(Var.values.to_numpy()).all() or Var.is_CI):\n                fns = self.mapplot_var(Var,\n                                       out_name=None,\n                                       out_types=out_types,\n                                       save_files=save_files,\n                                       **plotting_kwargs)\n            # values are all Nan or NaNf - not plotted\n            else:\n                continue\n            if save_files:\n                fnames.extend(fns)\n                plt.close('all')\n\n        if fnames:\n            return fnames\n\n    def plot_metric(\n        self, metric:str,\n        out_types:str='png',\n        save_all:bool=True,\n        **plotting_kwargs\n    ) -> tuple:\n        \"\"\"\n        Plot and save boxplot and mapplot for a certain metric\n\n        Parameters\n        ----------\n        metric: str\n            name of the metric\n        out_types: str or list\n            extensions which the files should be saved in\n        save_all: bool, optional. 
Default is True.\n all plotted images are saved to the output directory\n plotting_kwargs: arguments for mapplot function.\n \"\"\"\n Metric = self.img.metrics[metric]\n if Metric.g == 0 or Metric.g == 2:\n fnames_bplot = self.boxplot_basic(metric=metric,\n out_types=out_types,\n save_files=save_all,\n **plotting_kwargs)\n elif Metric.g == 3:\n fnames_bplot = self.boxplot_tc(metric=metric,\n out_types=out_types,\n save_files=save_all,\n **plotting_kwargs)\n fnames_mapplot = self.mapplot_metric(metric=metric,\n out_types=out_types,\n save_files=save_all,\n **plotting_kwargs)\n\n return fnames_bplot, fnames_mapplot\n\n def meta_single(\n self,\n metric:str,\n metadata:str,\n df:pd.DataFrame=None,\n axis=None,\n plot_type:str=\"catplot\",\n **plotting_kwargs\n ) -> Union[tuple, None]:\n \"\"\"\n Boxplot of a metric grouped by the given metadata.\n\n Parameters\n ----------\n metric : str\n specified metric\n metadata : str\n specified metadata\n df : pd.DataFrame, optional\n metric values can be specified, in which case they will be used from here and\n not parsed from the metric name\n axis : matplotlib.axes.Axis, optional\n if provided, the function will create the plot on the specified axis\n plot_type : str, default is 'catplot'\n one of 'catplot' or 'multiplot', defines the type of plots for the 'classes' and 'continuous'\n metadata types\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n the boxplot\n ax : matplotlib.axes.Axes\n \"\"\"\n values = []\n for data, Var, var_ci in self._yield_values(metric=metric, stats=False, mean_ci=False):\n values.append(data)\n\n values = pd.concat(values, axis=1)\n # override values from metric\n if df is not None:\n values = df\n # get meta and select only metric values with metadata available\n meta_values = self.img.metadata[metadata].values.dropna()\n values = values.reindex(index=meta_values.index)\n mu = globals._metric_description[metric].format(globals._metric_units[self.ref['short_name']])\n\n out = plm.boxplot_metadata(\n df=values,\n metadata_values=meta_values,\n ax_label=Var.Metric.pretty_name + mu,\n axis=axis,\n plot_type=plot_type,\n **plotting_kwargs\n )\n\n if axis is None:\n fig, ax = out\n\n return fig, ax\n\n def meta_combo(\n self,\n metric:str,\n metadata:str,\n metadata_discrete:str,\n ):\n \"\"\"\n Cross-boxplot between two given metadata types\n\n Parameters\n ----------\n metric : str\n specified metric\n metadata: str\n 'continuous' or 'classes' metadata which provides the number of subplots (bins)\n metadata_discrete : str\n 'discrete' metadata which is shown in the subplots\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n the boxplot\n ax : matplotlib.axes.Axes\n \"\"\"\n values = []\n for df, Var, ci in self._yield_values(metric=metric, stats=False, mean_ci=False):\n values.append(df)\n values = pd.concat(values, axis=1)\n\n metric_name = globals._metric_name[metric]\n metric_units = globals._metric_description[metric].format(\n globals._metric_units[self.img.datasets.ref[\"short_name\"]]\n )\n\n Meta_cont = self.img.metadata[metadata]\n meta_values = Meta_cont.values.dropna()\n\n bin_funct = plm.bin_function_lut(globals.metadata[metadata][2])\n binned_values = bin_funct(\n df=values,\n metadata_values=meta_values,\n meta_key=metadata,\n )\n # dictionary with subset values\n values_subset = {\n a_bin: values.reindex(index=binned_values[a_bin].index) for a_bin in binned_values.keys()\n }\n kwargs = {\n \"metric\": metric,\n \"metadata\": metadata_discrete,\n \"common_y\": metric_name + metric_units\n }\n 
n_datasets = len(self.img.datasets.others)\n        fig, axes = plm.aggregate_subplots(\n            to_plot=values_subset,\n            funct=self.meta_single,\n            n_bars=n_datasets,\n            **kwargs\n        )\n\n        return fig, axes\n\n    def plot_metadata(\n        self, metric:str,\n        metadata:str,\n        metadata_discrete:str=None,\n        save_file:bool=False,\n        out_types:str='png',\n        **plotting_kwargs\n    ):\n        \"\"\"\n        Wrapper built around the 'meta_single' or 'meta_combo' functions to produce a metadata-based boxplot of a\n        metric.\n\n        Parameters\n        ----------\n        metric : str\n            name of metric to plot\n        metadata : str\n            name of metadata to subdivide the metric results\n        metadata_discrete : str\n            name of the metadata of the type 'discrete'\n\n        Return\n        ------\n        fig : matplotlib.figure.Figure\n            the boxplot\n        ax : matplotlib.axes.Axes\n        \"\"\"\n        if metadata_discrete is None:\n            fig, ax = self.meta_single(\n                metric=metric, metadata=metadata, **plotting_kwargs\n            )\n            metadata_tuple = [metadata]\n\n        else:\n            metadata_tuple = [metadata, metadata_discrete]\n            if not any(globals.metadata[i][2] == \"discrete\" for i in metadata_tuple):\n                raise ValueError(\n                    \"One of the provided metadata names should correspond to the 'discrete' type, see globals.py\"\n                )\n            if all(globals.metadata[i][2] == \"discrete\" for i in metadata_tuple):\n                raise ValueError(\n                    \"At least one of the provided metadata should not be of the 'discrete' type\"\n                )\n            fig, ax = self.meta_combo(\n                metric=metric, metadata=metadata,\n                metadata_discrete=metadata_discrete, **plotting_kwargs\n            )\n        meta_names = [globals.metadata[i][0] for i in metadata_tuple]\n        title = self._titles_lut(\"metadata\").format(\n            globals._metric_name[metric],\n            \", \".join(meta_names),\n            self.img.datasets.ref[\"pretty_title\"]\n        )\n        fig.suptitle(title)\n\n        plm.make_watermark(fig=fig, offset=0)\n\n        if save_file:\n            out_name = self._filenames_lut(\"metadata\").format(\n                metric,\n                \"_and_\".join(metadata_tuple)\n            )\n            self._save_plot(out_name, out_types=out_types)\n\n            return out_name\n\n        else:\n            return fig, ax\n\n    def plot_save_metadata(self, metric):\n        \"\"\"\n        Plots and saves three metadata boxplots per metric (defined in globals.py):\n\n        1. Boxplot by land cover class (2010 map)\n        2. Boxplot by Koeppen-Geiger climate classification\n        3. Boxplot by instrument depth and soil type (granularity)\n\n        Parameters\n        ----------\n        metric : str\n            name of metric\n\n        Return\n        ------\n        filenames: list\n            list of file names\n        \"\"\"\n        filenames = []\n\n        # makes no sense to plot the metadata for some metrics\n        if metric in globals._metadata_exclude:\n            return filenames\n\n        for type, meta_keys in globals.out_metadata_plots.items():\n            # the presence of instrument_depth in the out file depends on the ismn release version\n            if all(meta_key in self.img.metadata.keys() for meta_key in meta_keys):\n                outfile = self.plot_metadata(metric, *meta_keys, save_file=True)\n                filenames.append(outfile)\n            else:\n                warnings.warn(\n                    \"Not all: \" + \", \".join(meta_keys) + \" are present in the netCDF variables\"\n                )\n\n        return filenames\n\n" ]
[ [ "numpy.isnan", "pandas.concat", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
gaeunpark924/Comprehensive-Design-2
[ "3a4ca01c5f3daa58a90516654bcd3cc4e8725506" ]
[ "nodejs/yolov5/hubconf.py" ]
[ "\"\"\"YOLOv5 PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/\n\nUsage:\n import torch\n model = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n\"\"\"\n\nimport torch\n\n\ndef _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n \"\"\"Creates a specified YOLOv5 model\n\n Arguments:\n name (str): name of model, i.e. 'yolov5s'\n pretrained (bool): load pretrained weights into the model\n channels (int): number of input channels\n classes (int): number of model classes\n autoshape (bool): apply YOLOv5 .autoshape() wrapper to model\n verbose (bool): print all information to screen\n device (str, torch.device, None): device to use for model parameters\n\n Returns:\n YOLOv5 pytorch model\n \"\"\"\n from pathlib import Path\n\n from models.yolo import Model, attempt_load\n from utils.general import check_requirements, set_logging\n from utils.google_utils import attempt_download\n from utils.torch_utils import select_device\n\n check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('tensorboard', 'pycocotools', 'thop'))\n set_logging(verbose=verbose)\n\n fname = Path(name).with_suffix('.pt') # checkpoint filename\n try:\n if pretrained and channels == 3 and classes == 80:\n model = attempt_load(fname, map_location=torch.device('cpu')) # download/load FP32 model\n else:\n cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path\n model = Model(cfg, channels, classes) # create model\n if pretrained:\n attempt_download(fname) # download if not found locally\n ckpt = torch.load(fname, map_location=torch.device('cpu')) # load\n msd = model.state_dict() # model state_dict\n csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32\n csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter\n model.load_state_dict(csd, strict=False) # load\n if len(ckpt['model'].names) == classes:\n model.names = ckpt['model'].names # set class names attribute\n if autoshape:\n model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS\n device = select_device('0' if torch.cuda.is_available() else 'cpu') if device is None else torch.device(device)\n return model.to(device)\n\n except Exception as e:\n help_url = 'https://github.com/ultralytics/yolov5/issues/36'\n s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' 
% help_url\n raise Exception(s) from e\n\n\ndef custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):\n # YOLOv5 custom or local model\n return _create(path, autoshape=autoshape, verbose=verbose, device=device)\n\n\ndef yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small model https://github.com/ultralytics/yolov5\n return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium model https://github.com/ultralytics/yolov5\n return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large model https://github.com/ultralytics/yolov5\n return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge model https://github.com/ultralytics/yolov5\n return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)\n\n\ndef yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):\n # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5\n return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)\n\n\nif __name__ == '__main__':\n model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained\n # model = custom(path='path/to/model.pt') # custom\n\n # Verify inference\n import cv2\n import numpy as np\n from PIL import Image\n\n imgs = ['data/images/zidane.jpg', # filename\n 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg', # URI\n cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV\n Image.open('data/images/bus.jpg'), # PIL\n np.zeros((320, 640, 3))] # numpy\n\n results = model(imgs) # batched inference\n results.print()\n results.save()\n" ]
[ [ "torch.device", "numpy.zeros", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
open-pulse/OpenPulse
[ "ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf", "ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf", "ef49cd1ff672821c4b57729c0ef9f4ff5a83eadf" ]
[ "data/user_input/model/setup/acoustic/specificimpedanceInput.py", "data/user_input/plots/acoustic/plot_TL_NR_Input.py", "data/user_input/plots/structural/plotStructuralFrequencyResponseInput.py" ]
[ "from data.user_input.project.printMessageInput import PrintMessageInput\nimport os\nfrom os.path import basename\nimport numpy as np\nfrom PyQt5.QtWidgets import QToolButton, QPushButton, QLineEdit, QFileDialog, QDialog, QTabWidget, QWidget, QTreeWidgetItem, QTreeWidget, QSpinBox\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtGui import QColor, QBrush\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import uic\nimport configparser\nfrom shutil import copyfile\nfrom pulse.utils import remove_bc_from_file\n\nclass SpecificImpedanceInput(QDialog):\n def __init__(self, project, opv, *args, **kwargs):\n super().__init__(*args, **kwargs)\n uic.loadUi('data/user_input/ui/Model/Setup/Acoustic/specificImpedanceInput.ui', self)\n\n icons_path = 'data\\\\icons\\\\'\n self.icon = QIcon(icons_path + 'pulse.png')\n self.setWindowIcon(self.icon)\n\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setWindowModality(Qt.WindowModal)\n\n self.opv = opv\n self.opv.setInputObject(self)\n self.transform_points = self.opv.transformPoints\n\n self.project = project\n self.preprocessor = project.preprocessor\n self.before_run = self.preprocessor.get_model_checks()\n \n self.userPath = os.path.expanduser('~')\n self.new_load_path_table = \"\"\n self.project_folder_path = project.project_folder_path\n self.acoustic_bc_info_path = project.file._node_acoustic_path\n\n self.nodes = project.preprocessor.nodes\n self.specific_impedance = None\n self.nodes_typed = []\n self.imported_table = False\n self.remove_specific_impedance = False\n\n self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')\n self.lineEdit_specific_impedance_real = self.findChild(QLineEdit, 'lineEdit_specific_impedance_real')\n self.lineEdit_specific_impedance_imag = self.findChild(QLineEdit, 'lineEdit_specific_impedance_imag')\n self.lineEdit_load_table_path = self.findChild(QLineEdit, 'line_load_table_path')\n\n self.tabWidget_specific_impedance = self.findChild(QTabWidget, \"tabWidget_specific_impedance\")\n self.tabWidget_specific_impedance.currentChanged.connect(self.tabEvent_specific_impedance)\n\n self.tab_single_values = self.tabWidget_specific_impedance.findChild(QWidget, \"tab_single_values\")\n self.tab_table_values = self.tabWidget_specific_impedance.findChild(QWidget, \"tab_table_values\")\n\n self.treeWidget_specific_impedance = self.findChild(QTreeWidget, 'treeWidget_specific_impedance')\n self.treeWidget_specific_impedance.setColumnWidth(1, 20)\n self.treeWidget_specific_impedance.setColumnWidth(2, 80)\n self.treeWidget_specific_impedance.itemClicked.connect(self.on_click_item)\n self.treeWidget_specific_impedance.itemDoubleClicked.connect(self.on_doubleclick_item)\n\n self.toolButton_load_table = self.findChild(QToolButton, 'toolButton_load_table')\n self.toolButton_load_table.clicked.connect(self.load_specific_impedance_table)\n\n self.pushButton_single_values_confirm = self.findChild(QPushButton, 'pushButton_single_values_confirm')\n self.pushButton_single_values_confirm.clicked.connect(self.check_single_values)\n\n self.pushButton_table_values_confirm = self.findChild(QPushButton, 'pushButton_table_values_confirm')\n self.pushButton_table_values_confirm.clicked.connect(self.check_table_values)\n self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')\n\n self.pushButton_remove_bc_confirm = self.findChild(QPushButton, 'pushButton_remove_bc_confirm')\n self.pushButton_remove_bc_confirm.clicked.connect(self.check_remove_bc_from_node)\n\n self.pushButton_remove_bc_confirm_2 = self.findChild(QPushButton, 
'pushButton_remove_bc_confirm_2')\n        self.pushButton_remove_bc_confirm_2.clicked.connect(self.check_remove_bc_from_node)\n \n        self.writeNodes(self.opv.getListPickedPoints())\n        self.load_nodes_info()\n        self.exec_()\n\n    def keyPressEvent(self, event):\n        if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:\n            if self.tabWidget_specific_impedance.currentIndex()==0:\n                self.check_single_values()\n            if self.tabWidget_specific_impedance.currentIndex()==1:\n                self.check_table_values()\n        elif event.key() == Qt.Key_Delete:\n            if self.tabWidget_specific_impedance.currentIndex()==2:\n                self.check_remove_bc_from_node()\n        elif event.key() == Qt.Key_Escape:\n            self.close()\n\n    def tabEvent_specific_impedance(self):\n        self.current_tab = self.tabWidget_specific_impedance.currentIndex()\n        if self.current_tab == 2:\n            self.lineEdit_nodeID.setDisabled(True)\n        else:\n            self.lineEdit_nodeID.setDisabled(False)\n\n    def writeNodes(self, list_node_ids):\n        text = \"\"\n        for node in list_node_ids:\n            text += \"{}, \".format(node)\n        self.lineEdit_nodeID.setText(text)\n\n    def check_complex_entries(self, lineEdit_real, lineEdit_imag):\n\n        self.stop = False\n        if lineEdit_real.text() != \"\":\n            try:\n                real_F = float(lineEdit_real.text())\n            except Exception:\n                window_title =\"ERROR\"\n                title = \"Invalid entry to the specific impedance\"\n                message = \"Wrong input for real part of specific impedance.\"\n                PrintMessageInput([title, message, window_title])\n \n                self.stop = True\n                return\n        else:\n            real_F = 0\n\n        if lineEdit_imag.text() != \"\":\n            try:\n                imag_F = float(lineEdit_imag.text())\n            except Exception:\n                window_title =\"ERROR\"\n                title = \"Invalid entry to the specific impedance\"\n                message = \"Wrong input for imaginary part of specific impedance.\"\n                PrintMessageInput([title, message, window_title])\n                self.stop = True\n                return\n        else:\n            imag_F = 0\n \n        if real_F == 0 and imag_F == 0:\n            return None\n        else:\n            return real_F + 1j*imag_F\n\n    def check_single_values(self):\n\n        lineEdit_nodeID = self.lineEdit_nodeID.text()\n        self.stop, self.nodes_typed = self.before_run.check_input_NodeID(lineEdit_nodeID)\n        if self.stop:\n            return\n\n        specific_impedance = self.check_complex_entries(self.lineEdit_specific_impedance_real, self.lineEdit_specific_impedance_imag)\n \n        if self.stop:\n            return\n\n        if specific_impedance is not None:\n            self.specific_impedance = specific_impedance\n            self.project.set_specific_impedance_bc_by_node(self.nodes_typed, self.specific_impedance, False)\n            self.transform_points(self.nodes_typed)\n            self.close()\n        else: \n            window_title =\"ERROR\"\n            title = \"Additional inputs required\"\n            message = \"You must inform at least one nodal load to confirm the input!\"\n            PrintMessageInput([title, message, window_title])\n \n    def load_table(self, lineEdit, header):\n \n        self.basename = \"\"\n        window_label = 'Choose a table to import the specific impedance'\n        self.path_imported_table, _type = QFileDialog.getOpenFileName(None, window_label, self.userPath, 'Files (*.dat; *.csv)')\n\n        if self.path_imported_table == \"\":\n            return \"\", \"\"\n\n        self.basename = os.path.basename(self.path_imported_table)\n        lineEdit.setText(self.path_imported_table)\n        if self.basename != \"\":\n            self.imported_table_name = self.basename\n \n        if \"\\\\\" in self.project_folder_path:\n            self.new_load_path_table = \"{}\\\\{}\".format(self.project_folder_path, self.basename)\n        elif \"/\" in self.project_folder_path:\n            self.new_load_path_table = \"{}/{}\".format(self.project_folder_path, self.basename)\n\n        try:\n            skiprows = 
int(self.lineEdit_skiprows.text()) \n            imported_file = np.loadtxt(self.path_imported_table, delimiter=\",\", skiprows=skiprows)\n        except Exception as error_log:\n            window_title =\"ERROR\"\n            title = \"Error reached while loading table\"\n            message = f\" {str(error_log)} \\n\\nIt is recommended to skip the header rows.\"\n            PrintMessageInput([title, message, window_title])\n            return\n\n        if imported_file.shape[1]<2:\n            window_title =\"ERROR\"\n            title = \"Error reached while loading table\"\n            message = \"The imported table has insufficient number of columns. The spectrum \\n\"\n            message += \"data must have frequencies, real and imaginary columns.\"\n            PrintMessageInput([title, message, window_title])\n            return\n \n        try:\n            self.imported_values = imported_file[:,1] + 1j*imported_file[:,2]\n            if imported_file.shape[1]>2:\n\n                self.frequencies = imported_file[:,0]\n                self.f_min = self.frequencies[0]\n                self.f_max = self.frequencies[-1]\n                self.f_step = self.frequencies[1] - self.frequencies[0] \n                self.imported_table = True\n\n                real_values = np.real(self.imported_values)\n                imag_values = np.imag(self.imported_values)\n                abs_values = np.abs(self.imported_values)\n                data = np.array([self.frequencies, real_values, imag_values, abs_values]).T\n                np.savetxt(self.new_load_path_table, data, delimiter=\",\", header=header)\n\n        except Exception as error_log:\n            window_title =\"ERROR\"\n            title = \"Error reached while loading table\"\n            message = f\" {str(error_log)} \\n\\nIt is recommended to skip the header rows.\"\n            PrintMessageInput([title, message, window_title])\n \n        return self.imported_values, self.basename\n\n    def load_specific_impedance_table(self):\n        header = \"specific impedance || Frequency [Hz], real[Pa], imaginary[Pa], absolute[Pa]\"\n        self.specific_impedance, self.basename_specific_impedance = self.load_table(self.lineEdit_load_table_path, header)\n \n    def check_table_values(self):\n\n        lineEdit_nodeID = self.lineEdit_nodeID.text()\n        self.stop, self.nodes_typed = self.before_run.check_input_NodeID(lineEdit_nodeID)\n        if self.stop:\n            return\n\n        if self.lineEdit_load_table_path != \"\":\n            if self.specific_impedance is not None:\n                self.project.set_specific_impedance_bc_by_node(self.nodes_typed, self.specific_impedance, True, table_name=self.basename_specific_impedance)\n                self.transform_points(self.nodes_typed)\n                self.close()\n\n    def text_label(self, value):\n        text = \"\"\n        if isinstance(value, complex):\n            value_label = str(value)\n        elif isinstance(value, np.ndarray):\n            value_label = 'Table'\n        text = \"{}\".format(value_label)\n        return text\n\n    def load_nodes_info(self):\n        for node in self.project.preprocessor.nodes_with_specific_impedance:\n            new = QTreeWidgetItem([str(node.external_index), str(self.text_label(node.specific_impedance))])\n            new.setTextAlignment(0, Qt.AlignCenter)\n            new.setTextAlignment(1, Qt.AlignCenter) \n            self.treeWidget_specific_impedance.addTopLevelItem(new)\n\n    def on_click_item(self, item):\n        self.lineEdit_nodeID.setText(item.text(0))\n\n    def on_doubleclick_item(self, item):\n        self.lineEdit_nodeID.setText(item.text(0))\n        self.check_remove_bc_from_node()\n\n    def check_remove_bc_from_node(self):\n\n        lineEdit_nodeID = self.lineEdit_nodeID.text()\n        self.stop, self.nodes_typed = self.before_run.check_input_NodeID(lineEdit_nodeID)\n        if self.stop:\n            return\n\n        key_strings = [\"specific impedance\"]\n        message = \"The specific impedance attributed to the {} node(s) has been removed.\".format(self.nodes_typed)\n        remove_bc_from_file(self.nodes_typed, self.acoustic_bc_info_path, key_strings, message)\n 
self.project.preprocessor.set_specific_impedance_bc_by_node(self.nodes_typed, None)\n self.transform_points(self.nodes_typed)\n self.treeWidget_specific_impedance.clear()\n self.load_nodes_info()\n self.close()\n\n def update(self):\n self.writeNodes(self.opv.getListPickedPoints())", "from PyQt5.QtWidgets import QMessageBox, QLineEdit, QDialog, QFileDialog, QWidget, QTreeWidget, QRadioButton, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QToolButton, QSpinBox\nfrom pulse.utils import error\nfrom os.path import basename\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtGui import QColor, QBrush\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import uic\nimport configparser\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nfrom pulse.postprocessing.plot_acoustic_data import get_acoustic_frf\nfrom data.user_input.project.printMessageInput import PrintMessageInput\n\nwindow_title1 = \"ERROR MESSAGE\"\nwindow_title2 = \"WARNING MESSAGE\"\n\nclass SnaptoCursor(object):\n def __init__(self, ax, x, y, show_cursor):\n\n self.ax = ax\n self.x = x\n self.y = y\n self.show_cursor = show_cursor\n\n if show_cursor:\n \n self.vl = self.ax.axvline(x=x[0], color='k', alpha=0.3, label='_nolegend_') # the vertical line\n self.hl = self.ax.axhline(y=y[0], color='k', alpha=0.3, label='_nolegend_') # the horizontal line \n self.marker, = ax.plot(x[0], y[0], markersize=4, marker=\"s\", color=[0,0,0], zorder=3)\n # self.marker.set_label(\"x: %1.2f // y: %4.2e\" % (self.x[0], self.y[0]))\n # plt.legend(handles=[self.marker], loc='lower left', title=r'$\\bf{Cursor}$ $\\bf{coordinates:}$')\n\n def mouse_move(self, event):\n if self.show_cursor: \n\n if not event.inaxes: return\n x, y = event.xdata, event.ydata\n if x>=np.max(self.x): return\n\n indx = np.searchsorted(self.x, [x])[0]\n \n x = self.x[indx]\n y = self.y[indx]\n self.vl.set_xdata(x)\n self.hl.set_ydata(y)\n self.marker.set_data([x],[y])\n self.marker.set_label(\"x: %1.2f // y: %1.2f\" % (x, y))\n plt.legend(handles=[self.marker], loc='lower left', title=r'$\\bf{Cursor}$ $\\bf{coordinates:}$')\n \n self.ax.figure.canvas.draw_idle()\n\n\nclass Plot_TL_NR_Input(QDialog):\n def __init__(self, project, opv, analysisMethod, frequencies, solution, *args, **kwargs):\n super().__init__(*args, **kwargs)\n uic.loadUi('data/user_input/ui/Plots/Results/Acoustic/plot_TL_NR_Input.ui', self)\n\n icons_path = 'data\\\\icons\\\\'\n self.icon = QIcon(icons_path + 'pulse.png')\n self.setWindowIcon(self.icon)\n\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setWindowModality(Qt.WindowModal)\n\n self.opv = opv\n self.opv.setInputObject(self)\n\n self.projec = project\n self.preprocessor = project.preprocessor\n self.before_run = self.preprocessor.get_model_checks()\n\n self.elements = self.preprocessor.acoustic_elements\n self.dict_elements_diameter = self.preprocessor.neighbor_elements_diameter()\n self.nodes = project.preprocessor.nodes\n \n self.userPath = os.path.expanduser('~')\n self.path = \"\"\n self.save_path = \"\"\n \n self.analysisMethod = analysisMethod\n self.frequencies = frequencies\n self.solution = solution\n\n self.mag = False\n self.real = False\n self.imag = False\n self.flagTL = False\n self.flagNR = False\n self.input_node_ID = None\n self.output_node_ID = None\n self.imported_data = None\n \n self.lineEdit_inputNodeID = self.findChild(QLineEdit, 'lineEdit_inputNodeID') \n self.lineEdit_outputNodeID = self.findChild(QLineEdit, 'lineEdit_outputNodeID')\n self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')\n 
self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 'lineEdit_ImportResultsPath')\n self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')\n\n self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')\n self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)\n self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')\n self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)\n self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')\n self.toolButton_ExportResults.clicked.connect(self.ExportResults)\n self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')\n self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)\n\n self.tabWidget_plot_results = self.findChild(QTabWidget, \"tabWidget_plot_results\")\n self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, \"tab_plot\")\n\n self.radioButton_TL = self.findChild(QRadioButton, 'radioButton_TL')\n self.radioButton_NR = self.findChild(QRadioButton, 'radioButton_NR')\n self.radioButton_TL.toggled.connect(self.radioButtonEvent_TL_NR)\n self.radioButton_NR.toggled.connect(self.radioButtonEvent_TL_NR)\n self.flagTL = self.radioButton_TL.isChecked()\n self.flagNR = self.radioButton_NR.isChecked()\n\n self.checkBox_cursor = self.findChild(QCheckBox, 'checkBox_cursor')\n self.cursor = self.checkBox_cursor.isChecked()\n self.checkBox_cursor.clicked.connect(self.update_cursor)\n\n self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')\n self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)\n self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')\n\n self.pushButton = self.findChild(QPushButton, 'pushButton')\n self.pushButton.clicked.connect(self.check)\n\n self.pushButton_flipNodes = self.findChild(QPushButton, 'pushButton_flipNodes')\n self.pushButton_flipNodes.clicked.connect(self.flip_nodes)\n\n self.writeNodes(self.opv.getListPickedPoints())\n self.exec_()\n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:\n self.check()\n elif event.key() == Qt.Key_Escape:\n self.close()\n\n def writeNodes(self, list_node_ids):\n if len(list_node_ids) == 2:\n self.lineEdit_inputNodeID.setText(str(list_node_ids[-2]))\n self.lineEdit_outputNodeID.setText(str(list_node_ids[-1]))\n elif len(list_node_ids) == 1:\n self.lineEdit_inputNodeID.setText(str(list_node_ids[-1]))\n self.lineEdit_outputNodeID.setText(\"\")\n\n def flip_nodes(self):\n temp_text_input = self.lineEdit_inputNodeID.text()\n temp_text_output = self.lineEdit_outputNodeID.text()\n self.lineEdit_inputNodeID.setText(temp_text_output)\n self.lineEdit_outputNodeID.setText(temp_text_input) \n\n def update(self):\n self.writeNodes(self.opv.getListPickedPoints())\n\n def update_cursor(self):\n self.cursor = self.checkBox_cursor.isChecked()\n\n def check_node(self, node_string):\n try:\n tokens = node_string.text().strip().split(',')\n try:\n tokens.remove('')\n except:\n pass\n node_typed = list(map(int, tokens))\n\n except Exception:\n title = \"INVALID NODE ID\"\n message = \"Wrong input for Node ID.\"\n PrintMessageInput([title, message, window_title1])\n return None, False\n\n if len(node_typed) == 1:\n try:\n self.preprocessor.nodes[node_typed[0]].external_index\n except:\n title = \"INVALID NODE ID\"\n message = \" The Node ID input values must be\\n 
greater than 1 and less than {}.\".format(len(self.nodes))\n                PrintMessageInput([title, message, window_title1])\n                return None, False\n\n        elif len(node_typed) == 0:\n            title = \"INVALID NODE ID\"\n            message = \"Please, enter a valid Node ID.\"\n            PrintMessageInput([title, message, window_title1])\n            return None, False\n\n        else:\n            title = \"MULTIPLE NODE IDs\"\n            message = \"Please, type or select only one Node ID.\"\n            PrintMessageInput([title, message, window_title1])\n            return None, False\n\n        return node_typed[0], True\n\n    def reset_imported_data(self):\n        self.imported_data = None\n        title = \"Information\"\n        message = \"The plot data has been reset.\"\n        PrintMessageInput([title, message, window_title2])\n\n    def radioButtonEvent_TL_NR(self):\n        self.flagTL = self.radioButton_TL.isChecked()\n        self.flagNR = self.radioButton_NR.isChecked()\n\n    def check(self, export=False):\n\n        lineEdit_input = self.lineEdit_inputNodeID.text()\n        stop, self.input_node_ID = self.before_run.check_input_NodeID(lineEdit_input, single_ID=True)\n        if stop:\n            return\n\n        lineEdit_output = self.lineEdit_outputNodeID.text()\n        stop, self.output_node_ID = self.before_run.check_input_NodeID(lineEdit_output, single_ID=True)\n        if stop:\n            return\n\n        if export:\n            return\n        else:\n            self.plot()\n\n    def choose_path_import_results(self):\n        self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Files (*.dat; *.csv)')\n        self.import_name = basename(self.import_path)\n        self.lineEdit_ImportResultsPath.setText(str(self.import_path))\n \n    def ImportResults(self):\n        try:\n            skiprows = int(self.lineEdit_skiprows.text()) \n            self.imported_data = np.loadtxt(self.import_path, delimiter=\",\", skiprows=skiprows)\n            self.legend_imported = \"imported data: \"+ basename(self.import_path).split(\".\")[0]\n            self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)\n            title = \"Information\"\n            message = \"The results have been imported.\"\n            PrintMessageInput([title, message, window_title2])\n        except Exception as e:\n            title = \"ERROR WHILE LOADING TABLE\"\n            message = [str(e) + \" It is recommended to skip the header rows.\"] \n            PrintMessageInput([title, message[0], window_title1])\n            return\n\n    def choose_path_export_results(self):\n        self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)\n        self.save_name = basename(self.save_path)\n        self.lineEdit_SaveResultsPath.setText(str(self.save_path))\n \n    def ExportResults(self):\n\n        if self.lineEdit_FileName.text() != \"\":\n            if self.save_path != \"\":\n                self.export_path_folder = self.save_path + \"/\"\n            else:\n                title = \"No folder selected\"\n                message = \"Please, choose a folder before trying to export the results.\"\n                PrintMessageInput([title, message, window_title1])\n                return\n        else:\n            title = \"Empty file name\"\n            message = \"Inform a file name before trying to export the results.\"\n            PrintMessageInput([title, message, window_title1]) \n            return\n \n        self.check(export=True)\n        data = self.get_TL_NR()\n\n        if self.stop:\n            return\n\n        freq = self.frequencies\n\n        check_name_TL = []\n        check_name_NR = []\n        for a in [\"NR\", \"Nr\", \"nr\", \"attenuation\", \"Attenuation\", \"ATTENUATION\"]:\n            if a in self.lineEdit_FileName.text():\n                check_name_TL.append(True)\n            else:\n                check_name_TL.append(False)\n\n        for a in [\"TL\", \"Tl\",\"tl\", \"tranmission\", \"loss\", \"Transmission\", \"Loss\", \"TRANSMISSION\", \"LOSS\"]:\n            if a in self.lineEdit_FileName.text():\n                check_name_NR.append(True)\n            else:\n                check_name_NR.append(False)\n\n        if self.flagTL:\n 
if True in check_name_TL:\n                title = \"File name recheck\"\n                message = \"Please, it's recommended to check the file name before exporting the results!\"\n                PrintMessageInput([title, message, window_title2])\n                return\n            header = \"Frequency[Hz], TL - Magnitude [dB]\"\n        else:\n            if True in check_name_NR:\n                title = \"File name recheck\"\n                message = \"Please, it's recommended to check the file name before exporting the results!\"\n                PrintMessageInput([title, message, window_title2])\n                return\n            header = \"Frequency[Hz], NR - Magnitude [dB]\"\n\n        self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + \".dat\"\n        data_to_export = np.array([freq, data]).T\n        np.savetxt(self.export_path, data_to_export, delimiter=\",\", header=header)\n\n        title = \"Information\"\n        message = \"The results have been exported.\"\n        PrintMessageInput([title, message, window_title2])\n\n    def get_minor_outer_diameter_from_node(self, node):\n        data = self.dict_elements_diameter[node]\n        inner_diameter = []\n        density = []\n        speed_of_sound = []\n        for (index, _, int_dia) in data:\n            inner_diameter.append(int_dia)\n            density.append(self.elements[index].fluid.density)\n            speed_of_sound.append(self.elements[index].speed_of_sound_corrected())\n        ind = inner_diameter.index(min(inner_diameter))\n        return inner_diameter[ind], density[ind], speed_of_sound[ind]\n\n    def get_TL_NR(self):\n\n        self.stop = False\n\n        P_input = get_acoustic_frf(self.preprocessor, self.solution, self.input_node_ID)\n        P_output = get_acoustic_frf(self.preprocessor, self.solution, self.output_node_ID)\n\n        P_input2 = 0.5*np.real(P_input*np.conjugate(P_input))\n        P_output2 = 0.5*np.real(P_output*np.conjugate(P_output))\n\n        d_in, rho_in, c0_in = self.get_minor_outer_diameter_from_node(self.input_node_ID)\n        d_out, rho_out, c0_out = self.get_minor_outer_diameter_from_node(self.output_node_ID)\n\n        if 0 not in P_input2 and 0 not in P_output2:\n            if self.flagTL:\n                alpha_T = (P_output2*rho_out*c0_out)/(P_input2*rho_in*c0_in)\n                TL = -10*np.log10(alpha_T)\n                return TL\n\n            if self.flagNR:\n                delta = (P_output2*rho_out*c0_out*(d_out**2))/(P_input2*rho_in*c0_in*(d_in**2))\n                NR = 10*np.log10(delta)\n                return NR\n\n        else:\n            self.stop = True\n            return None\n\n    def plot(self):\n\n        plt.close()\n        fig = plt.figure(figsize=[12,7])\n        ax = fig.add_subplot(1,1,1)\n\n        frequencies = self.frequencies\n        results = self.get_TL_NR()\n\n        if self.stop:\n            title = \"Invalid pressure values\"\n            message = \"The input pressure must be different from zero!\"\n            PrintMessageInput([title, message, window_title1])\n            return\n\n        if self.flagTL:\n            analysis_label = \"TRANSMISSION LOSS\"\n        else:\n            analysis_label = \"ATTENUATION\"\n\n        # mng = plt.get_current_fig_manager()\n        # mng.window.state('zoomed')\n\n        #cursor = Cursor(ax)\n        cursor = SnaptoCursor(ax, frequencies, results, self.cursor)\n        plt.connect('motion_notify_event', cursor.mouse_move)\n        unit_label = \"dB\"\n        legend_label = \"Input Node ID: {} || Output Node ID: {}\".format(self.input_node_ID, self.output_node_ID)\n\n        if self.imported_data is None:\n            first_plot, = plt.plot(frequencies, results, color=[1,0,0], linewidth=2, label=legend_label)\n            _legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')\n        else:\n            first_plot, = plt.plot(frequencies, results, color=[1,0,0], linewidth=2)\n            second_plot, = plt.plot(self.imported_data[:,0], self.imported_data[:,1], color=[0,0,1], linewidth=2, linestyle=\"--\")\n            _legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], 
loc='upper right')\n \n plt.gca().add_artist(_legends)\n\n ax.set_title(('FREQUENCY PLOT OF {}').format(analysis_label.upper()), fontsize = 18, fontweight = 'bold')\n ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')\n if self.flagTL:\n ax.set_ylabel((\"Transmission Loss [{}]\").format(unit_label), fontsize = 14, fontweight = 'bold')\n elif self.flagNR:\n ax.set_ylabel((\"Attenuation [{}]\").format(unit_label), fontsize = 14, fontweight = 'bold')\n plt.show()", "from PyQt5.QtWidgets import QLineEdit, QDialog, QFileDialog, QWidget, QTreeWidget, QToolButton, QRadioButton, QMessageBox, QTreeWidgetItem, QTabWidget, QLabel, QCheckBox, QPushButton, QSpinBox\nfrom os.path import basename\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtGui import QColor, QBrush\nfrom PyQt5.QtCore import Qt\nfrom PyQt5 import uic\nimport configparser\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom pulse.postprocessing.plot_structural_data import get_structural_frf\nfrom data.user_input.project.printMessageInput import PrintMessageInput\n\nwindow_title1 = \"ERROR MESSAGE\"\nwindow_title2 = \"WARNING MESSAGE\"\n\nclass SnaptoCursor(object):\n def __init__(self, ax, x, y, show_cursor):\n\n self.ax = ax\n self.x = x\n self.y = y\n self.show_cursor = show_cursor\n\n if show_cursor:\n \n self.vl = self.ax.axvline(x=np.min(x), ymin=np.min(y), color='k', alpha=0.3, label='_nolegend_') # the vertical line\n self.hl = self.ax.axhline(color='k', alpha=0.3, label='_nolegend_') # the horizontal line \n self.marker, = ax.plot(x[0], y[0], markersize=4, marker=\"s\", color=[0,0,0], zorder=3)\n # self.marker.set_label(\"x: %1.2f // y: %4.2e\" % (self.x[0], self.y[0]))\n # plt.legend(handles=[self.marker], loc='lower left', title=r'$\\bf{Cursor}$ $\\bf{coordinates:}$')\n\n def mouse_move(self, event):\n if self.show_cursor: \n\n if not event.inaxes: return\n x, y = event.xdata, event.ydata\n if x>=np.max(self.x): return\n\n indx = np.searchsorted(self.x, [x])[0]\n \n x = self.x[indx]\n y = self.y[indx]\n self.vl.set_xdata(x)\n self.hl.set_ydata(y)\n self.marker.set_data([x],[y])\n self.marker.set_label(\"x: %1.2f // y: %4.2e\" % (x, y))\n plt.legend(handles=[self.marker], loc='lower left', title=r'$\\bf{Cursor}$ $\\bf{coordinates:}$')\n \n self.ax.figure.canvas.draw_idle()\n\n\nclass PlotStructuralFrequencyResponseInput(QDialog):\n def __init__(self, project, opv, analysisMethod, frequencies, solution, *args, **kwargs):\n super().__init__(*args, **kwargs)\n uic.loadUi('data/user_input/ui/Plots/Results/Structural/plotStructuralFrequencyResponseInput.ui', self)\n\n icons_path = 'data\\\\icons\\\\'\n self.icon = QIcon(icons_path + 'pulse.png')\n self.setWindowIcon(self.icon)\n\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setWindowModality(Qt.WindowModal)\n\n self.opv = opv\n self.opv.setInputObject(self)\n self.list_node_IDs = self.opv.getListPickedPoints()\n\n self.projec = project\n self.preprocessor = project.preprocessor\n self.before_run = self.preprocessor.get_model_checks()\n self.nodes = self.preprocessor.nodes\n \n self.analysisMethod = analysisMethod\n self.frequencies = frequencies\n self.solution = solution\n\n self.userPath = os.path.expanduser('~')\n self.save_path = \"\"\n self.node_ID = 0\n self.imported_data = None\n self.localDof = None\n\n self.lineEdit_nodeID = self.findChild(QLineEdit, 'lineEdit_nodeID')\n\n self.lineEdit_FileName = self.findChild(QLineEdit, 'lineEdit_FileName')\n self.lineEdit_ImportResultsPath = self.findChild(QLineEdit, 
'lineEdit_ImportResultsPath')\n self.lineEdit_SaveResultsPath = self.findChild(QLineEdit, 'lineEdit_SaveResultsPath')\n\n self.toolButton_ChooseFolderImport = self.findChild(QToolButton, 'toolButton_ChooseFolderImport')\n self.toolButton_ChooseFolderImport.clicked.connect(self.choose_path_import_results)\n self.toolButton_ChooseFolderExport = self.findChild(QToolButton, 'toolButton_ChooseFolderExport')\n self.toolButton_ChooseFolderExport.clicked.connect(self.choose_path_export_results)\n self.toolButton_ExportResults = self.findChild(QToolButton, 'toolButton_ExportResults')\n self.toolButton_ExportResults.clicked.connect(self.ExportResults)\n self.toolButton_ResetPlot = self.findChild(QToolButton, 'toolButton_ResetPlot')\n self.toolButton_ResetPlot.clicked.connect(self.reset_imported_data)\n self.lineEdit_skiprows = self.findChild(QSpinBox, 'spinBox')\n\n self.checkBox_cursor = self.findChild(QCheckBox, 'checkBox_cursor')\n self.cursor = self.checkBox_cursor.isChecked()\n self.checkBox_cursor.clicked.connect(self.update_cursor)\n\n self.radioButton_ux = self.findChild(QRadioButton, 'radioButton_ux')\n self.radioButton_uy = self.findChild(QRadioButton, 'radioButton_uy')\n self.radioButton_uz = self.findChild(QRadioButton, 'radioButton_uz')\n self.radioButton_rx = self.findChild(QRadioButton, 'radioButton_rx')\n self.radioButton_ry = self.findChild(QRadioButton, 'radioButton_ry')\n self.radioButton_rz = self.findChild(QRadioButton, 'radioButton_rz')\n self.Ux = self.radioButton_ux.isChecked()\n self.Uy = self.radioButton_uy.isChecked()\n self.Uz = self.radioButton_uz.isChecked()\n self.Rx = self.radioButton_rx.isChecked()\n self.Ry = self.radioButton_ry.isChecked()\n self.Rz = self.radioButton_rz.isChecked()\n\n self.radioButton_plotAbs = self.findChild(QRadioButton, 'radioButton_plotAbs')\n self.radioButton_plotReal = self.findChild(QRadioButton, 'radioButton_plotReal')\n self.radioButton_plotImag = self.findChild(QRadioButton, 'radioButton_plotImag')\n self.radioButton_plotAbs.clicked.connect(self.radioButtonEvent_YAxis)\n self.radioButton_plotReal.clicked.connect(self.radioButtonEvent_YAxis)\n self.radioButton_plotImag.clicked.connect(self.radioButtonEvent_YAxis)\n self.plotAbs = self.radioButton_plotAbs.isChecked()\n self.plotReal = self.radioButton_plotReal.isChecked()\n self.plotImag = self.radioButton_plotImag.isChecked()\n\n self.radioButton_Absolute = self.findChild(QRadioButton, 'radioButton_Absolute')\n self.radioButton_Real_Imaginary = self.findChild(QRadioButton, 'radioButton_Real_Imaginary')\n self.radioButton_Absolute.clicked.connect(self.radioButtonEvent_save_data)\n self.radioButton_Real_Imaginary.clicked.connect(self.radioButtonEvent_save_data)\n self.save_Absolute = self.radioButton_Absolute.isChecked()\n self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()\n\n self.radioButton_NoneDiff = self.findChild(QRadioButton, 'radioButton_NoneDiff')\n self.radioButton_SingleDiff = self.findChild(QRadioButton, 'radioButton_SingleDiff')\n self.radioButton_DoubleDiff = self.findChild(QRadioButton, 'radioButton_DoubleDiff')\n self.radioButton_NoneDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)\n self.radioButton_SingleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)\n self.radioButton_DoubleDiff.clicked.connect(self.radioButtonEvent_modify_spectrum)\n self.NoneDiff = self.radioButton_NoneDiff.isChecked()\n self.SingleDiff = self.radioButton_SingleDiff.isChecked()\n self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()\n\n 
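# Plot tab widget and the buttons that trigger data import and the check/plot routine\n        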
self.tabWidget_plot_results = self.findChild(QTabWidget, \"tabWidget_plot_results\")\n        self.tab_plot = self.tabWidget_plot_results.findChild(QWidget, \"tab_plot\")\n        self.pushButton_AddImportedPlot = self.findChild(QPushButton, 'pushButton_AddImportedPlot')\n        self.pushButton_AddImportedPlot.clicked.connect(self.ImportResults)\n        self.pushButton = self.findChild(QPushButton, 'pushButton')\n        self.pushButton.clicked.connect(self.check)\n\n        self.writeNodes(self.list_node_IDs)\n        self.exec_()\n\n    def update_cursor(self):\n        self.cursor = self.checkBox_cursor.isChecked()\n\n    def reset_imported_data(self):\n        self.imported_data = None\n        title = \"Information\"\n        message = \"The plot data has been reset.\"\n        PrintMessageInput([title, message, window_title2])\n\n    def writeNodes(self, list_node_ids):\n        text = \"\"\n        for node in list_node_ids:\n            text += \"{}, \".format(node)\n        self.lineEdit_nodeID.setText(text)\n\n    def update(self):\n        self.list_node_IDs = self.opv.getListPickedPoints()\n        if self.list_node_IDs != []:\n            self.writeNodes(self.list_node_IDs)\n\n    def keyPressEvent(self, event):\n        if event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return:\n            self.check()\n        elif event.key() == Qt.Key_Escape:\n            self.close()\n\n    def radioButtonEvent_YAxis(self):\n        self.plotAbs = self.radioButton_plotAbs.isChecked()\n        self.plotReal = self.radioButton_plotReal.isChecked()\n        self.plotImag = self.radioButton_plotImag.isChecked()\n\n    def radioButtonEvent_save_data(self):\n        self.save_Absolute = self.radioButton_Absolute.isChecked()\n        self.save_Real_Imaginary = self.radioButton_Real_Imaginary.isChecked()\n\n    def radioButtonEvent_modify_spectrum(self):\n        self.NoneDiff = self.radioButton_NoneDiff.isChecked()\n        self.SingleDiff = self.radioButton_SingleDiff.isChecked()\n        self.DoubleDiff = self.radioButton_DoubleDiff.isChecked()\n\n    def choose_path_import_results(self):\n        self.import_path, _ = QFileDialog.getOpenFileName(None, 'Open file', self.userPath, 'Files (*.dat; *.csv)')\n        self.import_name = basename(self.import_path)\n        self.lineEdit_ImportResultsPath.setText(str(self.import_path))\n\n    def ImportResults(self):\n        try:\n            skiprows = int(self.lineEdit_skiprows.text())\n            self.imported_data = np.loadtxt(self.import_path, delimiter=\",\", skiprows=skiprows)\n            self.legend_imported = \"imported data: \" + basename(self.import_path).split(\".\")[0]\n            self.tabWidget_plot_results.setCurrentWidget(self.tab_plot)\n            title = \"Information\"\n            message = \"The results have been imported.\"\n            PrintMessageInput([title, message, window_title2])\n        except Exception as e:\n            title = \"ERROR WHILE LOADING TABLE\"\n            message = [str(e) + \" It is recommended to skip the header rows.\"]\n            PrintMessageInput([title, message[0], window_title1])\n            return\n\n    def choose_path_export_results(self):\n        self.save_path = QFileDialog.getExistingDirectory(None, 'Choose a folder to export the results', self.userPath)\n        self.save_name = basename(self.save_path)\n        self.lineEdit_SaveResultsPath.setText(str(self.save_path))\n\n    def check(self, export=False):\n\n        lineEdit_nodeID = self.lineEdit_nodeID.text()\n        stop, self.node_ID = self.before_run.check_input_NodeID(lineEdit_nodeID, single_ID=True)\n        if stop:\n            return True\n\n        self.localDof = None\n        if self.SingleDiff:\n            _unit_label = \"m/s\"\n        elif self.DoubleDiff:\n            _unit_label = \"m/s²\"\n        else:\n            _unit_label = \"m\"\n\n        if self.radioButton_ux.isChecked():\n            self.localDof = 0\n            self.localdof_label = \"Ux\"\n            self.unit_label = _unit_label\n\n        if self.radioButton_uy.isChecked():\n            self.localDof = 1\n            
self.localdof_label = \"Uy\"\n self.unit_label = _unit_label\n\n if self.radioButton_uz.isChecked():\n self.localDof = 2\n self.localdof_label = \"Uz\"\n self.unit_label = _unit_label\n \n if self.radioButton_rx.isChecked():\n self.localDof = 3\n self.localdof_label = \"Rx\"\n self.unit_label = _unit_label\n\n if self.radioButton_ry.isChecked():\n self.localDof = 4\n self.localdof_label = \"Ry\"\n self.unit_label = _unit_label\n\n if self.radioButton_rz.isChecked():\n self.localDof = 5\n self.localdof_label = \"Rz\"\n self.unit_label = _unit_label\n\n if self.SingleDiff:\n _unit_label = \"rad/s\"\n elif self.DoubleDiff:\n _unit_label = \"rad/s²\"\n else:\n _unit_label = \"rad\"\n\n if not export:\n self.plot()\n\n return False\n\n def ExportResults(self):\n \n if self.lineEdit_FileName.text() != \"\":\n if self.save_path != \"\":\n self.export_path_folder = self.save_path + \"/\"\n else:\n title = \"None folder selected\"\n message = \"Plese, choose a folder before trying export the results.\"\n PrintMessageInput([title, message, window_title1])\n return\n else:\n title = \"Empty file name\"\n message = \"Inform a file name before trying export the results.\"\n PrintMessageInput([title, message, window_title1])\n return\n \n if self.check(export=True):\n return\n\n freq = self.frequencies\n self.export_path = self.export_path_folder + self.lineEdit_FileName.text() + \".dat\"\n response = self.get_response()\n\n if self.save_Absolute:\n header = (\"Frequency[Hz], Real part [{}], Imaginary part [{}], Absolute [{}]\").format(self.unit_label, self.unit_label, self.unit_label)\n data_to_export = np.array([freq, np.real(response), np.imag(response), np.abs(response)]).T\n elif self.save_Real_Imaginary:\n header = (\"Frequency[Hz], Real part [{}], Imaginary part [{}]\").format(self.unit_label, self.unit_label)\n data_to_export = np.array([freq, np.real(response), np.imag(response)]).T \n \n np.savetxt(self.export_path, data_to_export, delimiter=\",\", header=header)\n title = \"Information\"\n message = \"The results have been exported.\"\n PrintMessageInput([title, message, window_title2])\n\n def get_response(self):\n response = get_structural_frf(self.preprocessor, self.solution, self.node_ID, self.localDof)\n if self.SingleDiff:\n output_data = response*(1j*2*np.pi)*self.frequencies\n elif self.DoubleDiff:\n output_data = response*((1j*2*np.pi*self.frequencies)**2)\n else:\n output_data = response\n return output_data\n\n def plot(self):\n\n fig = plt.figure(figsize=[12,7])\n ax = fig.add_subplot(1,1,1)\n\n frequencies = self.frequencies\n response = self.get_response()\n\n if self.imported_data is not None:\n data = self.imported_data\n imported_Xvalues = data[:,0]\n\n if self.plotAbs:\n imported_Yvalues = np.abs(data[:,1] + 1j*data[:,2]) \n elif self.plotReal:\n imported_Yvalues = data[:,1]\n elif self.plotImag:\n imported_Yvalues = data[:,2]\n\n if self.plotAbs:\n response = np.abs(response)\n ax.set_ylabel((\"Structural Response - Absolute [{}]\").format(self.unit_label), fontsize = 14, fontweight = 'bold')\n if not float(0) in response:\n if self.imported_data is None:\n ax.set_yscale('log', nonposy='clip')\n else:\n if not float(0) in imported_Yvalues:\n ax.set_yscale('log', nonposy='clip')\n elif self.plotReal:\n response = np.real(response)\n ax.set_ylabel((\"Structural Response - Real [{}]\").format(self.unit_label), fontsize = 14, fontweight = 'bold')\n elif self.plotImag:\n response = np.imag(response)\n ax.set_ylabel((\"Structural Response - Imaginary 
[{}]\").format(self.unit_label), fontsize = 14, fontweight = 'bold')\n \n #cursor = Cursor(ax)\n cursor = SnaptoCursor(ax, frequencies, response, self.cursor)\n plt.connect('motion_notify_event', cursor.mouse_move)\n\n legend_label = \"Response {} at node {}\".format(self.localdof_label, self.node_ID)\n\n if self.imported_data is None:\n\n if float(0) in response or self.plotReal or self.plotImag:\n if float(0) in response[1:] or self.plotReal or self.plotImag:\n first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)\n else:\n first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)\n else: \n first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)\n _legends = plt.legend(handles=[first_plot], labels=[legend_label], loc='upper right')\n else:\n if float(0) in response or float(0) in imported_Yvalues or self.plotReal or self.plotImag:\n if float(0) in response[1:] or float(0) in imported_Yvalues[1:] or self.plotReal or self.plotImag:\n first_plot, = plt.plot(frequencies, response, color=[1,0,0], linewidth=2)\n second_plot, = plt.plot(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle=\"--\")\n else: \n first_plot, = plt.semilogy(frequencies[1:], response[1:], color=[1,0,0], linewidth=2, label=legend_label)\n second_plot, = plt.semilogy(imported_Xvalues[1:], imported_Yvalues[1:], color=[0,0,1], linewidth=1, linestyle=\"--\")\n else: \n first_plot, = plt.semilogy(frequencies, response, color=[1,0,0], linewidth=2, label=legend_label)\n second_plot, = plt.semilogy(imported_Xvalues, imported_Yvalues, color=[0,0,1], linewidth=1, linestyle=\"--\")\n _legends = plt.legend(handles=[first_plot, second_plot], labels=[legend_label, self.legend_imported], loc='upper right')\n\n plt.gca().add_artist(_legends)\n\n ax.set_title(('STRUCTURAL FREQUENCY RESPONSE - {}').format(self.analysisMethod.upper()), fontsize = 16, fontweight = 'bold')\n ax.set_xlabel(('Frequency [Hz]'), fontsize = 14, fontweight = 'bold')\n\n plt.show()" ]
[ [ "numpy.imag", "numpy.real", "numpy.savetxt", "numpy.array", "numpy.loadtxt" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.gca", "matplotlib.pyplot.connect", "numpy.conjugate", "matplotlib.pyplot.plot", "numpy.max", "numpy.log10", "matplotlib.pyplot.close", "numpy.searchsorted", "numpy.savetxt", "numpy.array", "matplotlib.pyplot.show", "numpy.loadtxt", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.semilogy", "matplotlib.pyplot.gca", "matplotlib.pyplot.connect", "numpy.abs", "numpy.imag", "numpy.min", "matplotlib.pyplot.plot", "numpy.max", "numpy.real", "numpy.searchsorted", "numpy.savetxt", "matplotlib.pyplot.show", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MaraniMatias/machine-learning
[ "5346a60d0a16942a889d67b4c313b9332eb8f50a" ]
[ "tensorflow/mandelbrot-set.py" ]
[ "# Import libraries for simulation\nimport tensorflow as tf\nimport numpy as np\n\n# Imports for visualization\nimport PIL.Image\nfrom io import BytesIO\nfrom IPython.display import Image, display\n\n\ndef DisplayFractal(a, fmt='jpeg'):\n \"\"\"Display an array of iteration counts as a\n colorful picture of a fractal.\"\"\"\n a_cyclic = (6.28*a/20.0).reshape(list(a.shape)+[1])\n img = np.concatenate([10+20*np.cos(a_cyclic),\n 30+50*np.sin(a_cyclic),\n 155-80*np.cos(a_cyclic)], 2)\n img[a == a.max()] = 0\n a = img\n a = np.uint8(np.clip(a, 0, 255))\n f = BytesIO()\n PIL.Image.fromarray(a).save(f, fmt)\n display(Image(data=f.getvalue()))\n\n\n#\n# Session and Variable Initialization\n#\nsess = tf.InteractiveSession()\n\n\n# Use NumPy to create a 2D array of complex numbers\nY, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]\nZ = X+1j*Y\n\nxs = tf.constant(Z.astype(np.complex64))\nzs = tf.Variable(xs)\nns = tf.Variable(tf.zeros_like(xs, tf.float32))\n\ntf.global_variables_initializer().run()\n\n# Compute the new values of z: z^2 + x\nzs_ = zs*zs + xs\n\n# Have we diverged with this new value?\nnot_diverged = tf.abs(zs_) < 4\n\n# Operation to update the zs and the iteration count.\n#\n# Note: We keep computing zs after they diverge! This\n# is very wasteful! There are better, if a little\n# less simple, ways to do this.\n#\nstep = tf.group(\n zs.assign(zs_),\n ns.assign_add(tf.cast(not_diverged, tf.float32))\n)\n\nfor i in range(200):\n step.run()\n\nDisplayFractal(ns.eval())\n" ]
[ [ "tensorflow.InteractiveSession", "tensorflow.Variable", "numpy.clip", "tensorflow.cast", "numpy.cos", "numpy.sin", "tensorflow.zeros_like", "tensorflow.global_variables_initializer", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
pedrocwb/ms2deepscore
[ "fb6b131d9b7a80a98185fca49cddca5b162dc0cc" ]
[ "ms2deepscore/data_generators.py" ]
[ "\"\"\" Data generators for training/inference with siamese Keras model.\n\"\"\"\nimport warnings\nfrom typing import List, Iterator, NamedTuple\n\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras.utils import Sequence\n\nfrom .typing import BinnedSpectrumType\n\n\nclass SpectrumPair(NamedTuple):\n \"\"\"\n Represents a pair of binned spectrums\n \"\"\"\n spectrum1: BinnedSpectrumType\n spectrum2: BinnedSpectrumType\n\n\nclass DataGeneratorBase(Sequence):\n def __init__(self, binned_spectrums: List[BinnedSpectrumType],\n reference_scores_df: pd.DataFrame, dim: int, **settings):\n \"\"\"Base for data generator generating data for a siamese model.\n\n Parameters\n ----------\n binned_spectrums\n List of BinnedSpectrum objects with the binned peak positions and intensities.\n reference_scores_df\n Pandas DataFrame with reference similarity scores (=labels) for compounds identified\n by inchikeys (first 14 characters). Columns and index should be inchikeys, the value\n in a row x column depicting the similarity score for that pair. Must be symmetric\n (reference_scores_df[i,j] == reference_scores_df[j,i]) and column names should be\n identical to the index.\n dim\n Input vector dimension.\n\n As part of **settings, defaults for the following parameters can be set:\n batch_size\n Number of pairs per batch. Default=32.\n num_turns\n Number of pairs for each InChiKey14 during each epoch. Default=1.\n shuffle\n Set to True to shuffle IDs every epoch. Default=True\n ignore_equal_pairs\n Set to True to ignore pairs of two identical spectra. Default=True\n same_prob_bins\n List of tuples that define ranges of the true label to be trained with\n equal frequencies. Default is set to [(0, 0.5), (0.5, 1)], which means\n that pairs with scores <=0.5 will be picked as often as pairs with scores\n > 0.5.\n augment_removal_max\n Maximum fraction of peaks (if intensity < below augment_removal_intensity)\n to be removed randomly. Default is set to 0.2, which means that between\n 0 and 20% of all peaks with intensities < augment_removal_intensity\n will be removed.\n augment_removal_intensity\n Specifying that only peaks with intensities < max_intensity will be removed.\n augment_intensity\n Change peak intensities by a random number between 0 and augment_intensity.\n Default=0.1, which means that intensities are multiplied by 1+- a random\n number within [0, 0.1].\n augment_noise_max\n Max number of 'new' noise peaks to add to the spectrum, between 0 to `augment_noise_max`\n of peaks are added.\n augment_noise_intensity\n Intensity of the 'new' noise peaks to add to the spectrum\n use_fixed_set\n Toggles using a fixed dataset, if set to True the same dataset will be generated each\n epoch. 
Default is False.\n \"\"\"\n\n self._validate_labels(reference_scores_df)\n\n # Set all other settings to input (or otherwise to defaults):\n self._set_generator_parameters(**settings)\n self.binned_spectrums = binned_spectrums\n self.reference_scores_df = self._exclude_nans_from_labels(reference_scores_df)\n self.reference_scores_df = self._transform_to_inchikey14(self.reference_scores_df)\n self._collect_and_validate_inchikeys()\n self.dim = dim\n self.fixed_set = dict()\n\n def _collect_and_validate_inchikeys(self):\n \"\"\"Collect all inchikeys14 (first 14 characters) of all binned_spectrums\n and check if all are present in the reference scores as well.\n \"\"\"\n self.spectrum_inchikeys = np.array([s.get(\"inchikey\")[:14] for s in self.binned_spectrums])\n for inchikey in np.unique(self.spectrum_inchikeys):\n assert inchikey in self.reference_scores_df.index, \\\n \"InChIKey in given spectrum not found in reference scores\"\n\n @staticmethod\n def _validate_labels(reference_scores_df: pd.DataFrame):\n if set(reference_scores_df.index) != set(reference_scores_df.columns):\n raise ValueError(\"index and columns of reference_scores_df are not identical\")\n\n @staticmethod\n def _transform_to_inchikey14(reference_scores_df: pd.DataFrame):\n \"\"\"Transform index and column names from potential full InChIKeys to InChIKey14\"\"\"\n reference_scores_df.index = [x[:14] for x in reference_scores_df.index]\n reference_scores_df.columns = [x[:14] for x in reference_scores_df.columns]\n return reference_scores_df\n\n @staticmethod\n def _exclude_nans_from_labels(reference_scores_df: pd.DataFrame):\n \"\"\"Exclude nans in reference_scores_df, exclude columns and rows if there is any NaN\n value\"\"\"\n clean_df = reference_scores_df.dropna(axis='rows') # drop rows with any NaN\n clean_df = clean_df[clean_df.index] # drop corresponding columns\n n_dropped = len(reference_scores_df) - len(clean_df)\n if n_dropped > 0:\n print(f\"{n_dropped} nans among {len(reference_scores_df)} labels will be excluded.\")\n return clean_df\n\n def _set_generator_parameters(self, **settings):\n \"\"\"Set parameter for data generator. Use below listed defaults unless other\n input is provided.\n\n Parameters\n ----------\n batch_size\n Number of pairs per batch. Default=32.\n num_turns\n Number of pairs for each InChiKey14 during each epoch. Default=1\n shuffle\n Set to True to shuffle IDs every epoch. Default=True\n ignore_equal_pairs\n Set to True to ignore pairs of two identical spectra. Default=True\n same_prob_bins\n List of tuples that define ranges of the true label to be trained with\n equal frequencies. Default is set to [(0, 0.5), (0.5, 1)], which means\n that pairs with scores <=0.5 will be picked as often as pairs with scores\n > 0.5.\n augment_removal_max\n Maximum fraction of peaks (if intensity < below augment_removal_intensity)\n to be removed randomly. 
Default is set to 0.3, which means that between\n            0 and 30% of all peaks with intensities < augment_removal_intensity\n            will be removed.\n        augment_removal_intensity\n            Specifying that only peaks with intensities < max_intensity will be removed.\n        augment_intensity\n            Change peak intensities by a random number between 0 and augment_intensity.\n            Default=0.4, which means that intensities are multiplied by 1+- a random\n            number within [0, 0.4].\n        augment_noise_max\n            Max number of 'new' noise peaks to add to the spectrum, between 0 and `augment_noise_max`\n            of peaks are added.\n        augment_noise_intensity\n            Intensity of the 'new' noise peaks to add to the spectrum\n        use_fixed_set\n            Toggles using a fixed dataset, if set to True the same dataset will be generated each\n            epoch. Default is False.\n        \"\"\"\n        defaults = dict(\n            batch_size=32,\n            num_turns=1,\n            ignore_equal_pairs=True,\n            shuffle=True,\n            same_prob_bins=[(0, 0.5), (0.5, 1)],\n            augment_removal_max=0.3,\n            augment_removal_intensity=0.2,\n            augment_intensity=0.4,\n            augment_noise_max=10,\n            augment_noise_intensity=0.01,\n            use_fixed_set=False\n        )\n\n        # Set default parameters or replace by **settings input\n        for key in defaults:\n            if key in settings:\n                print(\"The value for {} is set from {} (default) to {}\".format(key, defaults[key],\n                                                                               settings[key]))\n            else:\n                settings[key] = defaults[key]\n        assert 0.0 <= settings[\"augment_removal_max\"] <= 1.0, \"Expected value within [0,1]\"\n        assert 0.0 <= settings[\"augment_removal_intensity\"] <= 1.0, \"Expected value within [0,1]\"\n        if settings[\"use_fixed_set\"] and settings[\"shuffle\"]:\n            warnings.warn('When using a fixed set, data will not be shuffled')\n        if settings[\"use_fixed_set\"]:\n            np.random.seed(42)\n        self.settings = settings\n\n    def _find_match_in_range(self, inchikey1, target_score_range):\n        \"\"\"Randomly pick ID for a pair with inchikey_id1 that has a score in\n        target_score_range. When no such score exists, iteratively widen the range\n        in steps of 0.1.\n\n        Parameters\n        ----------\n        inchikey1\n            Inchikey (first 14 characters) to be paired up with another compound within\n            target_score_range.\n        target_score_range\n            lower and upper bound of label (score) to find an ID of.\n        \"\"\"\n        # Part 1 - find match within range (or expand range iteratively)\n        extend_range = 0\n        low, high = target_score_range\n        inchikey2 = None\n        while inchikey2 is None:\n            matching_inchikeys = self.reference_scores_df.index[\n                (self.reference_scores_df[inchikey1] > low - extend_range)\n                & (self.reference_scores_df[inchikey1] <= high + extend_range)]\n            if self.settings[\"ignore_equal_pairs\"]:\n                matching_inchikeys = matching_inchikeys[matching_inchikeys != inchikey1]\n            if len(matching_inchikeys) > 0:\n                inchikey2 = np.random.choice(matching_inchikeys)\n            extend_range += 0.1\n        return inchikey2\n\n    def __getitem__(self, batch_index: int):\n        \"\"\"Generate one batch of data.\n\n        If use_fixed_set=True we try retrieving the batch from self.fixed_set (or store it if\n        this is the first epoch). 
This ensures a fixed set of data is generated each epoch.\n \"\"\"\n if self.settings['use_fixed_set'] and batch_index in self.fixed_set:\n return self.fixed_set[batch_index]\n if self.settings['use_fixed_set'] and batch_index == 0:\n np.random.seed(42)\n spectrum_pairs = self._spectrum_pair_generator(batch_index)\n X, y = self.__data_generation(spectrum_pairs)\n if self.settings['use_fixed_set']:\n self.fixed_set[batch_index] = (X, y)\n return X, y\n\n def _data_augmentation(self, spectrum_binned):\n \"\"\"Data augmentation.\n Parameters\n ----------\n spectrum_binned\n Dictionary with the binned peak positions and intensities.\n \"\"\"\n idx = np.array([int(x) for x in spectrum_binned.keys()])\n values = np.array(list(spectrum_binned.values()))\n # Augmentation 1: peak removal (peaks < augment_removal_max)\n if self.settings[\"augment_removal_max\"] or self.settings[\"augment_removal_intensity\"]:\n # TODO: Factor out function with documentation + example?\n indices_select = np.where(values < self.settings[\"augment_removal_max\"])[0]\n removal_part = np.random.random(1) * self.settings[\"augment_removal_max\"]\n indices_select = np.random.choice(indices_select,\n int(np.ceil((1 - removal_part)*len(indices_select))))\n indices = np.concatenate((indices_select,\n np.where(values >= self.settings[\"augment_removal_intensity\"])[0]))\n if len(indices) > 0:\n idx = idx[indices]\n values = values[indices]\n # Augmentation 2: Change peak intensities\n if self.settings[\"augment_intensity\"]:\n # TODO: Factor out function with documentation + example?\n values = (1 - self.settings[\"augment_intensity\"] * 2 * (np.random.random(values.shape) - 0.5)) * values\n # Augmentation 3: Peak addition\n if self.settings[\"augment_noise_max\"] and self.settings[\"augment_noise_max\"] > 0:\n idx, values = self._peak_addition(idx, values)\n return idx, values\n\n def _peak_addition(self, idx, values):\n \"\"\"\n For each of between 0-augment_noise_max randomly selected zero-intensity bins\n that bin’s intensity is set to random values between 0 and augment_noise_intensity\n \"\"\"\n n_noise_peaks = np.random.randint(0, self.settings[\"augment_noise_max\"])\n idx_no_peaks = np.setdiff1d(np.arange(0, self.dim), idx)\n idx_noise_peaks = np.random.choice(idx_no_peaks, n_noise_peaks)\n idx = np.concatenate((idx, idx_noise_peaks))\n new_values = self.settings[\"augment_noise_intensity\"] * np.random.random(len(idx_noise_peaks))\n values = np.concatenate((values, new_values))\n return idx, values\n\n def _get_spectrum_with_inchikey(self, inchikey: str) -> BinnedSpectrumType:\n \"\"\"\n Get a random spectrum matching the `inchikey` argument. 
NB: A compound (identified by an\n inchikey) can have multiple measured spectrums in a binned spectrum dataset.\n \"\"\"\n matching_spectrum_id = np.where(self.spectrum_inchikeys == inchikey)[0]\n return self.binned_spectrums[np.random.choice(matching_spectrum_id)]\n\n def __data_generation(self, spectrum_pairs: Iterator[SpectrumPair]):\n \"\"\"Generates data containing batch_size samples\"\"\"\n # Initialization\n X = [np.zeros((self.settings[\"batch_size\"], self.dim)) for i in range(2)]\n y = np.zeros((self.settings[\"batch_size\"],))\n\n # Generate data\n for i_pair, pair in enumerate(spectrum_pairs):\n for i_spectrum, spectrum in enumerate(pair):\n idx, values = self._data_augmentation(spectrum.binned_peaks)\n X[i_spectrum][i_pair, idx] = values\n y[i_pair] = self.reference_scores_df[pair[0].get(\"inchikey\")[:14]][pair[1].get(\"inchikey\")[:14]]\n\n return X, y\n\n def _spectrum_pair_generator(self, batch_index: int) -> Iterator[SpectrumPair]:\n \"\"\"\n Generator of spectrum pairs within a batch, inheriting classes should implement this.\n \"\"\"\n raise NotImplementedError()\n\n\nclass DataGeneratorAllSpectrums(DataGeneratorBase):\n \"\"\"Generates data for training a siamese Keras model\n This generator will provide training data by picking each training spectrum\n in binned_spectrums num_turns times in every epoch and pairing it with a randomly chosen\n other spectrum that corresponds to a reference score as defined in same_prob_bins.\n \"\"\"\n def __init__(self, binned_spectrums: List[BinnedSpectrumType],\n reference_scores_df: pd.DataFrame, dim: int, **settings):\n \"\"\"Generates data for training a siamese Keras model.\n Parameters\n ----------\n binned_spectrums\n List of BinnedSpectrum objects with the binned peak positions and intensities.\n reference_scores_df\n Pandas DataFrame with reference similarity scores (=labels) for compounds identified\n by inchikeys. Columns and index should be inchikeys, the value in a row x column\n depicting the similarity score for that pair. Must be symmetric\n (reference_scores_df[i,j] == reference_scores_df[j,i]) and column names should be\n identical to the index.\n dim\n Input vector dimension.\n As part of **settings, defaults for the following parameters can be set:\n batch_size\n Number of pairs per batch. Default=32.\n num_turns\n Number of pairs for each InChiKey during each epoch. Default=1.\n shuffle\n Set to True to shuffle IDs every epoch. Default=True\n ignore_equal_pairs\n Set to True to ignore pairs of two identical spectra. Default=True\n same_prob_bins\n List of tuples that define ranges of the true label to be trained with\n equal frequencies. Default is set to [(0, 0.5), (0.5, 1)], which means\n that pairs with scores <=0.5 will be picked as often as pairs with scores\n > 0.5.\n augment_removal_max\n Maximum fraction of peaks (if intensity < below augment_removal_intensity)\n to be removed randomly. 
Default is set to 0.3, which means that between\n            0 and 30% of all peaks with intensities < augment_removal_intensity\n            will be removed.\n        augment_removal_intensity\n            Specifying that only peaks with intensities < max_intensity will be removed.\n        augment_intensity\n            Change peak intensities by a random number between 0 and augment_intensity.\n            Default=0.4, which means that intensities are multiplied by 1+- a random\n            number within [0, 0.4].\n        \"\"\"\n        super().__init__(binned_spectrums, reference_scores_df, dim, **settings)\n        self.reference_scores_df = self._exclude_not_selected_inchikeys(self.reference_scores_df)\n        self.on_epoch_end()\n\n    def __len__(self):\n        \"\"\"Denotes the number of batches per epoch\n        NB: The last (half-empty) batch is omitted, so not all data is seen every epoch.\n        This is expected behavior; with the shuffling in each epoch this is OK and keeps the code simple.\n        \"\"\"\n        return int(self.settings[\"num_turns\"]) * int(np.floor(len(self.binned_spectrums) / self.settings[\"batch_size\"]))\n\n    def _spectrum_pair_generator(self, batch_index: int) -> Iterator[SpectrumPair]:\n        \"\"\"\n        Generate spectrum pairs for batch. For each 'source' spectrum, get the inchikey and\n        find an inchikey in the desired target score range. Then randomly get a spectrum for\n        the matching inchikey.\n        \"\"\"\n        same_prob_bins = self.settings[\"same_prob_bins\"]\n        batch_size = self.settings[\"batch_size\"]\n        indexes = self.indexes[batch_index*batch_size:(batch_index+1)*batch_size]\n        for index in indexes:\n            spectrum1 = self.binned_spectrums[index]\n            inchikey1 = spectrum1.get(\"inchikey\")[:14]\n\n            # Randomly pick the desired target score range and pick matching ID\n            target_score_range = same_prob_bins[np.random.choice(np.arange(len(same_prob_bins)))]\n            inchikey2 = self._find_match_in_range(inchikey1, target_score_range)\n            spectrum2 = self._get_spectrum_with_inchikey(inchikey2)\n            yield SpectrumPair(spectrum1, spectrum2)\n\n    def on_epoch_end(self):\n        \"\"\"Updates indexes after each epoch\"\"\"\n        self.indexes = np.tile(np.arange(len(self.binned_spectrums)), int(self.settings[\"num_turns\"]))\n        if self.settings[\"shuffle\"]:\n            np.random.shuffle(self.indexes)\n\n    def _exclude_not_selected_inchikeys(self, reference_scores_df: pd.DataFrame) -> pd.DataFrame:\n        \"\"\"Exclude rows and columns of reference_scores_df for all InChIKeys which are not\n        present in the binned_spectrums.\"\"\"\n        inchikeys_in_selection = {s.get(\"inchikey\")[:14] for s in self.binned_spectrums}\n        clean_df = reference_scores_df.loc[reference_scores_df.index.isin(inchikeys_in_selection),\n                                           reference_scores_df.columns.isin(inchikeys_in_selection)]\n        n_dropped = len(self.reference_scores_df) - len(clean_df)\n        if n_dropped > 0:\n            print(f\"{len(clean_df)} out of {len(self.reference_scores_df)} InChIKeys found in selected spectrums.\")\n        return clean_df\n\n\nclass DataGeneratorAllInchikeys(DataGeneratorBase):\n    \"\"\"Generates data for training a siamese Keras model\n    This generator will 
provide training data by picking each training InchiKey\n    listed in *selected_inchikeys* num_turns times in every epoch. It will then randomly\n    pick one of the spectra corresponding to this InchiKey (if multiple) and pair it\n    with a randomly chosen other spectrum that corresponds to a reference score\n    as defined in same_prob_bins.\n    \"\"\"\n    def __init__(self, binned_spectrums: List[BinnedSpectrumType], selected_inchikeys: list,\n                 reference_scores_df: pd.DataFrame, dim: int, **settings):\n        \"\"\"Generates data for training a siamese Keras model.\n        Parameters\n        ----------\n        binned_spectrums\n            List of BinnedSpectrum objects with the binned peak positions and intensities.\n        reference_scores_df\n            Pandas DataFrame with reference similarity scores (=labels) for compounds identified\n            by inchikeys. Columns and index should be inchikeys, the value in a row x column\n            depicting the similarity score for that pair. Must be symmetric\n            (reference_scores_df[i,j] == reference_scores_df[j,i]) and column names should be identical to the index.\n        selected_inchikeys\n            List of inchikeys to use for training.\n        dim\n            Input vector dimension.\n        As part of **settings, defaults for the following parameters can be set:\n        batch_size\n            Number of pairs per batch. Default=32.\n        num_turns\n            Number of pairs for each InChiKey during each epoch. Default=1\n        shuffle\n            Set to True to shuffle IDs every epoch. Default=True\n        ignore_equal_pairs\n            Set to True to ignore pairs of two identical spectra. Default=True\n        same_prob_bins\n            List of tuples that define ranges of the true label to be trained with\n            equal frequencies. Default is set to [(0, 0.5), (0.5, 1)], which means\n            that pairs with scores <=0.5 will be picked as often as pairs with scores\n            > 0.5.\n        augment_removal_max\n            Maximum fraction of peaks (if intensity < below augment_removal_intensity)\n            to be removed randomly. Default is set to 0.3, which means that between\n            0 and 30% of all peaks with intensities < augment_removal_intensity\n            will be removed.\n        augment_removal_intensity\n            Specifying that only peaks with intensities < max_intensity will be removed.\n        augment_intensity\n            Change peak intensities by a random number between 0 and augment_intensity.\n            Default=0.4, which means that intensities are multiplied by 1+- a random\n            number within [0, 0.4].\n        \"\"\"\n        super().__init__(binned_spectrums, reference_scores_df, dim, **settings)\n        self.reference_scores_df = self._data_selection(reference_scores_df, selected_inchikeys)\n        self.on_epoch_end()\n\n    def __len__(self):\n        \"\"\"Denotes the number of batches per epoch\n        NB1: self.reference_scores_df only contains 'selected' inchikeys, see `self._data_selection`.\n        NB2: We don't see all data every epoch, because the last half-empty batch is omitted.\n        This is expected behavior, with the shuffling this is OK.\n        \"\"\"\n        return int(self.settings[\"num_turns\"]) * int(np.floor(len(self.reference_scores_df) / self.settings[\n            \"batch_size\"]))\n\n    def _spectrum_pair_generator(self, batch_index: int) -> Iterator[SpectrumPair]:\n        \"\"\"\n        Generate spectrum pairs for batch. For each 'source' inchikey pick an inchikey in the\n        desired target score range. 
Then randomly get spectrums for this pair of inchikeys.\n \"\"\"\n same_prob_bins = self.settings[\"same_prob_bins\"]\n batch_size = self.settings[\"batch_size\"]\n # Go through all indexes\n indexes = self.indexes[batch_index * batch_size:(batch_index + 1) * batch_size]\n\n for index in indexes:\n inchikey1 = self.reference_scores_df.index[index]\n # Randomly pick the desired target score range and pick matching inchikey\n target_score_range = same_prob_bins[np.random.choice(np.arange(len(same_prob_bins)))]\n inchikey2 = self._find_match_in_range(inchikey1, target_score_range)\n spectrum1 = self._get_spectrum_with_inchikey(inchikey1)\n spectrum2 = self._get_spectrum_with_inchikey(inchikey2)\n yield SpectrumPair(spectrum1, spectrum2)\n\n @staticmethod\n def _data_selection(reference_scores_df, selected_inchikeys):\n \"\"\"\n Select labeled data to generate from based on `selected_inchikeys`\n \"\"\"\n return reference_scores_df.loc[selected_inchikeys, selected_inchikeys]\n\n def on_epoch_end(self):\n \"\"\"Updates indexes after each epoch\"\"\"\n self.indexes = np.tile(np.arange(len(self.reference_scores_df)), int(self.settings[\"num_turns\"]))\n if self.settings[\"shuffle\"]:\n np.random.shuffle(self.indexes)\n" ]
[ [ "numpy.random.random", "numpy.random.seed", "numpy.random.choice", "numpy.unique", "numpy.arange", "numpy.random.shuffle", "numpy.concatenate", "numpy.zeros", "numpy.where", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
chsafouane/adaptnlp
[ "34bfb8fef32d1e59f89a94799db87ed6da774965" ]
[ "adaptnlp/inference/text_generation.py" ]
[ "# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/09_text_generation.ipynb (unless otherwise specified).\n\n__all__ = ['logger', 'TransformersTextGenerator', 'EasyTextGenerator']\n\n# Cell\nimport logging\nfrom typing import List, Dict, Union\nfrom collections import defaultdict\n\nimport torch\nfrom torch.utils.data import TensorDataset\n\nfrom transformers import (\n AutoTokenizer,\n AutoModelForCausalLM,\n PreTrainedTokenizer,\n PreTrainedModel,\n)\n\nfrom fastprogress.fastprogress import progress_bar\n\nfrom ..model import AdaptiveModel, DataLoader\nfrom ..model_hub import HFModelResult\n\nfrom fastai.torch_core import apply, default_device, to_device\n\n# Cell\nlogger = logging.getLogger(__name__)\n\n# Cell\nclass TransformersTextGenerator(AdaptiveModel):\n \"Adaptive model for Transformer's Language Models\"\n\n def __init__(\n self,\n tokenizer: PreTrainedTokenizer, # A tokenizer object from Huggingface's transformers (TODO)and tokenizers\n model: PreTrainedModel # A transformers Language model\n ):\n # Load up model and tokenizer\n self.tokenizer = tokenizer\n super().__init__()\n\n # Sets internal model\n self.set_model(model)\n\n # Setup cuda and automatic allocation of model\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.model.to(self.device)\n\n @classmethod\n def load(\n cls,\n model_name_or_path: str # A key string of one of Transformer's pre-trained Language Model\n ) -> AdaptiveModel:\n \"Class method for loading and constructing this Model\"\n tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, pad_token=\"<PAD>\")\n model = AutoModelForCausalLM.from_pretrained(model_name_or_path)\n generator = cls(tokenizer, model)\n return generator\n\n def predict(\n self,\n text: Union[List[str], str], # Sentences to run inference on\n mini_batch_size: int = 32, # Mini batch size\n num_tokens_to_produce: int = 50, # Number of tokens you want to generate\n ) -> List[str]: # A list of predicted sentences\n \"Predict method for running inference using the pre-trained sequence classifier model. 
Keyword arguments for parameters of the method `Transformers.PreTrainedModel.generate()` can be used as well.\"\n with torch.no_grad():\n\n # Make all inputs lists\n if isinstance(text, str):\n text = [text]\n\n dataset = self._tokenize(text)\n dataloader = DataLoader(dataset, batch_size=mini_batch_size)\n results = []\n\n logger.info(f'Running text generator on {len(dataset)} text sequences')\n logger.info(f'Batch size = {mini_batch_size}')\n for batch in progress_bar(dataloader):\n self.model.eval()\n batch = apply(to_device, batch)\n\n if len(batch) == 3:\n inputs = {\n 'input_ids': batch[0],\n 'attention_masks': batch[1],\n 'token_type_ids': batch[2],\n }\n else:\n inputs = {\n 'input_ids': batch[0],\n 'attention_masks': batch[1],\n }\n # model.generate() does not have batch inference implemented yet\n generated_text = self._batch_generate(\n inputs=inputs,\n seq_len=batch[0].shape[1],\n num_tokens_to_produce=num_tokens_to_produce,\n )\n results += generated_text\n\n return {\"generated_text\":results}\n\n def _tokenize(self, text: Union[List[str], str]) -> TensorDataset:\n \"\"\" Batch tokenizes text and produces a `TensorDataset` with text \"\"\"\n\n tokenized_text = self.tokenizer.batch_encode_plus(\n text,\n return_tensors=\"pt\",\n padding=\"longest\",\n )\n\n dataset = TensorDataset(\n tokenized_text[\"input_ids\"],\n tokenized_text[\"attention_mask\"],\n )\n\n return dataset\n\n def _batch_generate(\n self, inputs: Dict, seq_len: int, num_tokens_to_produce: int\n ) -> List[str]:\n \"\"\"Generates text data with varying text sizes\"\"\"\n input_ids = inputs[\"input_ids\"]\n attn_mask = inputs[\"attention_masks\"]\n\n pad_token_id = self.tokenizer.pad_token_id\n eos_token_id = self.tokenizer.eos_token_id\n eos_not_in_sents = torch.ones(input_ids.shape[0]).long().to(self.device)\n\n # we need to get the token ids of the last non-padded value\n last_non_masked_idx = torch.sum(attn_mask, dim=1) - 1\n start_idx = (\n (last_non_masked_idx)\n .view(-1, 1)\n .repeat(1, self.tokenizer.vocab_size)\n .unsqueeze(1)\n )\n\n # get correct position ids\n position_ids = torch.tensor(\n [list(range(seq_len)) for i in range(input_ids.shape[0])]\n ).to(self.device)\n for i, position_ids_slice in enumerate(position_ids):\n position_ids_slice[last_non_masked_idx[i] :] = position_ids_slice[\n last_non_masked_idx[i]\n ]\n\n for step in range(num_tokens_to_produce):\n outputs = self.model(\n input_ids, attention_mask=attn_mask, position_ids=position_ids\n )\n\n # in the first decoding step, we want to use the 'real' last position for each sentence\n if step == 0:\n next_token_logits = outputs[0].gather(1, start_idx).squeeze(1)\n else:\n next_token_logits = outputs[0][:, -1, :]\n\n next_tokens = torch.argmax(next_token_logits, dim=-1)\n\n # this updates which sentences have not seen an <EOS> token so far\n # if one <EOS> token was seen the sentence is finished\n eos_not_in_sents.mul_(next_tokens.ne(eos_token_id).long())\n\n # either append a padding token here if <EOS> has been seen or append next token\n tokens_to_add = next_tokens * (eos_not_in_sents) + pad_token_id * (\n 1 - eos_not_in_sents\n )\n\n # Update input_ids, attn_mask and position_ids\n input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)\n attn_mask = torch.cat(\n [attn_mask, torch.ones((attn_mask.shape[0], 1)).long().to(self.device)],\n dim=1,\n )\n position_ids = torch.cat(\n [position_ids, (position_ids[:, -1] + 1).unsqueeze(-1)], dim=1\n )\n\n return [\n self.tokenizer.decode(output, skip_special_tokens=True)\n for 
output in input_ids\n        ]\n\n# Cell\nclass EasyTextGenerator:\n    \"Text Generation Module\"\n\n    def __init__(self):\n        self.generators: Dict[str, AdaptiveModel] = defaultdict(bool)\n\n    def generate(\n        self,\n        text: Union[List[str], str], # List of sentences to run inference on\n        model_name_or_path: Union[str, HFModelResult] = \"gpt2\", # A model id or path to a pre-trained model repository or custom trained model directory\n        mini_batch_size: int = 32, # Mini batch size\n        num_tokens_to_produce: int = 50, # Number of tokens you want to generate\n    ) -> List[str]: # A list of predicted sentences\n        \"Predict method for running inference using the pre-trained language model. Keyword arguments for parameters of the method `Transformers.PreTrainedModel.generate()` can be used as well.\"\n        name = getattr(model_name_or_path, 'name', model_name_or_path)\n        if not self.generators[name]:\n            self.generators[name] = TransformersTextGenerator.load(\n                name\n            )\n\n        generator = self.generators[name]\n        return generator.predict(\n            text=text,\n            mini_batch_size=mini_batch_size,\n            num_tokens_to_produce=num_tokens_to_produce\n        )" ]
[ [ "torch.ones", "torch.utils.data.TensorDataset", "torch.sum", "torch.no_grad", "torch.cuda.is_available", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
QAQ-v/HeterGTransformer
[ "8f29ffa86a40b09261092726b87608661139eec0" ]
[ "onmt/utils/misc.py" ]
[ "# -*- coding: utf-8 -*-\r\n\r\nimport torch\r\nimport random\r\nimport inspect\r\nfrom itertools import islice\r\n\r\n\r\ndef split_corpus(path, shard_size):\r\n with open(path, \"rb\") as f:\r\n if shard_size <= 0:\r\n yield f.readlines()\r\n else:\r\n while True:\r\n shard = list(islice(f, shard_size))\r\n if not shard:\r\n break\r\n yield shard\r\n\r\n\r\ndef aeq(*args):\r\n \"\"\"\r\n Assert all arguments have the same value\r\n \"\"\"\r\n arguments = (arg for arg in args)\r\n first = next(arguments)\r\n assert all(arg == first for arg in arguments), \\\r\n \"Not all arguments have the same value: \" + str(args)\r\n\r\n\r\ndef sequence_mask(lengths, max_len=None):\r\n \"\"\"\r\n Creates a boolean mask from sequence lengths.\r\n \"\"\"\r\n # Returns the total number of elements in the input tensor.\r\n batch_size = lengths.numel()\r\n max_len = max_len or lengths.max()\r\n return (torch.arange(0, max_len, device=lengths.device)\r\n .type_as(lengths)\r\n .repeat(batch_size, 1)\r\n .lt(lengths.unsqueeze(1)))\r\n\r\n\r\ndef tile(x, count, dim=0):\r\n \"\"\"\r\n Tiles x on dimension dim count times.\r\n \"\"\"\r\n perm = list(range(len(x.size())))\r\n if dim != 0:\r\n perm[0], perm[dim] = perm[dim], perm[0]\r\n x = x.permute(perm).contiguous()\r\n out_size = list(x.size())\r\n out_size[0] *= count\r\n batch = x.size(0)\r\n x = x.view(batch, -1) \\\r\n .transpose(0, 1) \\\r\n .repeat(count, 1) \\\r\n .transpose(0, 1) \\\r\n .contiguous() \\\r\n .view(*out_size)\r\n if dim != 0:\r\n x = x.permute(perm).contiguous()\r\n return x\r\n\r\n\r\ndef use_gpu(opt):\r\n \"\"\"\r\n Creates a boolean if gpu used\r\n \"\"\"\r\n return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \\\r\n (hasattr(opt, 'gpu') and opt.gpu > -1)\r\n\r\n\r\ndef set_random_seed(seed, is_cuda):\r\n \"\"\"Sets the random seed.\"\"\"\r\n if seed > 0:\r\n torch.manual_seed(seed)\r\n # this one is needed for torchtext random call (shuffled iterator)\r\n # in multi gpu it ensures datasets are read in the same order\r\n random.seed(seed)\r\n # some cudnn methods can be random even after fixing the seed\r\n # unless you tell it to be deterministic\r\n torch.backends.cudnn.deterministic = True\r\n\r\n if is_cuda and seed > 0:\r\n # These ensure same initialization in multi gpu mode\r\n torch.cuda.manual_seed(seed)\r\n\r\n\r\ndef generate_relative_positions_matrix(length, max_relative_positions,\r\n cache=False):\r\n \"\"\"Generate the clipped relative positions matrix\r\n for a given length and maximum relative positions\"\"\"\r\n if cache:\r\n distance_mat = torch.arange(-length+1, 1, 1).unsqueeze(0)\r\n else:\r\n range_vec = torch.arange(length)\r\n range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1)\r\n distance_mat = range_mat - range_mat.transpose(0, 1)\r\n distance_mat_clipped = torch.clamp(distance_mat,\r\n min=-max_relative_positions,\r\n max=max_relative_positions)\r\n # Shift values to be >= 0\r\n final_mat = distance_mat_clipped + max_relative_positions\r\n return final_mat\r\n\r\n\r\ndef relative_matmul(x, z, transpose):\r\n \"\"\"Helper function for relative positions attention.\"\"\"\r\n batch_size = x.shape[0]\r\n heads = x.shape[1]\r\n length = x.shape[2]\r\n x_t = x.permute(2, 0, 1, 3)\r\n x_t_r = x_t.reshape(length, heads * batch_size, -1)\r\n if transpose:\r\n z_t = z.transpose(1, 2)\r\n x_tz_matmul = torch.matmul(x_t_r, z_t)\r\n else:\r\n x_tz_matmul = torch.matmul(x_t_r, z)\r\n x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1)\r\n x_tz_matmul_r_t = 
x_tz_matmul_r.permute(1, 2, 0, 3)\r\n return x_tz_matmul_r_t\r\n\r\n\r\ndef fn_args(fun):\r\n \"\"\"Returns the list of function arguments name.\"\"\"\r\n return inspect.getfullargspec(fun).args\r\n" ]
[ [ "torch.cuda.manual_seed", "torch.manual_seed", "torch.matmul", "torch.arange", "torch.clamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jemil-butt/kernel_inference
[ "a82f6bc3bb399c6d2aea389204c45941a5fec2f8", "a82f6bc3bb399c6d2aea389204c45941a5fec2f8" ]
[ "Figures/Special_case_3_random_field.py", "Support_funs.py" ]
[ "\"\"\"\nThe goal of this script is to showcase kernel inference for the task of estimating\nthe covariance of a random field that exhibits an instationary correlation structure.\nThis produces a figure showcasing the kernel inference procedure and its uses \nas detailed in the case example nr 3 which deals with applications featuring \nmultivariate quantities. More details can be found in the paper:\n'Inference of instationary covariance functions for optimal estimation in \nspatial statistics'.\n\nFor this, do the following:\n 1. Imports and definitions\n 2. Create covariance matrices\n 3. Simulation of autocorrelated data\n 4. Kernel inference\n 5. Optimal estimation\n 6. Plots and illustrations\n \nThe simulations are based on a fixed random seed, to generate data deviating \nfrom the one shown in the paper and different for each run, please comment out\nthe entry 'np.random.seed(x)' in section 1.\n\n\"\"\"\n\n\n\"\"\"\n 1. Imports and definitions -----------------------------------------------\n\"\"\"\n\n\n# i) Imports\n\nimport sys\nsys.path.append(\"..\")\n\nimport numpy as np\nimport numpy.linalg as lina\nimport scipy.linalg as spla\nimport matplotlib.pyplot as plt\nimport Support_funs as sf\nplt.rcParams.update({'font.size': 12})\n\n\n# ii) Definition of auxiliary quantities\n\nn_x=25\nn_y=25\nn_tot=n_x*n_y\n\nn_sample_x=20\nn_sample_y=20\nn_sample=n_sample_x*n_sample_y\n\nn_simu=100\n\nt_x=np.linspace(0,1,n_x)\nt_y=np.linspace(0,1,n_y)\n\ngrid_x,grid_y=np.meshgrid(t_x,t_y)\n\nsample_index_x=np.round(np.linspace(0,n_x-1,n_sample_x))\nsample_index_y=np.round(np.linspace(0,n_y-1,n_sample_y))\nt_x_sample=t_x[sample_index_x.astype(int)]\nt_y_sample=t_y[sample_index_y.astype(int)]\nnp.random.seed(0)\n\ntol=10**(-6)\n\n\n\n\"\"\"\n 2. Create covariance matrices --------------------------------------------\n\"\"\"\n\n\n# i) Define covariance functions\n\nd_x=0.1\ndef cov_fun_x(t1,t2):\n return np.exp(-(lina.norm(t1-t2)/d_x)**2)\n\nd_y=0.2\ndef cov_fun_y(t1,t2):\n return np.exp(-(lina.norm(t1-t2)/d_y)**2)\n\n\n# ii) Derive component covariance matrices\n\nK_x=np.zeros([n_x,n_x])\nfor k in range(n_x):\n for l in range(n_x):\n K_x[k,l]=cov_fun_x(t_x[k],t_x[l])\n \n \nK_y=np.zeros([n_y,n_y])\nfor k in range(n_y):\n for l in range(n_y):\n K_y[k,l]=cov_fun_y(t_y[k],t_y[l])\n\n\n[U_x,S_x,V_x]=lina.svd(K_x)\n[U_y,S_y,V_y]=lina.svd(K_y)\n\n\n\n\"\"\"\n 3. Simulation of autocorrelated data -------------------------------------\n\"\"\"\n\n\n# i) Draw from a distribution with covariance matrix K_x\n\nexplained_var=0.95\nRandom_field_collection=np.zeros([n_y,n_x,n_simu])\nx_measured=np.zeros([n_sample,n_simu])\nfor k in range(n_simu):\n \n Random_field_temp=sf.Simulation_random_field(cov_fun_x, cov_fun_y, grid_x, grid_y, explained_var)\n Random_field_collection[:,:,k]=Random_field_temp\n x_measured_temp=Random_field_temp[np.ix_(sample_index_y.astype(int),sample_index_x.astype(int))]\n x_measured[:,k]=np.ravel(x_measured_temp)\n \nS_emp_measured=(1/n_sample)*x_measured@x_measured.T\n\n\"\"\"\n 4. 
Kernel inference ------------------------------------------------------\n\"\"\"\n\n\n# i) Preparation\n\nr=2\nn_exp=30\n\n\n# iii) Prior and basis\n\nlambda_mat=np.outer(S_y, S_x)\nindex_mat_ordered=np.unravel_index(np.argsort(-lambda_mat.ravel()), [n_y,n_x])\nlambda_ordered=lambda_mat[index_mat_ordered]\n\nlambda_tot=np.sum(lambda_mat)\nlambda_cumsum=np.cumsum(lambda_ordered)\nstop_index=n_exp\n\nLambda_p_cut=np.diag(lambda_ordered[:n_exp])\n\nU_p_cut=np.zeros([n_tot,n_exp])\nPsi=np.zeros([n_sample,n_exp])\nfor k in range(n_exp):\n basis_fun=np.outer(U_y[:,index_mat_ordered[0][k]],U_x[:,index_mat_ordered[1][k]])\n U_p_cut[:,k]=np.ravel(basis_fun)\n Psi[:,k]=np.ravel(basis_fun[np.ix_(sample_index_y.astype(int),sample_index_x.astype(int))])\n\n\n# ii) Execute inference\n\nimport sys\nsys.path.append(\"..\")\nimport KI\nbeta, mu, gamma, C_gamma, KI_logfile = KI.Kernel_inference_homogeneous(x_measured,Lambda_p_cut,Psi,r)\n\n\n\n\"\"\"\n 5. Optimal estimation ---------------------------------------------------\n\"\"\"\n\n\n# i) Auxiliary quantities\n\nn_datapoints= 15\ndatapoint_index_x=np.random.choice(range(n_x),size=n_datapoints)\ndatapoint_index_y=np.random.choice(range(n_y),size=n_datapoints)\nx_datapoints=grid_x[datapoint_index_y.astype(int),datapoint_index_x.astype(int)]\ny_datapoints=grid_y[datapoint_index_y.astype(int),datapoint_index_x.astype(int)]\nrf_datapoints=Random_field_temp[datapoint_index_y.astype(int),datapoint_index_x.astype(int)]\n\n\n# ii) Interpolate using squared exponential\n\nd_sqexp_interpolate=0.3\ndef cov_fun_exp_interpolate(t1,t2):\n return np.exp(-(lina.norm(t1-t2)/d_sqexp_interpolate)**1)\n\nK_sqexp_interpolate_sample=np.zeros([n_datapoints,n_datapoints])\nK_sqexp_interpolate_subset=np.zeros([n_tot,n_datapoints])\nfor k in range(n_datapoints):\n for l in range(n_datapoints):\n t1=np.array([grid_x[datapoint_index_y[k],datapoint_index_x[k]],grid_y[datapoint_index_y[k],datapoint_index_x[k]]])\n t2=np.array([grid_x[datapoint_index_y[l],datapoint_index_x[l]],grid_y[datapoint_index_y[l],datapoint_index_x[l]]])\n K_sqexp_interpolate_sample[k,l]=cov_fun_exp_interpolate(t1,t2)\n \nfor k in range(n_tot):\n for l in range(n_datapoints):\n t1=np.array([np.matrix.flatten(grid_x)[k],np.matrix.flatten(grid_y)[k]])\n t2=np.array([grid_x[datapoint_index_y[l],datapoint_index_x[l]],grid_y[datapoint_index_y[l],datapoint_index_x[l]]])\n K_sqexp_interpolate_subset[k,l]=cov_fun_exp_interpolate(t1,t2)\n \nrf_est_K_exp=np.reshape([email protected](K_sqexp_interpolate_sample,rcond=tol,hermitian=True)@rf_datapoints,[n_y,n_x])\n\n\n# iii) Interpolate using inferred kernel\n\ndatapoint_index=np.ravel_multi_index((datapoint_index_y,datapoint_index_x),[n_y,n_x])\n\nK_gamma=U_p_cut@gamma@U_p_cut.T\nK_gamma_sample=K_gamma[np.ix_(datapoint_index.astype(int),datapoint_index.astype(int))]\nK_gamma_subset=K_gamma[:,datapoint_index.astype(int)]\n\nrf_est_K_gamma=np.reshape([email protected](K_gamma_sample,rcond=tol,hermitian=True)@rf_datapoints,[n_y,n_x])\n\n\n# iv) Interpolate using true kernel\n\nK_true=np.kron(K_y,K_x)\nK_true_sample=K_true[np.ix_(datapoint_index.astype(int),datapoint_index.astype(int))]\nK_true_subset=K_true[:,datapoint_index.astype(int)]\n\nrf_est_K_true=np.reshape([email protected](K_true_sample,rcond=tol,hermitian=True)@rf_datapoints,[n_y,n_x])\n\n\n\n\n\"\"\"\n 6. 
Plots and illustrations -----------------------------------------------\n\"\"\"\n\n\n# i) Auxiliary definitions\n\n\n\n\n# ii) Invoke figure 1\n\nn_plot=15\nw,h=plt.figaspect(0.3)\nfig1 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))\ngs1 = fig1.add_gridspec(1, 3)\n\n\n# Location 1,1 Underlying covariance function\nf1_ax1 = fig1.add_subplot(gs1[0,0])\nf1_ax1.imshow(K_true)\nplt.ylabel('Locations x,y')\nplt.xlabel('Locations x,y')\nplt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)\nplt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)\nf1_ax1.set_title('Covariance function')\n\n\n# Location 1,2 Example realizations\nf1_ax2 = fig1.add_subplot(gs1[0,1])\n\nplt.imshow(Random_field_temp,extent=[0,1,0,1])\nplt.ylabel('Location y')\nplt.xlabel('Location x')\nplt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)\nplt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)\nf1_ax2.set_title('Example realization')\n\n\n# Location 1,3 Plot of the empirical covariance matrix\nf1_ax3 = fig1.add_subplot(gs1[0,2])\nf1_ax3.imshow((1/n_sample)*x_measured@x_measured.T)\nplt.ylabel('Locations x,y')\nplt.xlabel('Locations x,y')\nplt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)\nplt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)\nf1_ax3.set_title('Empirical covariance')\n\n\n# Save the figure\n# plt.savefig('Special_case_3a_random_field',dpi=400)\n\n\n\n\n# iii) Invoke figure 2\n\nn_plot=15\nn_illu=5\nw,h=plt.figaspect(0.35)\nfig2 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))\ngs2 = fig2.add_gridspec(4, 6)\n\n\nf2_ax1 = fig2.add_subplot(gs2[0:2, 0:2])\nf2_ax1.imshow(K_true)\nf2_ax1.set_title('True covariance function')\nf2_ax1.axis('off')\n\nf2_ax2 = fig2.add_subplot(gs2[0:2, 4:6])\nf2_ax2.imshow(K_gamma)\nf2_ax2.set_title('Estimated covariance function')\nf2_ax2.axis('off')\n\nf2_ax3 = fig2.add_subplot(gs2[0, 2])\nf2_ax3.imshow(S_emp_measured)\nf2_ax3.set_title('Empirical covariance')\nf2_ax3.axis('off')\n\nf2_ax4 = fig2.add_subplot(gs2[0, 3])\nf2_ax4.imshow(C_gamma)\nf2_ax4.set_title('Estimated covariance')\nf2_ax4.axis('off')\n\nf2_ax5 = fig2.add_subplot(gs2[1, 2])\nf2_ax5.imshow(Lambda_p_cut)\nf2_ax5.set_title('Prior gamma')\nf2_ax5.axis('off')\n\nf2_ax6 = fig2.add_subplot(gs2[1, 3])\nf2_ax6.imshow(gamma)\nf2_ax6.set_title('Inferred gamma')\nf2_ax6.axis('off')\n\n\n# Save the figure\n# plt.savefig('Special_case_3b_random_field',dpi=400)\n\n\n\n\n\n# iii) Invoke figure 3\n\nw,h=plt.figaspect(0.25)\nfig3 = plt.figure(dpi=400,constrained_layout=True,figsize=(w,h))\ngs3 = fig3.add_gridspec(1, 3)\n\n# Location 1.2 Estimations using squared exponential covariance\nf3_ax1 = fig3.add_subplot(gs3[0,1])\nf3_ax1.scatter(x_datapoints,y_datapoints,facecolors='1',edgecolors='1',label='Data points')\n\nexp_est = f3_ax1.imshow(np.flipud(rf_est_K_exp),extent=[0,1,0,1],label='Estimate sqexp cov')\nplt.setp(exp_est, label=\"_\")\nplt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)\nplt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)\nplt.xlabel('Location x')\nf3_ax1.set_title('Estimations using exp. 
covariance')\n\n\n\n# Location 1.3 Estimations using inferred covariance\nf3_ax2 = fig3.add_subplot(gs3[0,2])\nf3_ax2.scatter(x_datapoints,y_datapoints,facecolors='1',edgecolors='1',label='Data points')\n \ngamma_est = f3_ax2.imshow(np.flipud(rf_est_K_gamma),extent=[0,1,0,1],label='Estimate inferred cov')\nplt.setp(gamma_est, label=\"_\")\nplt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)\nplt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)\nplt.xlabel('Location x')\nf3_ax2.set_title('Estimations using inferred covariance')\n\n\n\n# Location 1.1 Estimations using true covariance\nf3_ax3 = fig3.add_subplot(gs3[0,0])\nf3_ax3.scatter(x_datapoints,y_datapoints,facecolors='1',edgecolors='1',label='Data points')\ntrue_est = f3_ax3.imshow(np.flipud(rf_est_K_true),extent=[0,1,0,1],label='Estimate true cov')\n\nplt.ylabel('Location y')\nplt.xlabel('Location x')\nplt.tick_params(axis='y', which='both', left=False,right=False,labelleft=False)\nplt.tick_params(axis='x', which='both', top=False,bottom=False,labelbottom=False)\nf3_ax3.set_title('Estimate using true covariance')\nf3_ax3.legend(loc='lower right')\n\n# Save the figure\n# plt.savefig('Special_case_3c_random_field',dpi=400)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "\"\"\"\nThis file provides several support functions that are available after import.\nThe functions are:\n Logpdet: Calculates the log pseudodeterminant of a matrix\n Backtracking_linesearch: Performs backtracking line search for homogeneous\n kernel inference problems\n Backtracking_linesearch_inhomogeneous: Performs backtracking line search \n for inhomogeneous kernel inference problems\n Get_P_psi: Calculates an S_psi that satisfies some boundary conditions\n Simulation_random_field: Draws a realization from a random field given\n a tensor product decomposition of its covariance function.\n\"\"\"\n\n\n\n\ndef Logpdet(A,tol):\n \"\"\"\n The goal of this function is to calculate the log pseudodeterminant of a \n square matrix A. During the calculation, all eigenvalues smaller than tol\n are set to 1. \n For this, do the following:\n 1. Imports and definitions\n 2. Singular value decomposition\n 3. Calculate log pseudodeterminant\n\n INPUTS\n The inputs consist in A square matrix A whose pseudodeterminant is to be \n calculated and a value for the numerical cutoff-tolerance that determines\n values consideres as zero during the calculation.\n \n Name Interpretation Type\n A Data matrix, each col is one vector- Matrix [n,n]\n valued measurement.\n tol Tolerance for thresholding the singular Small positive number\n values.\n \n \n OUTPUTS\n The outputs consist in the pseudodeterminant, a real number\n \n Name Interpretation Type\n logpdet The logarithm of the pseudodet Real number\n\n\n \"\"\"\n \n \n \"\"\" \n 1. Imports and definitions -------------------------------------------\n \"\"\"\n \n \n # i) Imports\n \n import numpy as np\n \n \n # ii) Auxiliary quantities\n \n n=np.shape(A)[0]\n \n \"\"\" \n 2. Singular value decomposition -------------------------------------\n \"\"\"\n \n \n # i) Calculate and threshold\n \n [U,S,V]=np.linalg.svd(A)\n for k in range(n):\n S[k]=S[k]*(S[k]>tol)+1*(S[k]<=tol)\n \n \n \"\"\"\n 3. 
Calculate pseudodeterminant ---------------------------------------\n \"\"\"\n \n \n # i) Calculate pseudodeterminant\n \n logpdet=np.sum(np.log(S))\n \n \n # ii) Assemble solution\n \n return logpdet\n \n\n\ndef Backtracking_linesearch(f, x, lambda_newton, Delta_x,options):\n \"\"\"\n The goal of this function is to perform a backtracking linesearch to adapt \n the stepsize t of the Newton step, i.e. prepare a damped Newton step.\n For this, do the following:\n 1. Imports and definitions\n 2. Loop till conditions satisfied\n \n The stepsize t is reduced until the condition f(x+t Delta_x) < f(x) + \n t alpha <grad_f, Delta_x> is satisfied.\n\n INPUTS\n The inputs consist in an objective function f used to check validity of the\n Hessian approximation as well as an evaluation point x and the Newton decrement\n lambda_newton. Furthermore, the descent direction Delta_x needs to be pro-\n vided together with some options on (alpha, beta, tolerances) that feature\n in backtracking line search algorithms.\n \n Name Interpretation Type\n f The objective function for which the Function handle\n Armijo optimality condition is to be\n checked. Calculates the objective values\n f(x) and f(x+t Delta_x). \n x The position at which gradients and Matrix [n_exp,n_exp]\n search directions are evaluated.\n lambda_newton The Newton decrement quantifying the A positive real number\n decrease of the objective function in \n the direction of Delta_x \n Delta_x Provides the descent direction, for Matrix [n_exp,n_exp]\n which a reasonable stepsize t is to be \n determined. The recommended update is \n then x = x + t Delta x \n options Tuple containing the values for alpha, Tuple (alpha,beta,max_iter) \n beta and maximum iterations to arrive \n at a reasonable stepsize.\n \n OUTPUTS\n The outputs consist in the stepsize t, a real number guaranteeing that \n Newton updates do not leave the psd cone.\n \n Name Interpretation Type\n t Stepsize for a robust damped Newton Real number in [0,1]\n update \n\n \"\"\"\n\n \n \n \"\"\"\n 1. Imports and definitions -------------------------------------------\n \"\"\"\n \n \n # i) Import packages\n \n import numpy as np\n \n \n # ii) Define auxiliary quantities \n \n alpha=options[0]\n beta=options[1]\n max_iter=options[2]\n \n # iii) Initial function evaluations\n \n t=1\n f_val_x=f(x)\n f_val_x_mod=f(x+t*Delta_x)\n \n difference=f_val_x_mod-(f_val_x-alpha*t*(lambda_newton**2))\n \n \n \n \"\"\" \n 2. Loop till conditions satisfied ------------------------------------\n \"\"\"\n \n # i) Iterate\n \n k=1\n while difference>0 and k<max_iter:\n t=beta*t\n f_val_x_mod=f(x+t*Delta_x)\n difference=f_val_x_mod-(f_val_x-alpha*t*(lambda_newton**2))\n k=k+1\n \n if k==max_iter:\n t=0\n\n \n # ii) Assemble solution\n \n return t\n \n \n \n \n \ndef Backtracking_linesearch_inhomogeneous(f, gamma, eta_list, lambda_newton, Delta_gamma, Delta_eta_list, options):\n \"\"\"\n The goal of this function is to perform a backtracking linesearch to adapt \n the stepsize t of the Newton step, i.e. prepare a damped Newton step.\n For this, do the following:\n 1. Imports and definitions\n 2. Loop till conditions satisfied\n \n The stepsize t is reduced until the condition f(x+t Delta_x) < f(x) + \n t alpha <grad_f, Delta_x> is satisfied.\n\n INPUTS\n The inputs consist in an objective function f used to check validity of the\n Hessian approximation as well as an evaluation point x and the Newton decrement\n lambda_newton. 
Furthermore, the descent direction Delta_x needs to be pro-\n vided together with some options on (alpha, beta, tolerances) that feature\n in backtracking line search algorithms.\n \n Name Interpretation Type\n f The objective function for which the Function handle\n Armijo optimality condition is to be\n checked. Calculates the objective values\n f(x) and f(x+t Delta_x). \n gamma The position at which gradients and Matrix [n_exp,n_exp]\n search directions are evaluated.\n eta_list List containing the n_S_obs matrices List of matrices\n eta_1 , ... ,eta_{n_S_obs}. Dummy \n variables linked to gamma via linear \n constraints.\n lambda_newton The Newton decrement quantifying the A positive real number\n decrease of the objective function in \n the direction of Delta_x \n Delta_gamma Provides the descent direction, for Matrix [n_exp,n_exp]\n which a reasonable stepsize t is to be \n determined. The recommended update is \n then gamma = gamma + t Delta gamma \n Delta_eta_list List containing the n_S_obs matrices List of matrices\n Delta eta_1 ,..., Delta eta_{n_S_obs}\n They encode the descent directions for \n the eta matrices. The update for the eta \n matricesis eta_list[k] = \n eta_list[k]+t*Delta_eta_list[k].\n options Tuple containing the values for alpha, Tuple (alpha,beta,max_iter) \n beta and maximum iterations to arrive \n at a reasonable stepsize.\n \n OUTPUTS\n The outputs consist in the stepsize t, a real number guaranteeing that \n Newton updates do not leave the psd cone.\n \n Name Interpretation Type\n t Stepsize for a robust damped Newton Real number in [0,1]\n \n \"\"\"\n \n \n \n \"\"\"\n 1. Imports and definitions -------------------------------------------\n \"\"\"\n \n \n # i) Import packages\n \n import numpy as np\n \n \n # ii) Define auxiliary quantities \n \n alpha=options[0]\n beta=options[1]\n max_iter=options[2]\n n_S_obs=len(eta_list)\n \n # iii) Initial function evaluations\n \n t=1\n f_val_x=f(gamma, eta_list)\n \n eta_list_mod=[]\n for k in range(n_S_obs):\n eta_list_mod.append(eta_list[k]+t*Delta_eta_list[k])\n f_val_x_mod=f(gamma+Delta_gamma,eta_list_mod)\n \n difference=f_val_x_mod-(f_val_x-alpha*t*(lambda_newton**2))\n \n \n \n \"\"\" \n 2. Loop till conditions satisfied ------------------------------------\n \"\"\"\n \n # i) Iterate\n \n k=1\n while difference>0 and k<max_iter:\n t=beta*t\n \n eta_list_mod=[]\n for k in range(n_S_obs):\n eta_list_mod.append(eta_list[k]+t*Delta_eta_list[k])\n \n f_val_x_mod=f(gamma+t*Delta_gamma,eta_list_mod)\n difference=f_val_x_mod-(f_val_x-alpha*t*(lambda_newton**2))\n k=k+1\n \n if k==max_iter:\n t=0\n\n \n # ii) Assemble solution\n \n return t\n \n \n \ndef Get_S_psi(Psi, S_emp, A, b, tol=10**(-6)):\n \n \"\"\"\n The goal of this function is to provide a matrix S_psi that reconstructs\n the observed empirical covariance matrix S_emp given some constraints that\n are encoded by A vec(S_psi)=b.\n For this, do the following:\n 1. Definitions and imports\n 2. Set up problem matrices\n 3. Solve quadratic program\n 4. Assemble solutions\n \n INPUTS\n The inputs consist in the matrix Psi used for reconstructing S_emp by \n Psi@[email protected]. This matrix is supposed to be close to the empirical \n covariance matrix S_emp. 
The matrix A contains as row vectors the vectorized \n matrices A_i for which <A_i,gamma>=b_i is supposed to hold.\n \n Name Interpretation Type\n Psi Matrix containing info w.r.t the fun Matrix [n,n_exp]\n ction basis used for reconstruction.\n Each col is one of the basis functions\n measured by the measurement operator\n S_emp Empirical covariance matrix to be Matrix [n,n]\n approximated via S_psi\n A Constraint matrix specifying the linear Matrix [n_c, n_exp^2]\n constraints A vec(gamma)=b\n b Vector of constraint values Vector [n_c,1]\n tol Tolerance for inversion procedures. Small positive number\n The larger the tolerance, the more \n regular S_psi is.\n \n OUTPUTS\n The outputs consist in the matrix S_psi. It reconstructs S_emp closely via\n S_emp approx Psi@[email protected] while adhering to the constraints as formulated\n by A.\n \n Name Interpretation Type\n S_psi Data induced estimator for gamma, Matrix [n_exp,n_exp]\n reconstructs S_emp\n\n \"\"\"\n \n \n \n \"\"\"\n 1. Definitions and imports\n \"\"\"\n \n \n # i) Import packages\n \n import numpy as np\n import numpy.linalg as lina\n\n \n \n \"\"\"\n 2. Set up problem matrices\n \"\"\"\n \n \n # i) Gradient and Hessian\n \n F=np.kron(Psi,Psi)\n H=F.T@F\n H_pinv=lina.pinv(H,rcond=tol,hermitian=True)\n \n \n # ii) Respecify dimensions\n \n n=np.shape(Psi)[0]\n n_exp=np.shape(Psi)[1]\n n_c=np.shape(A)[0]\n \n S=np.reshape(S_emp,[n**2,1])\n \n \n \n \"\"\"\n 3. Solve quadratic program\n \"\"\"\n \n \n # i) Solve QP\n \n # [email protected]@S\n # Mid_mat=A.T@(lina.pinv(A@[email protected],rcond=tol,hermitian=True))\n # x_2=-H_pinv@Mid_mat@(A@x_1-b)\n \n Top_mat=np.hstack((H,A.T))\n Bot_mat=np.hstack((A,np.zeros([n_c,n_c])))\n \n Full_mat=np.vstack((Top_mat,Bot_mat))\n target_vec=np.vstack((F.T@S,np.zeros([n_c,1])))\n \n # ii) Respecify solution\n \n S_psi=lina.lstsq(Full_mat,target_vec,rcond=tol)[0]\n S_psi=np.reshape(S_psi[:n_exp**2],[n_exp,n_exp]) \n S_psi=0.5*(S_psi+S_psi.T) \n \n [S,U]=lina.eig(S_psi)\n S_pos=S*(S>0)\n [email protected](S_pos)@U.T\n \n S_psi=0.5*(S_psi+S_psi.T)\n \n \n \n \"\"\"\n 4. Assemble solutions\n \"\"\"\n \n \n return S_psi \n \n \n \n \ndef Simulation_random_field(cov_x, cov_y, grid_x, grid_y, explained_var):\n \n \"\"\"\n The goal of this function is to simulate a realization of a random field\n efficiently employing the tensor product nature of covariance functions.\n This does not work for all random field but only for those, whose covariance\n function cov((x_1,y_1),(x_2,y_2)) decomposes as cov_x(x_1,x_2)*cov_y(y_1,y_2). \n The actual simulation uses the Karhunen Loewe expansion of a process into\n superpositions of basis functions weighted by the eigenvalues of the covariance\n matrix multiplied with white noise variables.\n For this, do the following:\n 1. Definitions and imports\n 2. Set up problem matrices\n 3. Simulate and assemble solution\n \n INPUTS\n The inputs consist in the two covariance functions whose product forms the\n multivariate covariance of the random field. 
Furthermore grid values for\n the input coordinates are provided and a number between 0 and 1 indicating\n how many terms are used in the superposition of the Karhunen Loewe expansion.\n \n Name Interpretation Type\n cov_x Function handle for the cov function function handle\n Maps two real numbers x_1,x_2 to a real\n number indicating the cov in x direction\n cov_y Function handle for the cov function function handle\n Maps two real numbers y_1,y_2 to a real\n number indicating the cov in y direction\n grid_x Matrix containing info w.r.t the x vals Matrix [n,n]\n at each location for which a value is\n to be simulated\n grid_y Matrix containing info w.r.t the y vals Matrix [n,n]\n at each location for which a value is\n to be simulated\n explained_var The fraction of variance to be explained Number in [0,1]\n by the simulation. The closer to 1, the \n more faithful the reproduction of the cov\n structure and the longer the runtime\n \n OUTPUTS\n The outputs consist in the matrix Random_field which is a realization of the\n random field from which a sample was supposed to be drawn.\n \n Name Interpretation Type\n Random_field Realization of the random field Matrix [n,n]\n\n\n \"\"\"\n \n \n \n \"\"\"\n 1. Definitions and imports\n \"\"\"\n \n \n # i) Import packages\n \n import numpy as np\n import numpy.linalg as lina\n \n \n # ii)) Define auxiliary quantities\n \n n_y,n_x=np.shape(grid_x)\n \n \n \n \n \"\"\"\n 2. Set up problem matrices\n \"\"\"\n \n \n # i) Component covariance matrices\n \n K_x=np.zeros([n_x,n_x])\n K_y=np.zeros([n_y,n_y])\n for k in range(n_x):\n for l in range(n_x):\n K_x[k,l]=cov_x(grid_x[0,k], grid_x[0,l])\n \n for k in range(n_y):\n for l in range(n_y):\n K_y[k,l]=cov_y(grid_y[k,0], grid_y[l,0])\n \n [U_x,S_x,V_x]=lina.svd(K_x)\n [U_y,S_y,V_y]=lina.svd(K_y)\n \n\n # ii) Indexing and ordering of eigenvalues\n \n n_tot=n_x*n_y\n \n lambda_mat=np.outer(S_y, S_x)\n index_mat_ordered=np.unravel_index(np.argsort(-lambda_mat.ravel()), [n_y,n_x])\n lambda_ordered=lambda_mat[index_mat_ordered]\n \n lambda_tot=np.sum(lambda_mat)\n lambda_cumsum=np.cumsum(lambda_ordered)\n stop_index=(np.where(lambda_cumsum>=explained_var*lambda_tot))[0][0]\n \n \n \n \"\"\"\n 3. Simulate and assemble solution\n \"\"\"\n \n \n # i) Iterative Karhunen Loewe composition\n \n white_noise=np.random.normal(0,1,[stop_index])\n \n Random_field=np.zeros([n_y,n_x])\n for k in range(stop_index):\n Random_field=Random_field+white_noise[k]*lambda_ordered[k]*np.outer(U_y[:,index_mat_ordered[0][k]],U_x[:,index_mat_ordered[1][k]])\n \n \n \n # ii) Return solution\n \n return Random_field\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n" ]
[ [ "numpy.diag", "matplotlib.pyplot.imshow", "numpy.linspace", "numpy.flipud", "numpy.kron", "numpy.cumsum", "numpy.ravel_multi_index", "matplotlib.pyplot.rcParams.update", "numpy.linalg.svd", "numpy.outer", "numpy.ravel", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.matrix.flatten", "numpy.meshgrid", "numpy.array", "numpy.sum", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.figaspect", "numpy.random.seed", "numpy.linalg.norm", "numpy.linalg.pinv", "matplotlib.pyplot.setp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.tick_params" ], [ "numpy.diag", "numpy.hstack", "numpy.linalg.svd", "numpy.log", "numpy.reshape", "numpy.linalg.eig", "numpy.kron", "numpy.cumsum", "numpy.linalg.pinv", "numpy.random.normal", "numpy.linalg.lstsq", "numpy.shape", "numpy.where", "numpy.outer", "numpy.zeros", "numpy.sum", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.10", "1.12", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joshua-laughner/CAADA
[ "831c70c0dfa96cabb95a069ad54211fb37533f5a" ]
[ "caada/ca_pems/readers.py" ]
[ "\"\"\"\nThis module contains functions to read Caltrans PEMS station data and metadata files.\n\"\"\"\n\nimport pandas as pd\nimport sys\n\nfrom ..caada_typing import pathlike as _pathlike\n\n\ndef read_pems_station_csv(csv_file: _pathlike) -> pd.DataFrame:\n \"\"\"Read a Caltrans PEMS daily station .csv file\n\n Parameters\n ----------\n csv_file\n The path to the PEMS file to read\n\n Returns\n -------\n A dataframe containing the PEMS data with the correct header\n \"\"\"\n columns = ['timestamp', 'station', 'district', 'route', 'direction of travel', 'lane type', 'station length', 'samples',\n 'percent observed', 'total flow', 'delay 35', 'delay 40', 'delay 45', 'delay 50', 'delay 55', 'delay 60']\n df = pd.read_csv(csv_file, header=None)\n df.columns = columns\n df['timestamp'] = pd.DatetimeIndex(df['timestamp'])\n return df\n\n\ndef read_pems_station_meta(filename: _pathlike) -> pd.DataFrame:\n \"\"\"Read a PEMS station metadata file.\n\n Parameters\n ----------\n filename\n Path to the metadata tab-delimited file to read.\n\n Returns\n -------\n pandas.DataFrame\n A dataframe, indexed by site ID, containing the metadata from the requested file.\n\n \"\"\"\n try:\n df = pd.read_csv(filename, sep='\\t')\n except pd.errors.ParserError as err:\n print('Error parsing metadata file: {}'.format(filename), file=sys.stderr)\n raise\n df.set_index('ID', inplace=True)\n df.rename(columns=lambda s: s.lower(), inplace=True)\n return df\n" ]
[ [ "pandas.read_csv", "pandas.DatetimeIndex" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
AccessibleAI/ailibrary
[ "f283f7c1608f5998694efc3cdbd0a29ebf3239c2" ]
[ "pr_PrepCSV/test/_data_producer.py" ]
[ "\"\"\"\nAll rights reserved to cnvrg.io\n\n http://www.cnvrg.io\n\ntest_prepcsv.py\n==============================================================================\n\"\"\"\nimport json\nimport os\nimport string\nimport pandas\nimport random\nimport numpy as np\n\n### Produce csv file for testing.\nrows_num = 25\ndata = {}\nsummary = {}\n\n### integers columns.\nintegers_columns_num = 3\nfor col in range(integers_columns_num):\n\ttitle = 'int_col_' + str(col)\n\telements = [random.randint(1, 10) for i in range(rows_num)]\n\tdata[title] = elements\n\n\tavg = sum(elements) / len(elements)\n\tnum_of_elements = len(set(elements))\n\tsummary[title] = {'avg': avg, 'num_of_elements': num_of_elements}\n\n### strings columns.\nstrings_columns_num = 3\nvalues = [(random.choice(string.ascii_letters)*3).upper() for i in range(10)]\nfor col in range(strings_columns_num):\n\ttitle = 'str_col_' + str(col)\n\telements = [random.choice(values) for i in range(rows_num)]\n\tdata[title] = elements\n\n\tnum_of_elements = len(set(elements))\n\tsummary[title] = {'num_of_elements': num_of_elements}\n\n### column with empty values.\nempty_values_columns_num = 1\nnum_of_empty_cells = 6\nfor col in range(empty_values_columns_num):\n\ttitle = 'empty_val_' + str(col)\n\telements = [random.randint(1, 10) for i in range(rows_num)]\n\trand_indexes = [random.randint(0, rows_num) for i in range(num_of_empty_cells)]\n\tfor ind in range(len(elements)):\n\t\tif ind in rand_indexes: elements[ind] = np.nan\n\n\tdata[title] = elements\n\tnum_of_elements = len(set(elements))\n\tsummary[title] = {'num_of_elements': num_of_elements}\n\n\n### target column.\ntitle = 'target_col'\nelements = [random.choice([0, 0.3, 0.6, 1]) for i in range(rows_num)]\ndata[title] = elements\n\n\ndf = pandas.DataFrame.from_dict(data)\ndf.to_csv(os.getcwd() + \"/_data_for_testing.csv\")\n\nwith open(os.getcwd() + '/_results.json', 'w') as f:\n\tjson.dump(summary, f)\n\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
gurvirsingh15/CPS847-Group-2
[ "95afb7c7eca9a68682a80fa5d4d8e9fb65dde351" ]
[ "weatherbot.py" ]
[ "# loads .env contents\nimport settings\n\n# use for approximate string matching\nimport difflib\n\nimport pandas as pd \n\nimport os, time, sys\nimport re, json\n\nfrom urllib.request import urlopen\nfrom datetime import datetime as dt\n\nfrom slackclient import SlackClient\n\nkeys = {\n 'weatherbot': os.environ['WEATHERBOT_API_KEY'],\n 'openweather': os.environ['OPENWEATHER_API_KEY']\n}\n\nclient = SlackClient(keys['weatherbot'])\nweatherbot_id = 'U93NEAZ24'\n\nmention_regex = \"<@{}>(.*)\".format(weatherbot_id)\n\n#MENTION_REGEX = \"<.*>(.*)\"\n#.format(('.*')\n\nbase_url = 'https://api.openweathermap.org/data/2.5/weather'\n\n# from the world cities database : https://simplemaps.com/data/world-cities\ncities = pd.read_csv('cities.csv').city\n\n# emojis assigned to each description for great fun\nemojis = {\n 'broken clouds': 'sun_behind_cloud', \n 'clear sky': 'sun_with_face',\n 'few clouds': 'sun_small_cloud', \n 'haze': 'fog', \n 'mist': 'fog', \n 'light rain': 'partly_sunny_rain', \n 'light snow': 'snowflake', \n 'moderate rain': 'umbrella_with_rain_drops', \n 'overcast clouds': 'cloud', \n 'scattered clouds': 'sun_small_cloud'\n} \n\n\n\ndef get_weather(city):\n \"\"\"Gets the weather data for a given city\"\"\"\n\n # build the url string\n url = '{}?APPID={}&q={}'.format(base_url,\n keys['openweather'],\n city.replace(' ', '+'))\n # http get it\n try:\n res = urlopen(url)\n except:\n return {'error': 'url not found'}\n \n \n if res.code != 200:\n return {'error': 'invalid request'}\n \n try:\n data = json.loads(res.read().decode('utf8'))\n except:\n return {'error': 'malformed data'}\n\n return data\n\ndef extract_message(message):\n \"\"\"Extracts message content from a mention\"\"\"\n \n matches = re.search(mention_regex, message)\n print(matches)\n if not (matches == None):\n return matches.group(1)\n\ndef parse_command(information_recieved):\n \"\"\"Parses information from RTM and extracts command and parameters\"\"\"\n for item in information_recieved:\n if item['type'] == \"message\" and not \"subtype\" in item:\n message = extract_message(item['text'])\n user = item['user']\n channel = item['channel']\n\n return message, channel, user\n\n return None,None,None\n\ndef handle_message(message, channel, user):\n \"\"\"Main method to handle weather data queries\"\"\"\n\n \n # get the current time\n t = str(dt.now())[:19]\n\n # display message details\n log = \"\"\"\n Time {}\n Message {}\n Channel {}\n User {}\n \n \"\"\".format(t, message, channel, user)\n \n sys.stderr.write(log)\n \n \n # check the world cities dataset for cities \n # whose names are approximately the given text\n # \n # example: new yrk --> New York\n matches = difflib.get_close_matches(message, cities)\n \n # if a city is found, grab the data for the first match\n # from the openweather API\n if len(matches):\n city = matches[0]\n \n data = get_weather(city)\n \n if not 'error' in data:\n # parse main fields\n desc = data['weather'][0]['description']\n \n temp = int(data['main']['temp']) - 273 # kelvin to celsius\n hum = data['main']['humidity']\n #vis = data['visibility']\n\n # add an emoji if we've got one\n emoji = '' if not desc in emojis else ':{}:'.format(emojis[desc])\n\n # format the response\n \n header = '\\n*{} Weather Report *'.format(city)\n sig = '\\nCheers, \\n\\t *Weatherbot*'\n\n response = '\\n\\t'.join([\n header,\n 'Description: {} {}'.format(desc, emoji),\n 'Temperature: {}'.format(temp),\n 'Humidity: {}'.format(hum),\n #'Visibility: {}'.format(vis),\n sig\n ])\n \n else:\n response = 
':sob: I couldn\\'t get any weather data for \"{}\"'.format(message)\n else:\n response = ':sob: I couldn\\'t find any cities matching \"{}\"'.format(message)\n\n\n # send the response\n client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=response,\n user=user\n )\n\n\nif __name__ == \"__main__\":\n if client.rtm_connect(with_team_state=False):\n print('Weatherbot ready 2 rumbl')\n while True:\n message, channel, user = parse_command(client.rtm_read())\n\n if message is not None:\n handle_message(message, channel, user)\n\n time.sleep(1)\n else:\n print(\"Connection failed\")\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
caurnhammer/AurnhammerFrank_LIG
[ "7598d5912eb6447b7c01d56d1a0f37643f72b80e" ]
[ "RNNs/SRP_DKL_forward.py" ]
[ "###############################################################################\n# Code by Christoph Aurnhammer, based on #\n# https://github.com/pytorch/examples/tree/master/word_language_model #\n# Citation: Aurnhammer & Frank (2019), Neuropsychologia. #\n# LIG_2: #\n# This code averages all instances of a model snapshot and #\n# then computes surprisal and the Kullbach-Leibler Divergence #\n# on the experimental stimuli from Frank (2013) #\n# (Christoph Aurnhammer, 05.04.2019 #\n# for Aurnhammer, Frank (upcoming) #\n###############################################################################\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.functional import softmax\nimport argparse\nimport pandas\nimport glob\nimport re\nimport numpy as np\nfrom math import log, exp, isnan\n# script data.py\nimport data\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='PyTorch ENCOW Language Model')\n parser.add_argument('--data', type=str, default='./corpus/',\n help='location of the data corpus')\n parser.add_argument('--bptt', type=int, default=42,\n help='sequence length')\n parser.add_argument('--cuda', action='store_true',\n help='use CUDA')\n arguments = parser.parse_args()\n return arguments\n\n\n# Get snapshots absolute paths from dir\ndef get_paths(directory, names_snapshot):\n snapshots = []\n for snap in names_snapshot:\n for repetition in range(0, 6):\n found_checkpoints = glob.glob(directory + snap + str(repetition))\n # If list is not empty\n if found_checkpoints:\n for cp in sorted(found_checkpoints, key=numerical_sort):\n snapshots.append(cp)\n return snapshots\n\n\ndef numerical_sort(value):\n # Numerical sort from here on\n numbers = re.compile(r'(\\d+)')\n parts = numbers.split(value)\n parts[1::2] = map(int, parts[1::2])\n return parts\n\n\ndef prepare_outfile(out_path):\n # Load items from the experimental stimuli\n with open('./corpus/test.txt') as inputfile:\n inputfile = inputfile.read()\n inputfile = inputfile.replace(' ,', ',')\n inputfile = inputfile.replace(' n\\'t', 'n\\'t')\n inputfile = inputfile.replace(' \\'', '\\'')\n inputfile = inputfile.split('\\n')\n del inputfile[-1]\n inputfile = [sentence.split(' ') for sentence in inputfile]\n\n sent_nr = []\n word_pos = []\n words = []\n for sent_ind, sentence in enumerate(inputfile):\n for word_ind, word in enumerate(sentence):\n sent_nr.append(sent_ind + 1)\n word_pos.append(word_ind + 1)\n words.append(word)\n\n # Prepare output file\n dataframe = pandas.DataFrame()\n dataframe['sent_nr'] = sent_nr\n dataframe['word_pos'] = word_pos\n dataframe['word'] = words\n dataframe['item'] = pandas.read_csv('./input/itemnumbers_frank2013.csv', delimiter='\\t', header=None)\n dataframe['ENCOW_log_freq'] = pandas.read_csv('./input/ENCOWfreqs_frank2013.csv', delimiter='\\t')\n dataframe.to_csv(out_path, sep='\\t', index=False)\n return dataframe\n\n\ndef batchify(dt, bsz, arguments):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = dt.size(0) // bsz\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n dt = dt.narrow(0, 0, nbatch * bsz)\n # Evenly divide the data across the bsz batches.\n dt = dt.view(bsz, -1).t().contiguous()\n if arguments.cuda:\n dt = dt.cuda()\n return dt\n\n\ndef repackage_hidden(h):\n \"\"\"Wraps hidden states in new Variables, to detach them from their history.\"\"\"\n if type(h) == Variable:\n return Variable(h.data)\n else:\n return tuple(repackage_hidden(v) for v in h)\n\n\ndef 
get_batch(source, i):\n seq_len = min(args.bptt, len(source) - 1 - i)\n dt = Variable(source[i:i + seq_len])\n target = Variable(source[i + 1:i + 1 + seq_len].view(-1))\n return dt, target\n\n\ndef get_eos(arguments):\n # chr: define end of sentence index\n if arguments.cuda:\n eos_tensor = torch.cuda.LongTensor([corpus.dictionary.word2idx['<eos>']])\n else:\n eos_tensor = torch.LongTensor([corpus.dictionary.word2idx['<eos>']])\n return eos_tensor\n\n\ndef forward_model(rnn, sequence, types, args):\n # Initialise hidden state of PLM to zeros for new sequence\n hidden_true = rnn.init_hidden(1)\n\n # List of probability distributions over w_t and w_t1\n w_t_list = []\n w_t1_list = []\n\n # For each word in the sentence (starting from <sos>)\n for item in sequence:\n # Reformat item (technicality)\n if args.cuda:\n item = torch.cuda.LongTensor([[int(item)]])\n else:\n item = torch.LongTensor([[int(item)]])\n\n # Model current item.\n # This is returns the \"true\" output / hidden states corresponding\n # to the actually occuring items in the stimuli\n output_true, hidden_true = rnn(item, hidden_true)\n\n # Collect current P(w_t|w_1..._t-1) probability distribution\n p_wt_dist = softmax(output_true, dim=-1).data[0][0]\n\n # For P(w_t+1|w_1...t):\n # Allocate array with vocabulary size as rows and columns\n if args.cuda:\n probs = torch.cuda.FloatTensor(np.empty([types, types]))\n else:\n probs = torch.FloatTensor(np.empty([types, types]))\n\n # For each possible possible w_t\n for wt in range(0, types):\n # Select probability of current w_t\n p_wt = p_wt_dist[wt]\n\n # Run using current wt and rnn hidden state produced after last true item\n if args.cuda:\n output_wt1, hidden_wt1 = rnn(torch.cuda.LongTensor([[wt]]), hidden_true)\n else:\n output_wt1, hidden_wt1 = rnn(torch.LongTensor([[wt]]), hidden_true)\n\n # Collect current P(w_t+1|w_1...t) distribution\n p_wt1_dist = softmax(output_wt1, dim=-1).data[0][0]\n\n # Enter as column into matrix\n # Each cell is the probability of the j-th w_t1\n # multiplied by the prob of the current w_t\n probs[:, wt] = p_wt1_dist * p_wt\n\n # Compute sum per row, leaving a single vector with\n # one probability per possible w_t1\n p_wt1_dist = probs.sum(dim=1)\n\n # Append to output lists\n w_t_list.append(p_wt_dist)\n w_t1_list.append(p_wt1_dist)\n return w_t_list, w_t1_list\n\n\ndef average_sent(sentence_output):\n average_dist = []\n num_words = len(sentence_output[0])\n for word_outer in range(0, num_words):\n word_dists = []\n for model in sentence_output:\n word_dists.append(model[word_outer])\n word_avg = sum(word_dists)/len(word_dists)\n average_dist.append(word_avg)\n return average_dist\n\n\ndef compute_surprisal(dists, seq_targets):\n # accept a list of probability distributions and the indices of the correct word and compute surprisal for each word\n sent_surprisal = []\n for target, prob in zip(seq_targets.data, dists):\n sent_surprisal.append([corpus.dictionary.idx2word[target], round(log(float((prob[target]))), 4) * -1])\n return sent_surprisal\n\n\ndef compute_kld(dist_nextword, dist_plusone, seq_targets):\n # accept two lists of probability distributions and compute the Kullback-Leibler Divergence for each pair\n del dist_nextword[0]\n del dist_plusone[-1]\n # We can't compute the KLD for the first words of sentence\n sent_kld = [[corpus.dictionary.idx2word[seq_targets[0]], None]]\n seq_targets = seq_targets[1:]\n for dist_nw, dist_po, target in zip(dist_nextword, dist_plusone, seq_targets):\n cross_entropy = -sum(dist_nw * dist_po.log())\n 
plusone_entropy = -sum(dist_nw * dist_nw.log())\n kld = cross_entropy - plusone_entropy\n sent_kld.append([corpus.dictionary.idx2word[target], round(kld.item(), 4)])\n return sent_kld\n\n\ndef add_to_df(input_lol, dataframe, snap, metric_name):\n # Clean up sentences: remove commas and words with clitics (ensures equal lengths of sentences)\n for i_index, sentence in enumerate(input_lol):\n for j_index, word in enumerate(sentence):\n if word[0] == ',':\n del (input_lol[i_index][j_index])\n elif '\\'' in word[0]:\n del (input_lol[i_index][j_index])\n # Add metrics to new column\n new_col = []\n for row in dataframe.iterrows():\n word_value = input_lol[row[1][0] - 1][row[1][1] - 1]\n word = row[1][2].lower()\n word = word.strip(',')\n if word_value[0] == word and word_value[1] is not None:\n new_col.append(float(word_value[1]))\n else:\n new_col.append(None)\n assert len(df) == len(new_col)\n dataframe[metric_name + '_' + snap] = new_col\n return dataframe\n\n\ndef evaluate(surprisal_values):\n N = 0\n Psum = 0\n\n for surp in surprisal_values:\n if isnan(surp):\n pass\n else:\n N += 1\n Psum += -surp\n print(\"Evaluated: Perplexity {}\".format(exp(-1 / N * Psum)))\n return exp(-1 / N * Psum)\n\n\ndef store_eval(wt_perf, wt1_perf):\n output = pandas.DataFrame()\n output['snapshots'] = ['1K', '3K', '10K', '30K', '100K', '300K', '1M', '3M', '6.47M']\n output['wt'] = wt_perf\n output['wt1'] = wt1_perf\n output.to_csv('srp_entr/PLM_ppl.csv')\n print(output)\n\nif __name__ == \"__main__\":\n # Parse command line arguments\n args = parse_args()\n\n # Notify user if a cuda decive could be used\n if torch.cuda.is_available():\n if not args.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n #####################################\n # Define all project specific input #\n #####################################\n # Define the names of the snapshots and the directories where they are found\n snapshot_names = ['_1000_', '_3000_', '_10000_', '_30000_', '_100000_', '_300000_', '_1000000_',\n '_3000000_', '_6470000_']\n # We are only select LSTM models (in case other matching models are in dir)\n snapshot_names = ['LSTM' + name for name in snapshot_names]\n # Shortened names used for output\n snapshot_output_names = ['1k', '3k', '10k', '30k', '100k', '300k', '1M', '3M', '6-47M']\n\n # Directories where the models are stored (normal LM = nextword; two steps ahead = plusone)\n nextword_dir = './output/'\n # Get full paths\n nextword_paths = get_paths(nextword_dir, snapshot_names)\n\n # Prepare output file\n outfile_path = './srp_entr/SRP_DKL_snapshots.txt'\n df = prepare_outfile(outfile_path)\n\n # Load corpus into memory (requires data.py)\n print('Loading corpus from {}'.format(args.data))\n corpus = data.Corpus(args.data, args.bptt)\n ntypes = len(corpus.dictionary)\n eos = get_eos(args)\n test_data = batchify(corpus.test, bsz=1, arguments=args)\n\n # Evaluation output\n wt_ppl = []\n wt1_ppl = []\n ######################################\n # Loop through snapshots, sentences #\n # Compute surprisal, KLD #\n # Add data to file for each snapshot #\n ######################################\n for snap_name, snap_out_name in zip(snapshot_names, snapshot_output_names):\n snap_paths_nextword = [path for path in nextword_paths if snap_name in path]\n\n sent_counter = 0\n wt_surprisal_list = []\n KLD_list = []\n wt1_surprisal_list = []\n for x in range(0, test_data.size(0) - 1, args.bptt):\n data, targets = get_batch(test_data, x)\n # chr: cut off data at end of sentence\n 
for j in range(len(data)):\n if (int(data[j].data) == int(eos)) is True:\n sent_counter += 1\n data = data[:j, :1]\n\n # normal targets\n wt_targets = targets[:j]\n # targets for wt1\n wt1_targets = targets[1:j]\n break\n\n # Wt list\n wt_list = []\n wt1_list = []\n\n # Forward modeling of two steps ahead probability\n for model_path in snap_paths_nextword:\n with open(model_path, 'rb') as f:\n rnn_model = torch.load(f)\n rnn_model.eval()\n wt_seq, wt1_seq = forward_model(rnn_model, data, ntypes, args) # return two lists of dists\n wt_list.append(wt_seq)\n wt1_list.append(wt1_seq)\n\n wt_avg = average_sent(wt_list)\n wt1_avg = average_sent(wt1_list)\n\n # For each word in the current sentence get surprisal and KLD\n # Compute surprisal for each word wt\n wt_surprisal_list.append(compute_surprisal(wt_avg, wt_targets))\n # Compute KLD for each word (targets used to make word step identifiable)\n KLD_list.append(compute_kld(wt_avg, wt1_avg, wt_targets))\n # Compute surprisal for each word wt1 (only for evaluation purposes)\n wt1_surprisal_list.append(compute_surprisal(wt1_avg, wt1_targets))\n print(\"Classified {} sentences at snapshot {}\".format(sent_counter, snap_out_name), end='\\r')\n\n # Add surprisal to output\n df = add_to_df(wt_surprisal_list, df, snap_out_name, metric_name='srp')\n df = add_to_df(KLD_list, df, snap_out_name, metric_name='KLD')\n df.to_csv(outfile_path, sep='\\t', index=False)\n\n # List surprisal for evaluation\n wt_ppl.append(evaluate([y for x in wt_surprisal_list for y in x]))\n wt1_ppl.append(evaluate([y for x in wt1_surprisal_list for y in x]))\n\n # Store evaluation output\n store_eval(wt_ppl, wt1_ppl)\n" ]
[ [ "torch.LongTensor", "pandas.read_csv", "torch.nn.functional.softmax", "torch.load", "torch.cuda.LongTensor", "pandas.DataFrame", "torch.cuda.is_available", "numpy.empty", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
samuro95/GSPnP
[ "1aaabf24d2912135da0bdb89cad1cd0846f9649e" ]
[ "PnP_restoration/GS_PnP_restoration.py" ]
[ "import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom utils import utils_sr\nimport torch\nfrom argparse import ArgumentParser\nfrom utils.utils_restoration import rgb2y, psnr, array2tensor, tensor2array\nimport sys\nfrom matplotlib.ticker import MaxNLocator\n\n\nclass PnP_restoration():\n\n def __init__(self, hparams):\n\n self.hparams = hparams\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.initialize_cuda_denoiser()\n\n def initialize_cuda_denoiser(self):\n '''\n Initialize the denoiser model with the given pretrained ckpt\n '''\n sys.path.append('../GS_denoising/')\n from lightning_GSDRUNet import GradMatch\n parser2 = ArgumentParser(prog='utils_restoration.py')\n parser2 = GradMatch.add_model_specific_args(parser2)\n parser2 = GradMatch.add_optim_specific_args(parser2)\n hparams = parser2.parse_known_args()[0]\n hparams.act_mode = self.hparams.act_mode_denoiser\n self.denoiser_model = GradMatch(hparams)\n checkpoint = torch.load(self.hparams.pretrained_checkpoint, map_location=self.device)\n self.denoiser_model.load_state_dict(checkpoint['state_dict'])\n self.denoiser_model.eval()\n for i, v in self.denoiser_model.named_parameters():\n v.requires_grad = False\n self.denoiser_model = self.denoiser_model.to(self.device)\n if self.hparams.precision == 'double' :\n if self.denoiser_model is not None:\n self.denoiser_model.double()\n\n def initialize_prox(self, img, degradation):\n '''\n calculus for future prox computatations\n :param img: degraded image\n :param degradation: 2D blur kernel for deblurring and SR, mask for inpainting\n '''\n if self.hparams.degradation_mode == 'deblurring':\n self.k = degradation\n self.k_tensor = array2tensor(np.expand_dims(self.k, 2)).double().to(self.device)\n self.FB, self.FBC, self.F2B, self.FBFy = utils_sr.pre_calculate(img, self.k_tensor, 1)\n elif self.hparams.degradation_mode == 'SR':\n self.k = degradation\n self.k_tensor = array2tensor(np.expand_dims(self.k, 2)).double().to(self.device)\n self.FB, self.FBC, self.F2B, self.FBFy = utils_sr.pre_calculate(img, self.k_tensor, self.hparams.sf)\n elif self.hparams.degradation_mode == 'inpainting':\n self.M = array2tensor(degradation).double().to(self.device)\n self.My = self.M*img\n else:\n print('degradation mode not treated')\n\n def calculate_prox(self, img):\n '''\n Calculation of the proximal mapping of the data term f\n :param img: input for the prox\n :return: prox_f(img)\n '''\n if self.hparams.degradation_mode == 'deblurring':\n rho = torch.tensor([1/self.tau]).double().repeat(1, 1, 1, 1).to(self.device)\n px = utils_sr.data_solution(img.double(), self.FB, self.FBC, self.F2B, self.FBFy, rho, 1)\n elif self.hparams.degradation_mode == 'SR':\n rho = torch.tensor([1 / self.tau]).double().repeat(1, 1, 1, 1).to(self.device)\n px = utils_sr.data_solution(img.double(), self.FB, self.FBC, self.F2B, self.FBFy, rho, self.hparams.sf)\n elif self.hparams.degradation_mode == 'inpainting':\n if self.hparams.noise_level_img > 1e-2:\n px = (self.tau*self.My + img)/(self.tau*self.M+1)\n else :\n px = self.My + (1-self.M)*img\n else:\n print('degradation mode not treated')\n return px\n\n def calculate_F(self,x,s,img):\n '''\n Calculation of the objective function value f + lamb*s\n :param x: Point where to evaluate F\n :param s: Precomputed regularization function value\n :param img: Degraded image\n :return: F(x)\n '''\n if self.hparams.degradation_mode == 'deblurring':\n deg_x = 
utils_sr.imfilter(x.double(),self.k_tensor[0].double().flip(1).flip(2).expand(3,-1,-1,-1))\n F = 0.5 * torch.norm(img - deg_x, p=2) ** 2 + self.hparams.lamb * s\n elif self.hparams.degradation_mode == 'SR':\n deg_x = utils_sr.imfilter(x.double(), self.k_tensor[0].double().flip(1).flip(2).expand(3, -1, -1, -1))\n deg_x = deg_x[...,0::self.hparams.sf, 0::self.hparams.sf]\n F = 0.5 * torch.norm(img - deg_x, p=2) ** 2 + self.hparams.lamb * s\n elif self.hparams.degradation_mode == 'inpainting':\n deg_x = self.M*x.double()\n F = 0.5*torch.norm(img - deg_x, p=2) ** 2 + self.hparams.lamb * s\n else :\n print('degradation not implemented')\n return F.item()\n\n def restore(self, img, clean_img, degradation,extract_results=False):\n '''\n Compute GS-PnP restoration algorithm\n :param img: Degraded image\n :param clean_img: ground-truth clean image\n :param degradation: 2D blur kernel for deblurring and SR, mask for inpainting\n :param extract_results: Extract information for subsequent image or curve saving\n '''\n\n if extract_results:\n z_list, x_list, Dx_list, psnr_tab, s_list, Ds_list, F_list = [], [], [], [], [], [], []\n\n # initalize parameters\n if self.hparams.tau is not None:\n self.tau = self.hparams.tau\n else:\n self.tau = 1 / self.hparams.lamb\n\n i = 0 # iteration counter\n\n img_tensor = array2tensor(img).to(self.device) # for GPU computations (if GPU available)\n self.initialize_prox(img_tensor, degradation) # prox calculus that can be done outside of the loop\n\n # Initialization of the algorithm\n if self.hparams.degradation_mode == 'SR' :\n x0 = cv2.resize(img, (img.shape[1] * self.hparams.sf, img.shape[0] * self.hparams.sf),interpolation=cv2.INTER_CUBIC)\n x0 = utils_sr.shift_pixel(x0, self.hparams.sf)\n x0 = array2tensor(x0).to(self.device)\n else:\n x0 = img_tensor\n x0 = self.calculate_prox(x0)\n\n if extract_results: # extract np images and PSNR values\n out_x = tensor2array(x0.cpu())\n current_x_psnr = psnr(clean_img, out_x)\n if self.hparams.print_each_step:\n print('current x PSNR : ', current_x_psnr)\n psnr_tab.append(current_x_psnr)\n x_list.append(out_x)\n\n x = x0\n\n diff_F = 1\n F_old = 1\n self.relative_diff_F_min = self.hparams.relative_diff_F_min\n\n while i < self.hparams.maxitr and abs(diff_F)/F_old > self.relative_diff_F_min:\n\n if self.hparams.inpainting_init :\n if i < self.hparams.n_init:\n self.sigma_denoiser = 50\n self.relative_diff_F_min = 0\n else :\n self.sigma_denoiser = self.hparams.sigma_denoiser\n self.relative_diff_F_min = self.hparams.relative_diff_F_min\n else :\n self.sigma_denoiser = self.hparams.sigma_denoiser\n\n x_old = x\n\n #Denoising of x_old and calculation of F_old\n Ds, f = self.denoiser_model.calculate_grad(x_old, self.sigma_denoiser / 255.)\n Ds = Ds.detach()\n f = f.detach()\n Dx = x_old - self.denoiser_model.hparams.weight_Ds * Ds\n s_old = 0.5 * (torch.norm(x_old.double() - f.double(), p=2) ** 2)\n F_old = self.calculate_F(x_old, s_old, img_tensor)\n\n backtracking_check = False\n\n while not backtracking_check:\n\n # Gradient step\n z = (1 - self.hparams.lamb * self.tau) * x_old + self.hparams.lamb * self.tau * Dx\n\n # Proximal step\n x = self.calculate_prox(z)\n\n # Calculation of Fnew\n f = self.denoiser_model.calculate_grad(x, self.sigma_denoiser / 255.)[1]\n f = f.detach()\n s = 0.5 * (torch.norm(x.double() - f.double(), p=2) ** 2)\n F_new = self.calculate_F(x,s,img_tensor)\n\n # Backtracking\n diff_x = (torch.norm(x - x_old, p=2) ** 2).item()\n diff_F = F_old - F_new\n if self.hparams.degradation_mode == 'inpainting':\n 
diff_F = 1\n F_old = 1\n if self.hparams.use_backtracking and diff_F < (self.hparams.gamma / self.tau) * diff_x and abs(diff_F)/F_old > self.relative_diff_F_min:\n backtracking_check = False\n self.tau = self.hparams.eta_tau * self.tau\n x = x_old\n else:\n backtracking_check = True\n\n # Logging\n if extract_results:\n out_z = tensor2array(z.cpu())\n out_x = tensor2array(x.cpu())\n current_z_psnr = psnr(clean_img, out_z)\n current_x_psnr = psnr(clean_img, out_x)\n if self.hparams.print_each_step:\n print('iteration : ', i)\n print('current z PSNR : ', current_z_psnr)\n print('current x PSNR : ', current_x_psnr)\n x_list.append(out_x)\n z_list.append(out_z)\n Dx_list.append(tensor2array(Dx.cpu()))\n Ds_list.append(torch.norm(Ds).cpu().item())\n s_list.append(s.cpu().item())\n F_list.append(F_new)\n psnr_tab.append(current_x_psnr)\n\n i += 1 # next iteration\n\n # post-processing gradient step\n if extract_results:\n Ds, f = self.denoiser_model.calculate_grad(x, self.sigma_denoiser / 255.)\n Ds = Ds.detach()\n f = f.detach()\n Dx = x - self.denoiser_model.hparams.weight_Ds * Ds.detach()\n s = 0.5 * (torch.norm(x.double() - f.double(), p=2) ** 2)\n else:\n Ds, _ = self.denoiser_model.calculate_grad(x, self.sigma_denoiser / 255.)\n Ds = Ds.detach()\n Dx = x - self.denoiser_model.hparams.weight_Ds * Ds\n\n z = (1 - self.hparams.lamb * self.tau) * x + self.hparams.lamb * self.tau * Dx\n\n if self.hparams.degradation_mode == 'inpainting':\n output_img = tensor2array(x.cpu())\n else :\n output_img = tensor2array(z.cpu())\n\n output_psnr = psnr(clean_img, output_img)\n output_psnrY = psnr(rgb2y(clean_img), rgb2y(output_img))\n\n if extract_results:\n if self.hparams.print_each_step:\n print('current z PSNR : ', output_psnr)\n z_list.append(tensor2array(z.cpu()))\n Dx_list.append(tensor2array(Dx.cpu()))\n Ds_list.append(torch.norm(Ds).cpu().item())\n s_list.append(s.cpu().item())\n return output_img, output_psnr, output_psnrY, np.array(x_list), np.array(z_list), np.array(Dx_list), np.array(psnr_tab), np.array(Ds_list), np.array(s_list), np.array(F_list)\n else:\n return output_img, output_psnr, output_psnrY\n\n def initialize_curves(self):\n\n self.rprox = []\n self.prox = []\n self.conv = []\n self.lip_algo = []\n self.lip_D = []\n self.PSNR = []\n self.s = []\n self.Ds = []\n self.F = []\n\n def update_curves(self, x_list, z_list, Dx_list, psnr_tab, Ds_list, s_list, F_list):\n\n prox_list = x_list\n self.F.append(F_list)\n self.s.append(s_list)\n self.Ds.append(Ds_list)\n self.prox.append(np.sqrt(np.array([np.sum(np.abs(prox_list[i + 1] - prox_list[i]) ** 2) for i in range(len(x_list[:-1]))]) / np.array([np.sum(np.abs(z_list[i + 1] - z_list[i]) ** 2) for i in range(len(z_list[:-1]))])))\n rprox_list = 2 * prox_list - z_list\n self.rprox.append(np.sqrt(np.array([np.sum(np.abs(rprox_list[i + 1] - rprox_list[i]) ** 2) for i in range(len(rprox_list[:-1]))]) / np.array([np.sum(np.abs(z_list[i + 1] - z_list[i]) ** 2) for i in range(len(rprox_list[:-1]))])))\n self.conv.append(np.array([np.sum(np.abs(x_list[k + 1] - x_list[k]) ** 2) for k in range(len(x_list) - 1)]) / np.sum(np.abs(x_list[0]) ** 2))\n self.lip_algo.append(np.sqrt(np.array([np.sum(np.abs(x_list[k + 1] - x_list[k]) ** 2) for k in range(1, len(x_list) - 1)]) / np.array([np.sum(np.abs(x_list[k] - x_list[k - 1]) ** 2) for k in range(1, len(x_list[:-1]))])))\n self.lip_D.append(np.sqrt(np.array([np.sum(np.abs(Dx_list[i + 1] - Dx_list[i]) ** 2) for i in range(len(Dx_list) - 1)]) / np.array([np.sum(np.abs(x_list[i + 1] - x_list[i]) ** 2) for i 
in range(len(x_list) - 1)])))\n self.PSNR.append(psnr_tab)\n\n def save_curves(self, save_path):\n\n import matplotlib\n matplotlib.rcParams.update({'font.size': 15})\n\n plt.figure(1)\n fig, ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n for i in range(len(self.PSNR)):\n plt.plot(self.PSNR[i], '*', label='im_' + str(i))\n plt.legend()\n plt.grid()\n plt.savefig(os.path.join(save_path, 'PSNR.png'))\n\n plt.figure(2)\n fig, ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n for i in range(len(self.F)):\n plt.plot(self.F[i], '-o', markersize=10)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.savefig(os.path.join(save_path, 'F.png'))\n\n plt.figure(3)\n fig, ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n for i in range(len(self.conv)):\n plt.plot(self.conv[i], '-o', markersize=10)\n plt.semilogy()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.savefig(os.path.join(save_path, 'conv_log.png'), bbox_inches=\"tight\")\n\n self.conv2 = [[np.min(self.conv[i][:k]) for k in range(1, len(self.conv[i]))] for i in range(len(self.conv))]\n conv_rate = [self.conv2[i][0]*np.array([(1/k) for k in range(1,len(self.conv2[i]))]) for i in range(len(self.conv2))]\n\n plt.figure(4)\n fig, ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n for i in range(len(self.conv2)):\n plt.plot(self.conv2[i], '-o', markersize=10, label='GS-PnP')\n plt.plot(conv_rate[i], '--', color='red', label=r'$\\mathcal{O}(\\frac{1}{K})$')\n plt.semilogy()\n plt.legend()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.savefig(os.path.join(save_path, 'conv_log2.png'), bbox_inches=\"tight\")\n\n plt.figure(5)\n fig, ax = plt.subplots()\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n for i in range(len(self.lip_algo)):\n plt.plot(self.lip_algo[i], '-o', label='im_' + str(i))\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.grid()\n plt.savefig(os.path.join(save_path, 'lip_algo.png'))\n\n plt.figure(6)\n for i in range(len(self.lip_D)):\n plt.plot(self.lip_D[i], '-o', label='im_' + str(i))\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.grid()\n plt.savefig(os.path.join(save_path, 'lip_D.png'))\n\n\n def add_specific_args(parent_parser):\n parser = ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--denoiser_name', type=str, default='GS-DRUNet')\n parser.add_argument('--dataset_path', type=str, default='../datasets')\n parser.add_argument('--pretrained_checkpoint', type=str,default='../GS_denoising/ckpts/GSDRUNet.ckpt')\n parser.add_argument('--PnP_algo', type=str, default='HQS')\n parser.add_argument('--dataset_name', type=str, default='CBSD10')\n parser.add_argument('--sigma_denoiser', type=float)\n parser.add_argument('--noise_level_img', type=float, default=2.55)\n parser.add_argument('--maxitr', type=int, default=400)\n parser.add_argument('--lamb', type=float, default=0.1)\n parser.add_argument('--tau', type=float)\n parser.add_argument('--n_images', type=int, default=68)\n parser.add_argument('--weight_Ds', type=float, default=1.)\n parser.add_argument('--eta_tau', type=float, default=0.9)\n parser.add_argument('--gamma', type=float, default=0.1)\n parser.add_argument('--no_use_backtracking', dest='use_backtracking', action='store_false')\n parser.set_defaults(use_backtracking=True)\n 
parser.add_argument('--relative_diff_F_min', type=float, default=1e-6)\n parser.add_argument('--inpainting_init', dest='inpainting_init', action='store_true')\n parser.set_defaults(inpainting_init=False)\n parser.add_argument('--precision', type=str, default='simple')\n parser.add_argument('--n_init', type=int, default=10)\n parser.add_argument('--patch_size', type=int, default=256)\n parser.add_argument('--extract_curves', dest='extract_curves', action='store_true')\n parser.set_defaults(extract_curves=False)\n parser.add_argument('--extract_images', dest='extract_images', action='store_true')\n parser.set_defaults(extract_images=False)\n parser.add_argument('--print_each_step', dest='print_each_step', action='store_true')\n parser.set_defaults(print_each_step=False)\n parser.add_argument('--act_mode_denoiser', type=str, default='E')\n return parser\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.semilogy", "torch.norm", "numpy.expand_dims", "numpy.abs", "torch.load", "numpy.min", "matplotlib.pyplot.subplots", "torch.tensor", "matplotlib.pyplot.plot", "matplotlib.rcParams.update", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.grid", "torch.cuda.is_available", "numpy.array", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
12190143/transformers
[ "ab90353f1abfd15f8d21f99395658d060679a08c", "ab90353f1abfd15f8d21f99395658d060679a08c" ]
[ "examples/mm-imdb/run_mmimdb.py", "src/transformers/modeling_xlm.py" ]
[ "# coding=utf-8\n# Copyright (c) Facebook, Inc. and its affiliates.\n# Copyright (c) HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Finetuning the library models for multimodal multiclass prediction on MM-IMDB dataset.\"\"\"\n\n\nimport argparse\nimport glob\nimport json\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom sklearn.metrics import f1_score\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n AlbertConfig,\n AlbertModel,\n AlbertTokenizer,\n BertConfig,\n BertModel,\n BertTokenizer,\n DistilBertConfig,\n DistilBertModel,\n DistilBertTokenizer,\n MMBTConfig,\n MMBTForClassification,\n RobertaConfig,\n RobertaModel,\n RobertaTokenizer,\n XLMConfig,\n XLMModel,\n XLMTokenizer,\n XLNetConfig,\n XLNetModel,\n XLNetTokenizer,\n get_linear_schedule_with_warmup,\n)\nfrom utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n from tensorboardX import SummaryWriter\n\n\nlogger = logging.getLogger(__name__)\n\nALL_MODELS = sum(\n (\n tuple(conf.pretrained_config_archive_map.keys())\n for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig, DistilBertConfig)\n ),\n (),\n)\n\nMODEL_CLASSES = {\n \"bert\": (BertConfig, BertModel, BertTokenizer),\n \"xlnet\": (XLNetConfig, XLNetModel, XLNetTokenizer),\n \"xlm\": (XLMConfig, XLMModel, XLMTokenizer),\n \"roberta\": (RobertaConfig, RobertaModel, RobertaTokenizer),\n \"distilbert\": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),\n \"albert\": (AlbertConfig, AlbertModel, AlbertTokenizer),\n}\n\n\ndef set_seed(args):\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n\ndef train(args, train_dataset, model, tokenizer, criterion):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset,\n sampler=train_sampler,\n batch_size=args.train_batch_size,\n collate_fn=collate_fn,\n num_workers=args.num_workers,\n )\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n 
for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n best_f1, n_no_improve = 0, 0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0])\n set_seed(args) # Added here for reproducibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n labels = batch[5]\n inputs = {\n \"input_ids\": batch[0],\n \"input_modal\": batch[2],\n \"attention_mask\": batch[1],\n \"modal_start_tokens\": batch[3],\n \"modal_end_tokens\": batch[4],\n }\n outputs = model(**inputs)\n logits = outputs[0] # model outputs are always tuple in transformers (see doc)\n loss = criterion(logits, labels)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n logs = {}\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results = evaluate(args, model, tokenizer, criterion)\n for 
key, value in results.items():\n eval_key = \"eval_{}\".format(key)\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / args.logging_steps\n learning_rate_scalar = scheduler.get_lr()[0]\n logs[\"learning_rate\"] = learning_rate_scalar\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n for key, value in logs.items():\n tb_writer.add_scalar(key, value, global_step)\n print(json.dumps({**logs, **{\"step\": global_step}}))\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME))\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank == -1:\n results = evaluate(args, model, tokenizer, criterion)\n if results[\"micro_f1\"] > best_f1:\n best_f1 = results[\"micro_f1\"]\n n_no_improve = 0\n else:\n n_no_improve += 1\n\n if n_no_improve > args.patience:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n\n\ndef evaluate(args, model, tokenizer, criterion, prefix=\"\"):\n # Loop to handle MNLI double evaluation (matched, mis-matched)\n eval_output_dir = args.output_dir\n eval_dataset = load_examples(args, tokenizer, evaluate=True)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate_fn\n )\n\n # multi-gpu eval\n if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation {} *****\".format(prefix))\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n model.eval()\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n batch = tuple(t.to(args.device) for t in batch)\n labels = batch[5]\n inputs = {\n \"input_ids\": batch[0],\n \"input_modal\": batch[2],\n \"attention_mask\": batch[1],\n \"modal_start_tokens\": batch[3],\n \"modal_end_tokens\": batch[4],\n }\n outputs = model(**inputs)\n logits = outputs[0] # model outputs are always tuple in transformers (see doc)\n tmp_eval_loss = criterion(logits, labels)\n eval_loss += tmp_eval_loss.mean().item()\n nb_eval_steps += 1\n if preds is None:\n preds = torch.sigmoid(logits).detach().cpu().numpy() > 0.5\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy() > 0.5, axis=0)\n out_label_ids = np.append(out_label_ids, 
labels.detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n result = {\n \"loss\": eval_loss,\n \"macro_f1\": f1_score(out_label_ids, preds, average=\"macro\"),\n \"micro_f1\": f1_score(out_label_ids, preds, average=\"micro\"),\n }\n\n output_eval_file = os.path.join(eval_output_dir, prefix, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results {} *****\".format(prefix))\n for key in sorted(result.keys()):\n logger.info(\" %s = %s\", key, str(result[key]))\n writer.write(\"%s = %s\\n\" % (key, str(result[key])))\n\n return result\n\n\ndef load_examples(args, tokenizer, evaluate=False):\n path = os.path.join(args.data_dir, \"dev.jsonl\" if evaluate else \"train.jsonl\")\n transforms = get_image_transforms()\n labels = get_mmimdb_labels()\n dataset = JsonlDataset(path, tokenizer, transforms, labels, args.max_seq_length - args.num_image_embeds - 2)\n return dataset\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .jsonl files for MMIMDB.\",\n )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n # Other parameters\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\n \"--num_image_embeds\", default=1, type=int, help=\"Number of Image Embeddings from the Image Encoder\"\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\n \"--evaluate_during_training\", action=\"store_true\", help=\"Run evaluation during training at each logging step.\"\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of update steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\"--patience\", default=5, type=int, help=\"Patience for Early Stopping.\")\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=50, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=50, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\"--num_workers\", type=int, default=8, help=\"number of worker threads for dataloading\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n args = parser.parse_args()\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n\n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n\n # Load pretrained model and tokenizer\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n # Setup model\n labels = get_mmimdb_labels()\n num_labels = len(labels)\n args.model_type = args.model_type.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n transformer_config = config_class.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path\n )\n tokenizer = tokenizer_class.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n do_lower_case=args.do_lower_case,\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n transformer = model_class.from_pretrained(\n args.model_name_or_path, config=transformer_config, cache_dir=args.cache_dir if args.cache_dir else None\n )\n img_encoder = ImageEncoder(args)\n config = MMBTConfig(transformer_config, num_labels=num_labels)\n model = MMBTForClassification(config, transformer, img_encoder)\n\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n model.to(args.device)\n\n logger.info(\"Training/evaluation parameters %s\", args)\n\n # Training\n if args.do_train:\n train_dataset = load_examples(args, tokenizer, evaluate=False)\n label_frequences = train_dataset.get_label_frequencies()\n label_frequences = [label_frequences[l] for l in labels]\n label_weights = (\n torch.tensor(label_frequences, device=args.device, dtype=torch.float) / len(train_dataset)\n ) ** -1\n criterion = nn.BCEWithLogitsLoss(pos_weight=label_weights)\n global_step, tr_loss = train(args, train_dataset, model, tokenizer, criterion)\n logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n\n # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()\n if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n # Create output directory if needed\n if not os.path.exists(args.output_dir) 
and args.local_rank in [-1, 0]:\n os.makedirs(args.output_dir)\n\n logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n torch.save(model_to_save.state_dict(), os.path.join(args.output_dir, WEIGHTS_NAME))\n tokenizer.save_pretrained(args.output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n # Load a trained model and vocabulary that you have fine-tuned\n model = MMBTForClassification(config, transformer, img_encoder)\n model.load_state_dict(torch.load(os.path.join(args.output_dir, WEIGHTS_NAME)))\n tokenizer = tokenizer_class.from_pretrained(args.output_dir)\n model.to(args.device)\n\n # Evaluation\n results = {}\n if args.do_eval and args.local_rank in [-1, 0]:\n tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)\n checkpoints = [args.output_dir]\n if args.eval_all_checkpoints:\n checkpoints = list(\n os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n )\n logging.getLogger(\"transformers.modeling_utils\").setLevel(logging.WARN) # Reduce logging\n logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n for checkpoint in checkpoints:\n global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n prefix = checkpoint.split(\"/\")[-1] if checkpoint.find(\"checkpoint\") != -1 else \"\"\n model = MMBTForClassification(config, transformer, img_encoder)\n model.load_state_dict(torch.load(os.path.join(checkpoint, WEIGHTS_NAME)))\n model.to(args.device)\n result = evaluate(args, model, tokenizer, criterion, prefix=prefix)\n result = dict((k + \"_{}\".format(global_step), v) for k, v in result.items())\n results.update(result)\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n", "# coding=utf-8\n# Copyright 2019-present, Facebook, Inc. and the HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch XLM model.\n\"\"\"\n\n\nimport itertools\nimport logging\nimport math\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\nfrom torch.nn import functional as F\n\nfrom .activations import gelu\nfrom .configuration_xlm import XLMConfig\nfrom .file_utils import add_start_docstrings, add_start_docstrings_to_callable\nfrom .modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead, prune_linear_layer\n\n\nlogger = logging.getLogger(__name__)\n\nXLM_PRETRAINED_MODEL_ARCHIVE_MAP = {\n \"xlm-mlm-en-2048\": \"https://cdn.huggingface.co/xlm-mlm-en-2048-pytorch_model.bin\",\n \"xlm-mlm-ende-1024\": \"https://cdn.huggingface.co/xlm-mlm-ende-1024-pytorch_model.bin\",\n \"xlm-mlm-enfr-1024\": \"https://cdn.huggingface.co/xlm-mlm-enfr-1024-pytorch_model.bin\",\n \"xlm-mlm-enro-1024\": \"https://cdn.huggingface.co/xlm-mlm-enro-1024-pytorch_model.bin\",\n \"xlm-mlm-tlm-xnli15-1024\": \"https://cdn.huggingface.co/xlm-mlm-tlm-xnli15-1024-pytorch_model.bin\",\n \"xlm-mlm-xnli15-1024\": \"https://cdn.huggingface.co/xlm-mlm-xnli15-1024-pytorch_model.bin\",\n \"xlm-clm-enfr-1024\": \"https://cdn.huggingface.co/xlm-clm-enfr-1024-pytorch_model.bin\",\n \"xlm-clm-ende-1024\": \"https://cdn.huggingface.co/xlm-clm-ende-1024-pytorch_model.bin\",\n \"xlm-mlm-17-1280\": \"https://cdn.huggingface.co/xlm-mlm-17-1280-pytorch_model.bin\",\n \"xlm-mlm-100-1280\": \"https://cdn.huggingface.co/xlm-mlm-100-1280-pytorch_model.bin\",\n}\n\n\ndef create_sinusoidal_embeddings(n_pos, dim, out):\n position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])\n out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))\n out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))\n out.detach_()\n out.requires_grad = False\n\n\ndef get_masks(slen, lengths, causal, padding_mask=None):\n \"\"\"\n Generate hidden states mask, and optionally an attention mask.\n \"\"\"\n alen = torch.arange(slen, dtype=torch.long, device=lengths.device)\n if padding_mask is not None:\n mask = padding_mask\n else:\n assert lengths.max().item() <= slen\n mask = alen < lengths[:, None]\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n bs = lengths.size(0)\n if causal:\n attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]\n else:\n attn_mask = mask\n\n # sanity check\n assert mask.size() == (bs, slen)\n assert causal is False or attn_mask.size() == (bs, slen, slen)\n\n return mask, attn_mask\n\n\nclass MultiHeadAttention(nn.Module):\n\n NEW_ID = itertools.count()\n\n def __init__(self, n_heads, dim, config):\n super().__init__()\n self.layer_id = next(MultiHeadAttention.NEW_ID)\n self.output_attentions = config.output_attentions\n self.dim = dim\n self.n_heads = n_heads\n self.dropout = config.attention_dropout\n assert self.dim % self.n_heads == 0\n\n self.q_lin = nn.Linear(dim, dim)\n self.k_lin = 
nn.Linear(dim, dim)\n self.v_lin = nn.Linear(dim, dim)\n self.out_lin = nn.Linear(dim, dim)\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n attention_head_size = self.dim // self.n_heads\n if len(heads) == 0:\n return\n mask = torch.ones(self.n_heads, attention_head_size)\n heads = set(heads) - self.pruned_heads\n for head in heads:\n head -= sum(1 if h < head else 0 for h in self.pruned_heads)\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n # Prune linear layers\n self.q_lin = prune_linear_layer(self.q_lin, index)\n self.k_lin = prune_linear_layer(self.k_lin, index)\n self.v_lin = prune_linear_layer(self.v_lin, index)\n self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.dim = attention_head_size * self.n_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, input, mask, kv=None, cache=None, head_mask=None):\n \"\"\"\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n \"\"\"\n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = input.size()\n if kv is None:\n klen = qlen if cache is None else cache[\"slen\"] + qlen\n else:\n klen = kv.size(1)\n # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)\n n_heads = self.n_heads\n dim_per_head = self.dim // n_heads\n mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)\n\n def shape(x):\n \"\"\" projection \"\"\"\n return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)\n\n def unshape(x):\n \"\"\" compute context \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)\n\n q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n if kv is None:\n k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)\n elif cache is None or self.layer_id not in cache:\n k = v = kv\n k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)\n v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)\n\n if cache is not None:\n if self.layer_id in cache:\n if kv is None:\n k_, v_ = cache[self.layer_id]\n k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)\n v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)\n else:\n k, v = cache[self.layer_id]\n cache[self.layer_id] = (k, v)\n\n q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)\n scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)\n mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)\n scores.masked_fill_(mask, -float(\"inf\")) # (bs, n_heads, qlen, klen)\n\n weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)\n weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)\n context = unshape(context) # (bs, qlen, dim)\n\n outputs = (self.out_lin(context),)\n if self.output_attentions:\n outputs = outputs + (weights,)\n return outputs\n\n\nclass TransformerFFN(nn.Module):\n def __init__(self, in_dim, dim_hidden, out_dim, config):\n super().__init__()\n self.dropout = config.dropout\n self.lin1 = nn.Linear(in_dim, 
dim_hidden)\n self.lin2 = nn.Linear(dim_hidden, out_dim)\n self.act = gelu if config.gelu_activation else F.relu\n\n def forward(self, input):\n x = self.lin1(input)\n x = self.act(x)\n x = self.lin2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n return x\n\n\nclass XLMPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = XLMConfig\n pretrained_model_archive_map = XLM_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = None\n base_model_prefix = \"transformer\"\n\n def __init__(self, *inputs, **kwargs):\n super().__init__(*inputs, **kwargs)\n\n @property\n def dummy_inputs(self):\n inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])\n attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n if self.config.use_lang_emb and self.config.n_langs > 1:\n langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])\n else:\n langs_list = None\n return {\"input_ids\": inputs_list, \"attention_mask\": attns_list, \"langs\": langs_list}\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights. \"\"\"\n if isinstance(module, nn.Embedding):\n if self.config is not None and self.config.embed_init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)\n if isinstance(module, nn.Linear):\n if self.config is not None and self.config.init_std is not None:\n nn.init.normal_(module.weight, mean=0, std=self.config.init_std)\n if hasattr(module, \"bias\") and module.bias is not None:\n nn.init.constant_(module.bias, 0.0)\n if isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n\nXLM_START_DOCSTRING = r\"\"\"\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.XLMConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nXLM_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using :class:`transformers.BertTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? 
<../glossary.html#attention-mask>`__\n langs (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n A parallel sequence of tokens to be used to indicate the language of each token in the input.\n Indices are language ids which can be obtained from the language names by using two conversion mappings\n provided in the configuration of the model (only provided for multilingual models).\n More precisely, the `language name -> language id` mapping is in `model.config.lang2id` (dict str -> int) and\n the `language id -> language name` mapping is `model.config.id2lang` (dict int -> str).\n\n See usage examples detailed in the `multilingual documentation <https://huggingface.co/transformers/multilingual.html>`__.\n token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Segment token indices to indicate first and second portions of the inputs.\n Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n corresponds to a `sentence B` token\n\n `What are token type IDs? <../glossary.html#token-type-ids>`_\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? <../glossary.html#position-ids>`_\n lengths (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Length of each sentence that can be used to avoid performing attention on padding token indices.\n You can also use `attention_mask` for the same result (see above), kept here for compatibility.\n Indices selected in ``[0, ..., input_ids.size(-1)]``:\n cache (:obj:`Dict[str, torch.FloatTensor]`, `optional`, defaults to :obj:`None`):\n dictionary with ``torch.FloatTensor`` that contains pre-computed\n hidden-states (key and values in the attention blocks) as computed by the model\n (see `cache` output below). 
Can be used to speed up sequential decoding.\n The dictionary object will be modified in-place during the forward pass to add newly computed hidden-states.\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n input_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare XLM Model transformer outputting raw hidden-states without any specific head on top.\",\n XLM_START_DOCSTRING,\n)\nclass XLMModel(XLMPreTrainedModel):\n def __init__(self, config): # , dico, is_encoder, with_output):\n super().__init__(config)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n\n # encoder / decoder, output layer\n self.is_encoder = config.is_encoder\n self.is_decoder = not config.is_encoder\n if self.is_decoder:\n raise NotImplementedError(\"Currently XLM can only be used as an encoder\")\n # self.with_output = with_output\n self.causal = config.causal\n\n # dictionary / languages\n self.n_langs = config.n_langs\n self.use_lang_emb = config.use_lang_emb\n self.n_words = config.n_words\n self.eos_index = config.eos_index\n self.pad_index = config.pad_index\n # self.dico = dico\n # self.id2lang = config.id2lang\n # self.lang2id = config.lang2id\n # assert len(self.dico) == self.n_words\n # assert len(self.id2lang) == len(self.lang2id) == self.n_langs\n\n # model parameters\n self.dim = config.emb_dim # 512 by default\n self.hidden_dim = self.dim * 4 # 2048 by default\n self.n_heads = config.n_heads # 8 by default\n self.n_layers = config.n_layers\n self.dropout = config.dropout\n self.attention_dropout = config.attention_dropout\n assert self.dim % self.n_heads == 0, \"transformer dim must be a multiple of n_heads\"\n\n # embeddings\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)\n if config.sinusoidal_embeddings:\n create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)\n if config.n_langs > 1 and config.use_lang_emb:\n self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)\n self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)\n self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)\n\n # transformer layers\n self.attentions = nn.ModuleList()\n self.layer_norm1 = nn.ModuleList()\n self.ffns = nn.ModuleList()\n self.layer_norm2 = nn.ModuleList()\n # if self.is_decoder:\n # self.layer_norm15 = nn.ModuleList()\n # self.encoder_attn = nn.ModuleList()\n\n for _ in range(self.n_layers):\n self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))\n self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # if self.is_decoder:\n # self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n # self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))\n 
self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))\n self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))\n\n if hasattr(config, \"pruned_heads\"):\n pruned_heads = config.pruned_heads.copy().items()\n config.pruned_heads = {}\n for layer, heads in pruned_heads:\n if self.attentions[int(layer)].n_heads == config.n_heads:\n self.prune_heads({int(layer): list(map(int, heads))})\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.attentions[layer].prune_heads(heads)\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n ):\n r\"\"\"\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import XLMTokenizer, XLMModel\n import torch\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMModel.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n if input_ids is not None:\n bs, slen = input_ids.size()\n else:\n bs, slen = inputs_embeds.size()[:-1]\n\n if lengths is None:\n if input_ids is not None:\n lengths = (input_ids != self.pad_index).sum(dim=1).long()\n else:\n lengths = torch.LongTensor([slen] * bs)\n # mask = input_ids != self.pad_index\n\n # check inputs\n assert lengths.size(0) == bs\n assert lengths.max().item() <= slen\n # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0\n # assert (src_enc is None) == (src_len is None)\n # if src_enc is not None:\n # assert self.is_decoder\n # assert src_enc.size(0) == bs\n\n # generate masks\n mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)\n # if self.is_decoder and src_enc is not None:\n # src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < 
src_len[:, None]\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n # position_ids\n if position_ids is None:\n position_ids = torch.arange(slen, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand((bs, slen))\n else:\n assert position_ids.size() == (bs, slen) # (slen, bs)\n # position_ids = position_ids.transpose(0, 1)\n\n # langs\n if langs is not None:\n assert langs.size() == (bs, slen) # (slen, bs)\n # langs = langs.transpose(0, 1)\n\n # Prepare head mask if needed\n head_mask = self.get_head_mask(head_mask, self.config.n_layers)\n\n # do not recompute cached elements\n if cache is not None and input_ids is not None:\n _slen = slen - cache[\"slen\"]\n input_ids = input_ids[:, -_slen:]\n position_ids = position_ids[:, -_slen:]\n if langs is not None:\n langs = langs[:, -_slen:]\n mask = mask[:, -_slen:]\n attn_mask = attn_mask[:, -_slen:]\n\n # embeddings\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids)\n\n tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)\n if langs is not None and self.use_lang_emb and self.n_langs > 1:\n tensor = tensor + self.lang_embeddings(langs)\n if token_type_ids is not None:\n tensor = tensor + self.embeddings(token_type_ids)\n tensor = self.layer_norm_emb(tensor)\n tensor = F.dropout(tensor, p=self.dropout, training=self.training)\n tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n\n # transformer layers\n hidden_states = ()\n attentions = ()\n for i in range(self.n_layers):\n if self.output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # self attention\n attn_outputs = self.attentions[i](tensor, attn_mask, cache=cache, head_mask=head_mask[i])\n attn = attn_outputs[0]\n if self.output_attentions:\n attentions = attentions + (attn_outputs[1],)\n attn = F.dropout(attn, p=self.dropout, training=self.training)\n tensor = tensor + attn\n tensor = self.layer_norm1[i](tensor)\n\n # encoder attention (for decoder only)\n # if self.is_decoder and src_enc is not None:\n # attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)\n # attn = F.dropout(attn, p=self.dropout, training=self.training)\n # tensor = tensor + attn\n # tensor = self.layer_norm15[i](tensor)\n\n # FFN\n tensor = tensor + self.ffns[i](tensor)\n tensor = self.layer_norm2[i](tensor)\n tensor *= mask.unsqueeze(-1).to(tensor.dtype)\n\n # Add last hidden state\n if self.output_hidden_states:\n hidden_states = hidden_states + (tensor,)\n\n # update cache length\n if cache is not None:\n cache[\"slen\"] += tensor.size(1)\n\n # move back sequence length to dimension 0\n # tensor = tensor.transpose(0, 1)\n\n outputs = (tensor,)\n if self.output_hidden_states:\n outputs = outputs + (hidden_states,)\n if self.output_attentions:\n outputs = outputs + (attentions,)\n return outputs # outputs, (hidden_states), (attentions)\n\n\nclass XLMPredLayer(nn.Module):\n \"\"\"\n Prediction layer (cross_entropy or adaptive_softmax).\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.asm = config.asm\n self.n_words = config.n_words\n self.pad_index = config.pad_index\n dim = config.emb_dim\n\n if config.asm is False:\n self.proj = nn.Linear(dim, config.n_words, bias=True)\n else:\n self.proj = nn.AdaptiveLogSoftmaxWithLoss(\n in_features=dim,\n n_classes=config.n_words,\n cutoffs=config.asm_cutoffs,\n div_value=config.asm_div_value,\n head_bias=True, # default is False\n )\n\n def forward(self, x, y=None):\n \"\"\" Compute the loss, and optionally the 
scores.\n \"\"\"\n outputs = ()\n if self.asm is False:\n scores = self.proj(x)\n outputs = (scores,) + outputs\n if y is not None:\n loss = F.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction=\"mean\")\n outputs = (loss,) + outputs\n else:\n scores = self.proj.log_prob(x)\n outputs = (scores,) + outputs\n if y is not None:\n _, loss = self.proj(x, y)\n outputs = (loss,) + outputs\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"The XLM Model transformer with a language modeling head on top\n (linear layer with weights tied to the input embeddings). \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMWithLMHeadModel(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.transformer = XLMModel(config)\n self.pred_layer = XLMPredLayer(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.pred_layer.proj\n\n def prepare_inputs_for_generation(self, input_ids, **kwargs):\n mask_token_id = self.config.mask_token_id\n lang_id = self.config.lang_id\n\n effective_batch_size = input_ids.shape[0]\n mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)\n input_ids = torch.cat([input_ids, mask_token], dim=1)\n if lang_id is not None:\n langs = torch.full_like(input_ids, lang_id)\n else:\n langs = None\n return {\"input_ids\": input_ids, \"langs\": langs}\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for language modeling.\n Note that the labels **are shifted** inside the model, i.e. 
you can set ``lm_labels = input_ids``\n Indices are selected in ``[-100, 0, ..., config.vocab_size]``\n All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n\n Return:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)\n Language modeling loss.\n prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import XLMTokenizer, XLMWithLMHeadModel\n import torch\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n output = transformer_outputs[0]\n outputs = self.pred_layer(output, labels)\n outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
\"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForSequenceClassification(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.transformer = XLMModel(config)\n self.sequence_summary = SequenceSummary(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import XLMTokenizer, XLMForSequenceClassification\n import torch\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMForSequenceClassification.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, logits = outputs[:2]\n\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n output = transformer_outputs[0]\n logits = self.sequence_summary(output)\n\n outputs = (logits,) + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a span 
classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForQuestionAnsweringSimple(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.transformer = XLMModel(config)\n self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-start scores (before SoftMax).\n end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):\n Span-end scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import XLMTokenizer, XLMForQuestionAnsweringSimple\n import torch\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMForQuestionAnsweringSimple.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n start_positions = torch.tensor([1])\n end_positions = torch.tensor([3])\n outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)\n loss = outputs[0]\n\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n 
langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n sequence_output = transformer_outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n outputs = (\n start_logits,\n end_logits,\n )\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split adds a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD (linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForQuestionAnswering(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.transformer = XLMModel(config)\n self.qa_outputs = SQuADHead(config)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n lengths=None,\n cache=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n is_impossible=None,\n cls_index=None,\n p_mask=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Positions outside of the sequence are not taken into account for computing the loss.\n is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels whether a question has an answer or no answer (SQuAD 2.0)\n cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the classification token to use as input for computing plausibility of the answer.\n p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):\n Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).\n 1.0 means token should be masked. 
0.0 means token is not masked.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):\n Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.\n start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Log probabilities for the top config.start_n_top start token possibilities (beam-search).\n start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Indices for the top config.start_n_top start token possibilities (beam-search).\n end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).\n end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).\n cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):\n Log probabilities for the ``is_impossible`` label of the answers.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import XLMTokenizer, XLMForQuestionAnswering\n import torch\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')\n model = XLMForQuestionAnswering.from_pretrained('xlm-mlm-en-2048')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n start_positions = torch.tensor([1])\n end_positions = torch.tensor([3])\n outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)\n loss = outputs[0]\n\n \"\"\"\n transformer_outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n lengths=lengths,\n cache=cache,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n )\n\n output = transformer_outputs[0]\n\n outputs = self.qa_outputs(\n output,\n start_positions=start_positions,\n end_positions=end_positions,\n cls_index=cls_index,\n is_impossible=is_impossible,\n p_mask=p_mask,\n )\n\n 
outputs = outputs + transformer_outputs[1:] # Keep new_mems and attention/hidden states if they are here\n\n return outputs\n\n\n@add_start_docstrings(\n \"\"\"XLM Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. \"\"\",\n XLM_START_DOCSTRING,\n)\nclass XLMForTokenClassification(XLMPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.transformer = XLMModel(config)\n self.dropout = nn.Dropout(config.dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n @add_start_docstrings_to_callable(XLM_INPUTS_DOCSTRING)\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n langs=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n labels=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n\n Returns:\n :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLMConfig`) and inputs:\n loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :\n Classification loss.\n scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`)\n Classification scores (before SoftMax).\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import XLMTokenizer, XLMForTokenClassification\n import torch\n\n tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-100-1280')\n model = XLMForTokenClassification.from_pretrained('xlm-mlm-100-1280')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, scores = outputs[:2]\n\n \"\"\"\n outputs = self.transformer(\n input_ids,\n attention_mask=attention_mask,\n langs=langs,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)\n active_labels = torch.where(\n active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)\n )\n loss = 
loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n" ]
[ [ "torch.load", "torch.utils.data.DataLoader", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.distributed.get_rank", "sklearn.metrics.f1_score", "torch.distributed.init_process_group", "torch.utils.data.distributed.DistributedSampler", "torch.distributed.barrier", "torch.tensor", "torch.sigmoid", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.utils.data.SequentialSampler", "torch.utils.data.RandomSampler", "torch.nn.DataParallel" ], [ "torch.nn.functional.dropout", "torch.cat", "torch.nn.Embedding", "torch.full_like", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "numpy.sin", "torch.tensor", "torch.arange", "torch.nn.AdaptiveLogSoftmaxWithLoss", "torch.LongTensor", "torch.full", "numpy.power", "torch.nn.init.constant_", "torch.nn.ModuleList", "torch.nn.Linear", "torch.nn.init.normal_", "numpy.cos", "torch.nn.LayerNorm", "torch.matmul", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shubhomb/clonal_evolution
[ "bbbd0a482293d41cadebe53028e0adeded591e6b", "bbbd0a482293d41cadebe53028e0adeded591e6b" ]
[ "simulation/subclone.py", "graph-model/main.py" ]
[ "import numpy as np\n\nclass Subclone:\n \"\"\"\n Initializes a Subclone Population.\n :attr label: Either A, B or S\n :attr fitness: Current fitness\n :attr prop: Current Proportion\n \"\"\"\n\n def __init__(self, lbl, c, alpha, prop=0.333, parent=None, birthtime=None, color=None):\n self.label = lbl\n self.fitness = 0.0\n self.prop = prop\n self.c = c\n self.parent = parent\n self.alpha = alpha\n self.bt = birthtime\n self.color = None\n\n def __str__(self):\n return self.label\n\n def update_fitness(self, treatment):\n \"\"\"\n Returns the fitness with the given environment for subclone [type]\n @ param treatment: 1d np.ndarray of shape (num_treatments) for intensity of treatment\n \"\"\"\n self.fitness = max(0, min(1, 1 - self.c - np.dot(self.alpha, treatment)))\n return self.fitness\n\n\n def log(self):\n print(\"Node: \", self.label)\n print(\"Birthtime: \", self.bt)\n print(f'\\t \\t Alpha: {self.alpha}')\n print(f'\\t \\t Prop: {self.prop}')\n print(f'\\t \\t Resistant: {self.c}')\n print(f'\\t \\t Fitness: {self.fitness}')\n", "\"\"\"\n This file contains functions necessary to simulate the entire evolution\n of a finite number of subclone colonies using a graph framework. \n\n First, we set up an environment containing the names of subclone colonies,\n the adjacency matrix describing the relationship, the alpha values denoting\n their immunity and their current state of relative proportion.\n\n Next, we pass this environment to the simulation engine with a few additional\n parameters and we can watch the network evolve. \n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport json\nimport doctor\nfrom graph import *\n\ndef parse_json(path_to_file):\n \"\"\"Return subclone environment as json dictionary\"\"\"\n with open(path_to_file) as f:\n data = json.load(f)\n return data\n\nclass Environment():\n \"\"\"\n This is used as an input to contruct the graph.\n \"\"\"\n def __init__(self, names, matrix, alpha, prop):\n self.names = names # Names of Subclone Colony (List)\n self.relations = matrix # Adj Matrix representing relations\n self.alpha = alpha\n self.prop = prop\n\n def log(self):\n print('Logging Environment:')\n items = [f' --> {d}' for d in zip(self.names, self.alpha, self.prop)]\n for i in items:\n print(i)\n print(20*'-*')\n \n def get_env_data(self):\n return [d for d in zip(self.names, self.alpha, self.prop)] \n\n\nclass Simulation():\n def __init__(self, env, graph, doc, MAX_TIME, debug=False):\n self.env = env\n self.graph = graph\n self.MAX_TIME = MAX_TIME\n self.debug = debug\n self.doctor = doc\n \n def printsim(self):\n self.env.log()\n self.graph.log()\n self.graph.nxgraph\n print(self.MAX_TIME)\n\n def update_fitness(self):\n \"\"\"\n Updates each node's fitness and recalculates average fitness\n \"\"\"\n total = 0\n for node in self.graph.nxgraph.nodes:\n node.update_fitness()\n total += node.colony.prop*node.fitness\n \n self.graph.avgfitness = total\n\n def update_proportion(self):\n \"\"\"\n Update each colony's proportions AFTER update_fitness(self)\n is run.\n \"\"\"\n for node in self.graph.nxgraph.nodes:\n node.colony.prop *= (node.fitness ) / (self.graph.avgfitness)\n\n\n\n\n def evolve(self, time, verbose=False):\n \"\"\" Takes in graph and evolves graph using doctor strategy\n \"\"\" \n target_node = self.doctor.choose_node(time)\n if verbose:\n print(\"t=%d Target Node: \"%(time, target_node.colony.name))\n self.graph.apply_medicine(target_node, 0.1, debug=True)\n\n\n def log(self):\n print('Model parameters:')\n for node in 
self.graph.nxgraph.nodes():\n node.log()\n \n\nif __name__ == \"__main__\":\n \"\"\"\n Begins simulation\n \"\"\"\n MAX_TIME = 20\n num_treatments = 2\n treatments = np.zeros(shape=(MAX_TIME, num_treatments))\n\n # Eventually put this into a json environment object\n names = ['RA', 'S', 'RB']\n relations = np.array([\n [1, 0.1, 0],\n [0.1, 1, 0.1],\n [0, 0.1, 1] ])\n \n alphas = [0.3, 0.3, 0.3]\n props = [0.33, 0.34, 0.33]\n\n # Make environment\n env = Environment(names, relations, alphas, props)\n graph = Graph(env)\n sim = Simulation(env, graph, MAX_TIME)\n\n # Model parameters at time t = 0\n print('-'*10 + f'SIMULATION TIME 0' + '-'*10)\n sim.graph.plot(0, fitness=True)\n sim.log()\n\n dataframes = []\n df = sim.graph.get_data()\n dataframes.append(df)\n for t in range(1, MAX_TIME):\n print('-'*10 + f'SIMULATION TIME {t}' + '-'*10)\n \n sim.evolve(t) # Evolve using specified Doctor's strategy\n sim.update_fitness() #Update fitness \n sim.update_proportion() # Update proportion MUST BE AFTER FITNESS \n # gives visual\n sim.graph.plot(t, fitness=True)\n sim.log() # Log data to console\n\n # Log dataframes for plotting\n df = sim.graph.get_data()\n dataframes.append(df)\n\n print(f'logged{len(dataframes)} dataframes')\n \n # Plot Data Proportion\n filtered = np.array(list(map(lambda x: list(x['prop']), dataframes)))\n print(filtered)\n xaxis = [i for i in range(MAX_TIME)]\n plt.plot(xaxis, filtered)\n plt.title('Proportion vs Time')\n plt.legend(names)\n plt.savefig('Proportion vs time.png')\n plt.close()\n\n # Plot Data Fitness\n filtered = np.array(list(map(lambda x: list(x['fitness']), dataframes)))\n print(filtered)\n xaxis = [i for i in range(MAX_TIME)]\n plt.plot(xaxis, filtered)\n plt.title('Fitness vs Time')\n plt.legend(names)\n plt.savefig('Fitness vs time.png')\n plt.close()\n\n" ]
[ [ "numpy.dot" ], [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
richtertill/node_embeddings
[ "7b34fd060178e90b0c82744f5ad2fd600723dbf2", "7b34fd060178e90b0c82744f5ad2fd600723dbf2" ]
[ "gust/tests/test_preprocessing.py", "utils/plot_util.py" ]
[ "import pytest\nimport numpy as np\nimport scipy.sparse as sp\n\nimport gust\n\n\nclass TestPreprocessing:\n def setup(self):\n self.A = sp.csr_matrix(np.array(\n [[1. , 0. , 0.5, 0. , 0. ],\n [0. , 1. , 1. , 0. , 1. ],\n [0.5, 0. , 1. , 0. , 0. ],\n [0. , 0. , 1. , 0. , 2. ],\n [0. , 1. , 0. , 0. , 0. ]]))\n\n def test_create_subgraph(self):\n spA = gust.SparseGraph(self.A.copy())\n keep = [0, 2, 3]\n spB = gust.create_subgraph(spA, nodes_to_keep=keep)\n # Check that changes are not done in-place\n assert np.allclose(self.A.A, spA.adj_matrix.A)\n B = sp.csr_matrix(np.array(\n [[1. , 0.5, 0. ],\n [0.5, 1. , 0. ],\n [0. , 1. , 0. ]]))\n assert np.allclose(B.A, spB.adj_matrix.A)\n\n def test_create_subgraph_edgeattrs(self):\n edge_attrs = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n spA = gust.SparseGraph(self.A, edge_attr_matrix=edge_attrs)\n keep = [0, 2, 3]\n spB = gust.create_subgraph(spA, nodes_to_keep=keep)\n # Check that changes are not done in-place\n assert np.allclose(spA.edge_attr_matrix, edge_attrs)\n B = sp.csr_matrix(np.array(\n [[1. , 0.5, 0. ],\n [0.5, 1. , 0. ],\n [0. , 1. , 0. ]]))\n edge_attrs_B = np.array([0, 1, 5, 6, 7])\n assert np.allclose(B.A, spB.adj_matrix.A)\n assert np.allclose(spB.edge_attr_matrix, edge_attrs_B)\n\n def test_remove_self_loops(self):\n spA = gust.SparseGraph(self.A.copy())\n spB = gust.remove_self_loops(spA)\n # Check that changes are not done in-place\n assert np.allclose(self.A.A, spA.adj_matrix.A)\n B = sp.csr_matrix(np.array(\n [[0. , 0. , 0.5, 0. , 0. ],\n [0. , 0. , 1. , 0. , 1. ],\n [0.5, 0. , 0. , 0. , 0. ],\n [0. , 0. , 1. , 0. , 2. ],\n [0. , 1. , 0. , 0. , 0. ]]))\n assert np.allclose(B.A, spB.adj_matrix.A)\n\n def test_remove_self_loops_edgeattrs(self):\n edge_attrs = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n spA = gust.SparseGraph(self.A, edge_attr_matrix=edge_attrs)\n spB = gust.remove_self_loops(spA)\n # Check that changes are not done in-place\n assert np.allclose(spA.edge_attr_matrix, edge_attrs)\n B = sp.csr_matrix(np.array(\n [[0. , 0. , 0.5, 0. , 0. ],\n [0. , 0. , 1. , 0. , 1. ],\n [0.5, 0. , 0. , 0. , 0. ],\n [0. , 0. , 1. , 0. , 2. ],\n [0. , 1. , 0. , 0. , 0. ]]))\n edge_attrs_B = np.array([1, 3, 4, 5, 7, 8, 9])\n assert np.allclose(B.A, spB.adj_matrix.A)\n assert np.allclose(spB.edge_attr_matrix, edge_attrs_B)\n\n def test_remove_full_self_loops(self):\n self.A += sp.eye(5)\n spA = gust.SparseGraph(self.A.copy())\n spB = gust.remove_self_loops(spA)\n # Check that changes are not done in-place\n assert np.allclose(self.A.A, spA.adj_matrix.A)\n B = sp.csr_matrix(np.array(\n [[0. , 0. , 0.5, 0. , 0. ],\n [0. , 0. , 1. , 0. , 1. ],\n [0.5, 0. , 0. , 0. , 0. ],\n [0. , 0. , 1. , 0. , 2. ],\n [0. , 1. , 0. , 0. , 0. ]]))\n assert np.allclose(B.A, spB.adj_matrix.A)\n\n def test_sparsegraph_to_from_networkx_simple(self):\n spA = gust.SparseGraph(self.A)\n nx_graph = gust.sparsegraph_to_networkx(spA)\n spB = gust.networkx_to_sparsegraph(nx_graph)\n assert np.allclose(spA.adj_matrix.A, spB.adj_matrix.A)\n\n @pytest.mark.parametrize('sparse', [True, False])\n def test_sparsegraph_to_from_networkx(self, sparse):\n\n # Set up original graph\n node_attrs = sp.csr_matrix(np.array(\n [[0. , 3. , 2. ],\n [0. , 0. , 4. ],\n [1. , 1. , 0. ],\n [0. , 0. , 1. ],\n [0. , 2. , 0. ]]))\n attr_names = np.array(['a', 'b', 'c'])\n edge_attrs = np.array(\n [[0 , 1. ],\n [1 , 0. ],\n [2 , 1. ],\n [3 , 0. ],\n [1 , 0. ],\n [5 , 4. ],\n [6 , 0. ],\n [7 , 3. ],\n [8 , 2. 
],\n [9 , 0.3]])\n edge_attr_names = np.array(['ae', 'be'])\n labels = np.array([0, 1, 1, 0, 2])\n class_names = np.array(['in', 'between', 'out'])\n A_sym = sp.csr_matrix(np.array(\n [[1. , 0. , 0.5, 0. , 0. ],\n [0. , 1. , 1. , 0. , 1. ],\n [0.5, 1. , 0. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 2. ],\n [0. , 1. , 0. , 2. , 0. ]]))\n edge_attrs_sym = np.array(\n [[0 , 1. ],\n [1 , 0. ],\n [2 , 1. ],\n [3 , 0. ],\n [8 , 2. ],\n [1 , 0. ],\n [3 , 0. ],\n [7 , 3. ],\n [8 , 2. ],\n [7 , 3.]])\n for i in range(2):\n A = self.A if i == 0 else A_sym\n edge_a = edge_attrs if i == 0 else edge_attrs_sym\n spA = gust.SparseGraph(\n A, attr_matrix=node_attrs, edge_attr_matrix=edge_a,\n attr_names=attr_names, edge_attr_names=edge_attr_names,\n labels=labels, class_names=class_names)\n\n # Convert to NetworkX and back\n nx_graph = gust.sparsegraph_to_networkx(spA)\n spB = gust.networkx_to_sparsegraph(\n nx_graph, label_name='label',\n sparse_node_attrs=sparse, sparse_edge_attrs=sparse)\n\n # Check adjacency matrix\n assert np.allclose(spA.adj_matrix.A, spB.adj_matrix.A)\n\n # Check node attributes\n assert len(spA.attr_names) == len(spB.attr_names)\n for iold, attr in enumerate(spA.attr_names):\n assert len(np.where(spB.attr_names == attr)[0]) == 1\n inew = np.where(spB.attr_names == attr)[0][0]\n if sparse:\n assert (spA.attr_matrix[:, iold]\n != spB.attr_matrix[:, inew]).nnz == 0\n else:\n assert np.allclose(spA.attr_matrix.A[:, iold],\n spB.attr_matrix[:, inew])\n\n # Check edge attributes\n assert len(spA.edge_attr_names) == len(spB.edge_attr_names)\n for iold, attr in enumerate(spA.edge_attr_names):\n assert len(np.where(spB.edge_attr_names == attr)[0]) == 1\n inew = np.where(spB.edge_attr_names == attr)[0][0]\n if sparse:\n assert np.allclose(spA.edge_attr_matrix[:, iold],\n spB.edge_attr_matrix.A[:, inew])\n else:\n assert np.allclose(spA.edge_attr_matrix[:, iold],\n spB.edge_attr_matrix[:, inew])\n\n # Check labels and class names\n assert len(spA.class_names) == len(spB.class_names)\n class_mapping = {}\n for iold, label in enumerate(spA.class_names):\n assert len(np.where(spB.class_names == label)[0]) == 1\n class_mapping[iold] = np.where(spB.class_names == label)[0][0]\n assert len(spA.labels) == len(spB.labels)\n all((class_mapping[old_label] == spB.labels[i]\n for i, old_label in enumerate(spA.labels)))\n\n\ndef test_largest_connected_components():\n A = sp.csr_matrix(np.array(\n [[1. , 0. , 0.5, 0. , 0. ],\n [0. , 1. , 1. , 0. , 0. ],\n [0.5, 0. , 1. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 2. ],\n [0. , 0. , 0. , 0. , 0. ]]))\n spA = gust.SparseGraph(A.copy())\n spB = gust.largest_connected_components(spA)\n # Check that changes are not done in-place\n assert np.allclose(A.A, spA.adj_matrix.A)\n B = sp.csr_matrix(np.array(\n [[1. , 0. , 0.5 ],\n [0. , 1. , 1. ],\n [0.5, 0. , 1. ]]))\n assert np.allclose(B.A, spB.adj_matrix.A)\n\n\ndef test_largest_connected_components_edgeattrs():\n A = sp.csr_matrix(np.array(\n [[1. , 0. , 0.5, 0. , 0. ],\n [0. , 1. , 0. , 0.5, 0. ],\n [0. , 0. , 1. , 0. , 0. ],\n [0. , 0. , 0. , 0. , 2. ],\n [0. , 0. , 0. , 0. , 0. ]]))\n edge_attrs = np.array([0, 1, 2, 3, 4, 5])\n spA = gust.SparseGraph(A, edge_attr_matrix=edge_attrs)\n spB = gust.largest_connected_components(spA)\n # Check that changes are not done in-place\n assert np.allclose(spA.edge_attr_matrix, edge_attrs)\n B = sp.csr_matrix(np.array(\n [[1. , 0.5, 0. ],\n [0. , 0. , 2. ],\n [0. , 0. , 0. 
]]))\n edge_attrs_B = np.array([2, 3, 5])\n assert np.allclose(B.A, spB.adj_matrix.A)\n assert np.allclose(spB.edge_attr_matrix, edge_attrs_B)\n", "import datetime\nimport pathlib\nimport pandas as pd\n\ndef create_experiment_folder():\n\tnow = datetime.datetime.now()\n\tyear = '{:04d}'.format(now.year)\n\tmonth = '{:02d}'.format(now.month)\n\tday = '{:02d}'.format(now.day)\n\tminute = '{:02d}'.format(now.minute)\n\thour = '{:02d}'.format(now.hour)\n\ttimestamp = year + \"_\" + month + \"_\" + day + \"_\" + hour + \"_\" + minute\n\tfoldername = timestamp\n\tdirectory_name = \"results/\" + foldername \n\tpathlib.Path(directory_name).mkdir(parents=True, exist_ok=True) \n\treturn directory_name\n\n\ndef setup_folders_and_summary_files(exp, datasets, embedding_methods):\n\tresult_folder = create_experiment_folder()\n\n\tsave_experiment_summary(result_folder,exp,datasets, embedding_methods)\n\n\tif(exp[\"link_prediction\"]):\n\t\tlink_prediction_folder = result_folder + \"/link_prediction\"\n\t\tpathlib.Path(link_prediction_folder).mkdir(parents=True, exist_ok=True)\n\n\t\tcol_names_link_prediction = ['embedding_method', 'dataset', 'run_number', 'auc_score']\n\t\tdf = pd.DataFrame(columns = col_names_link_prediction)\n\t\tdf.to_csv(f'{link_prediction_folder}/link_prediction_results.csv',index=False)\n\n\tif(exp[\"node_classification\"]):\n\t\tnode_classification_folder = result_folder + \"/node_classification\"\n\t\tpathlib.Path(node_classification_folder).mkdir(parents=True, exist_ok=True)\n\n\t\tcol_names_node_classification = ['embedding_method', 'dataset', 'run_number', 'acc_score']\n\t\tdf = pd.DataFrame(columns = col_names_node_classification)\n\t\tdf.to_csv(f'{node_classification_folder}/node_classification_results.csv',index=False)\n\n\tif(exp[\"node_clustering\"]):\n\t\tnode_clustering_folder = result_folder + \"/node_clustering\"\n\t\tpathlib.Path(node_clustering_folder).mkdir(parents=True, exist_ok=True)\n\n\t\tcol_names_node_clustering = ['embedding_method', 'dataset', 'run_number', 'nmi_score']\n\t\tdf = pd.DataFrame(columns = col_names_node_clustering)\n\t\tdf.to_csv(f'{node_clustering_folder}/node_clustering_results.csv',index=False)\n\n\treturn result_folder\n\ndef save_experiment_summary(result_folder,exp, datasets, embedding_methods):\n\twith open(result_folder + '/experiment_setup_summary.txt', 'w') as file:\n\t\tfile.write(\"## Experiment Setup Summary ## \\n\\n\")\n\t\tfile.write(\"Datasets used:\\n\")\n\t\tfor ds in datasets:\n\t\t\tfile.write(f'- {ds}\\n')\n\t\tfile.write(\"\\n\")\n\t\tfile.write(\"Embedding methods used:\\n\")\n\t\tfor em in embedding_methods:\n\t\t\tfile.write(f'{em.get_method_summary()}\\n')\n\t\tfile.write(\"\\n\")\n\t\tfile.write(\"Experiment Parameters:\\n\")\n\t\tfor key, value in exp.items():\n\t\t\tfile.write(f'- {key}: {value}\\n')\n" ]
[ [ "scipy.sparse.eye", "numpy.array", "numpy.where", "numpy.allclose" ], [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
DDXDaniel/PaddleViT
[ "9b78cd33feafe75daf68c5e5979ea7a27d4d40e9", "9b78cd33feafe75daf68c5e5979ea7a27d4d40e9" ]
[ "image_classification/PiT/main_multi_gpu.py", "image_classification/SwinTransformer/main_multi_gpu.py" ]
[ "# Copyright (c) 2021 PPViT Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"PiT training/validation using multiple GPU \"\"\"\n\nimport sys\nimport os\nimport time\nimport logging\nimport argparse\nimport random\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nimport paddle.distributed as dist\nfrom datasets import get_dataloader\nfrom datasets import get_dataset\nfrom utils import AverageMeter\nfrom utils import WarmupCosineScheduler\nfrom utils import get_exclude_from_weight_decay_fn\nfrom config import get_config\nfrom config import update_config\nfrom mixup import Mixup\nfrom losses import LabelSmoothingCrossEntropyLoss\nfrom losses import SoftTargetCrossEntropyLoss\nfrom losses import DistillationLoss\nfrom model_ema import ModelEma\nfrom pit import build_pit as build_model\nfrom regnet import build_regnet as build_teacher_model\n\n\ndef get_arguments():\n \"\"\"return argumeents, this will overwrite the config after loading yaml file\"\"\"\n parser = argparse.ArgumentParser('PiT')\n parser.add_argument('-cfg', type=str, default=None)\n parser.add_argument('-dataset', type=str, default=None)\n parser.add_argument('-batch_size', type=int, default=None)\n parser.add_argument('-image_size', type=int, default=None)\n parser.add_argument('-data_path', type=str, default=None)\n parser.add_argument('-ngpus', type=int, default=None)\n parser.add_argument('-pretrained', type=str, default=None)\n parser.add_argument('-resume', type=str, default=None)\n parser.add_argument('-last_epoch', type=int, default=None)\n parser.add_argument('-teacher_model', type=str, default=None)\n parser.add_argument('-eval', action='store_true')\n parser.add_argument('-amp', action='store_true')\n arguments = parser.parse_args()\n return arguments\n\n\ndef get_logger(filename, logger_name=None):\n \"\"\"set logging file and format\n Args:\n filename: str, full path of the logger file to write\n logger_name: str, the logger name, e.g., 'master_logger', 'local_logger'\n Return:\n logger: python logger\n \"\"\"\n log_format = \"%(asctime)s %(message)s\"\n logging.basicConfig(stream=sys.stdout, level=logging.INFO,\n format=log_format, datefmt=\"%m%d %I:%M:%S %p\")\n # different name is needed when creating multiple logger in one process\n logger = logging.getLogger(logger_name)\n fh = logging.FileHandler(os.path.join(filename))\n fh.setFormatter(logging.Formatter(log_format))\n logger.addHandler(fh)\n return logger\n\n\ndef train(dataloader,\n model,\n criterion,\n optimizer,\n epoch,\n total_epochs,\n total_batch,\n debug_steps=100,\n accum_iter=1,\n model_ema=None,\n mixup_fn=None,\n amp=False,\n local_logger=None,\n master_logger=None):\n \"\"\"Training for one epoch\n Args:\n dataloader: paddle.io.DataLoader, dataloader instance\n model: nn.Layer, a ViT model\n criterion: nn.criterion\n epoch: int, current epoch\n total_epochs: int, total num of epochs\n total_batch: int, total num of batches for one epoch\n debug_steps: int, num 
of iters to log info, default: 100\n accum_iter: int, num of iters for accumulating gradients, default: 1\n model_ema: ModelEma, model moving average instance\n mixup_fn: Mixup, mixup instance, default: None\n amp: bool, if True, use mix precision training, default: False\n local_logger: logger for local process/gpu, default: None\n master_logger: logger for main process, default: None\n Returns:\n train_loss_meter.avg: float, average loss on current process/gpu\n train_acc_meter.avg: float, average top1 accuracy on current process/gpu\n master_train_loss_meter.avg: float, average loss on all processes/gpus\n master_train_acc_meter.avg: float, average top1 accuracy on all processes/gpus\n train_time: float, training time\n \"\"\"\n model.train()\n train_loss_meter = AverageMeter()\n train_acc_meter = AverageMeter()\n master_train_loss_meter = AverageMeter()\n master_train_acc_meter = AverageMeter()\n\n if amp is True:\n scaler = paddle.amp.GradScaler(init_loss_scaling=1024)\n time_st = time.time()\n\n for batch_id, data in enumerate(dataloader):\n image = data[0]\n label = data[1]\n label_orig = label.clone()\n\n if mixup_fn is not None:\n image, label = mixup_fn(image, label_orig)\n \n if amp is True: # mixed precision training\n with paddle.amp.auto_cast():\n output = model(image) # output[0]: class_token, output[1]: distill_token\n loss = criterion(image, output, label)\n scaled = scaler.scale(loss)\n scaled.backward()\n if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n scaler.minimize(optimizer, scaled)\n optimizer.clear_grad()\n else: # full precision training\n output = model(image) # output[0]: class_token, output[1]: distill_token\n loss = criterion(image, output, label)\n #NOTE: division may be needed depending on the loss function\n # Here no division is needed:\n # default 'reduction' param in nn.CrossEntropyLoss is set to 'mean'\n #loss = loss / accum_iter\n loss.backward()\n\n if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n optimizer.step()\n optimizer.clear_grad()\n\n if model_ema is not None and dist.get_rank() == 0:\n model_ema.update(model)\n\n # average of output and kd_output, like model eval mode\n pred = F.softmax((output[0] + output[1]) / 2)\n if mixup_fn:\n acc = paddle.metric.accuracy(pred, label_orig)\n else:\n acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1))\n\n batch_size = paddle.to_tensor(image.shape[0])\n\n # sync from other gpus for overall loss and acc\n master_loss = loss.clone()\n master_acc = acc.clone()\n master_batch_size = batch_size.clone()\n dist.all_reduce(master_loss)\n dist.all_reduce(master_acc)\n dist.all_reduce(master_batch_size)\n master_loss = master_loss / dist.get_world_size()\n master_acc = master_acc / dist.get_world_size()\n master_train_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])\n master_train_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0])\n\n train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])\n train_acc_meter.update(acc.numpy()[0], batch_size.numpy()[0])\n\n if batch_id % debug_steps == 0:\n if local_logger:\n local_logger.info(\n f\"Epoch[{epoch:03d}/{total_epochs:03d}], \" +\n f\"Step[{batch_id:04d}/{total_batch:04d}], \" +\n f\"Avg Loss: {train_loss_meter.avg:.4f}, \" +\n f\"Avg Acc: {train_acc_meter.avg:.4f}\")\n if master_logger and dist.get_rank() == 0:\n master_logger.info(\n f\"Epoch[{epoch:03d}/{total_epochs:03d}], \" +\n f\"Step[{batch_id:04d}/{total_batch:04d}], \" +\n f\"Avg Loss: 
{master_train_loss_meter.avg:.4f}, \" +\n f\"Avg Acc: {master_train_acc_meter.avg:.4f}\")\n\n train_time = time.time() - time_st\n return (train_loss_meter.avg,\n train_acc_meter.avg,\n master_train_loss_meter.avg,\n master_train_acc_meter.avg,\n train_time)\n\n\ndef validate(dataloader,\n model,\n criterion,\n total_batch,\n debug_steps=100,\n local_logger=None,\n master_logger=None):\n \"\"\"Validation for whole dataset\n Args:\n dataloader: paddle.io.DataLoader, dataloader instance\n model: nn.Layer, a ViT model\n criterion: nn.criterion\n total_epoch: int, total num of epoch, for logging\n debug_steps: int, num of iters to log info, default: 100\n local_logger: logger for local process/gpu, default: None\n master_logger: logger for main process, default: None\n Returns:\n val_loss_meter.avg: float, average loss on current process/gpu\n val_acc1_meter.avg: float, average top1 accuracy on current process/gpu\n val_acc5_meter.avg: float, average top5 accuracy on current process/gpu\n master_val_loss_meter.avg: float, average loss on all processes/gpus\n master_val_acc1_meter.avg: float, average top1 accuracy on all processes/gpus\n master_val_acc5_meter.avg: float, average top5 accuracy on all processes/gpus\n val_time: float, validation time\n \"\"\"\n model.eval()\n val_loss_meter = AverageMeter()\n val_acc1_meter = AverageMeter()\n val_acc5_meter = AverageMeter()\n master_val_loss_meter = AverageMeter()\n master_val_acc1_meter = AverageMeter()\n master_val_acc5_meter = AverageMeter()\n time_st = time.time()\n\n with paddle.no_grad():\n for batch_id, data in enumerate(dataloader):\n image = data[0]\n label = data[1]\n\n output = model(image)\n loss = criterion(output, label)\n\n pred = F.softmax(output)\n acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1))\n acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5)\n\n batch_size = paddle.to_tensor(image.shape[0])\n\n master_loss = loss.clone()\n master_acc1 = acc1.clone()\n master_acc5 = acc5.clone()\n master_batch_size = batch_size.clone()\n\n dist.all_reduce(master_loss)\n dist.all_reduce(master_acc1)\n dist.all_reduce(master_acc5)\n dist.all_reduce(master_batch_size)\n master_loss = master_loss / dist.get_world_size()\n master_acc1 = master_acc1 / dist.get_world_size()\n master_acc5 = master_acc5 / dist.get_world_size()\n\n master_val_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])\n master_val_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0])\n master_val_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0])\n\n val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])\n val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0])\n val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0])\n\n if batch_id % debug_steps == 0:\n if local_logger:\n local_logger.info(\n f\"Val Step[{batch_id:04d}/{total_batch:04d}], \" +\n f\"Avg Loss: {val_loss_meter.avg:.4f}, \" +\n f\"Avg Acc@1: {val_acc1_meter.avg:.4f}, \" +\n f\"Avg Acc@5: {val_acc5_meter.avg:.4f}\")\n if master_logger and dist.get_rank() == 0:\n master_logger.info(\n f\"Val Step[{batch_id:04d}/{total_batch:04d}], \" +\n f\"Avg Loss: {master_val_loss_meter.avg:.4f}, \" +\n f\"Avg Acc@1: {master_val_acc1_meter.avg:.4f}, \" +\n f\"Avg Acc@5: {master_val_acc5_meter.avg:.4f}\")\n val_time = time.time() - time_st\n return (val_loss_meter.avg,\n val_acc1_meter.avg,\n val_acc5_meter.avg,\n master_val_loss_meter.avg,\n master_val_acc1_meter.avg,\n master_val_acc5_meter.avg,\n val_time)\n\n\ndef 
main_worker(*args):\n # STEP 0: Preparation\n config = args[0]\n dist.init_parallel_env()\n last_epoch = config.TRAIN.LAST_EPOCH\n world_size = dist.get_world_size()\n local_rank = dist.get_rank()\n seed = config.SEED + local_rank\n paddle.seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n # logger for each process/gpu\n local_logger = get_logger(\n filename=os.path.join(config.SAVE, 'log_{}.txt'.format(local_rank)),\n logger_name='local_logger')\n # overall logger\n if local_rank == 0:\n master_logger = get_logger(\n filename=os.path.join(config.SAVE, 'log.txt'),\n logger_name='master_logger')\n master_logger.info(f'\\n{config}')\n else:\n master_logger = None\n local_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}')\n if local_rank == 0:\n master_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}')\n \n # STEP 1: Create model\n model = build_model(config)\n # define model ema\n model_ema = None\n if not config.EVAL and config.TRAIN.MODEL_EMA and local_rank == 0:\n model_ema = ModelEma(model, decay=config.TRAIN.MODEL_EMA_DECAY)\n model = paddle.DataParallel(model)\n\n # STEP 2: Create train and val dataloader\n dataset_train, dataset_val = args[1], args[2]\n # Create training dataloader\n if not config.EVAL:\n dataloader_train = get_dataloader(config, dataset_train, 'train', True)\n total_batch_train = len(dataloader_train)\n local_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}')\n if local_rank == 0:\n master_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}')\n # Create validation dataloader\n dataloader_val = get_dataloader(config, dataset_val, 'test', True)\n total_batch_val = len(dataloader_val)\n local_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}')\n if local_rank == 0:\n master_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}')\n\n # STEP 3: Define Mixup function\n mixup_fn = None\n if config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or config.TRAIN.CUTMIX_MINMAX is not None:\n mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,\n cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,\n cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,\n prob=config.TRAIN.MIXUP_PROB,\n switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,\n mode=config.TRAIN.MIXUP_MODE,\n label_smoothing=config.TRAIN.SMOOTHING)\n\n # STEP 4: Define criterion\n if config.TRAIN.MIXUP_PROB > 0.:\n criterion = SoftTargetCrossEntropyLoss()\n elif config.TRAIN.SMOOTHING:\n criterion = LabelSmoothingCrossEntropyLoss()\n else:\n criterion = nn.CrossEntropyLoss()\n # only use cross entropy for val\n criterion_val = nn.CrossEntropyLoss()\n\n\n # 5. 
Create Teacher model\n    teacher_model = None\n    if not config.EVAL:\n        if config.TRAIN.DISTILLATION_TYPE != 'none':\n            local_logger.info(f'Creating teacher model: {config.TRAIN.TEACHER_MODEL}')\n            teacher_model = build_teacher_model()\n            assert os.path.isfile(config.TRAIN.TEACHER_MODEL + '.pdparams')\n            teacher_model_state = paddle.load(config.TRAIN.TEACHER_MODEL + '.pdparams')\n            teacher_model.set_dict(teacher_model_state)\n            teacher_model.eval()\n            teacher_model = paddle.DataParallel(teacher_model)\n            local_logger.info(f\"----- Load teacher model state from {config.TRAIN.TEACHER_MODEL}\")\n            # wrap the criterion:\n            criterion = DistillationLoss(criterion,\n                                         teacher_model,\n                                         config.TRAIN.DISTILLATION_TYPE,\n                                         config.TRAIN.DISTILLATION_ALPHA,\n                                         config.TRAIN.DISTILLATION_TAU)\n        else:\n            raise ValueError('Distillation type cannot be None')\n\n    # STEP 5: Define optimizer and lr_scheduler\n    # set lr according to batch size and world size (hacked from Swin official code and modified for CSwin)\n    if config.TRAIN.LINEAR_SCALED_LR is not None:\n        linear_scaled_lr = (\n            config.TRAIN.BASE_LR * config.DATA.BATCH_SIZE) / config.TRAIN.LINEAR_SCALED_LR\n        linear_scaled_warmup_start_lr = (\n            config.TRAIN.WARMUP_START_LR * config.DATA.BATCH_SIZE) / config.TRAIN.LINEAR_SCALED_LR\n        linear_scaled_end_lr = (\n            config.TRAIN.END_LR * config.DATA.BATCH_SIZE) / config.TRAIN.LINEAR_SCALED_LR\n    \n        if config.TRAIN.ACCUM_ITER > 1:\n            linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER\n            linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER\n            linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER\n        \n        config.TRAIN.BASE_LR = linear_scaled_lr\n        config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr\n        config.TRAIN.END_LR = linear_scaled_end_lr\n\n    scheduler = None\n    if config.TRAIN.LR_SCHEDULER.NAME == \"warmupcosine\":\n        scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR,\n                                          warmup_start_lr=config.TRAIN.WARMUP_START_LR,\n                                          start_lr=config.TRAIN.BASE_LR,\n                                          end_lr=config.TRAIN.END_LR,\n                                          warmup_epochs=config.TRAIN.WARMUP_EPOCHS,\n                                          total_epochs=config.TRAIN.NUM_EPOCHS,\n                                          last_epoch=config.TRAIN.LAST_EPOCH,\n                                          )\n    elif config.TRAIN.LR_SCHEDULER.NAME == \"cosine\":\n        scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR,\n                                                             T_max=config.TRAIN.NUM_EPOCHS,\n                                                             last_epoch=last_epoch)\n    elif config.TRAIN.LR_SCHEDULER.NAME == \"multi-step\":\n        milestones = [int(v.strip()) for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(\",\")]\n        scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR,\n                                                       milestones=milestones,\n                                                       gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE,\n                                                       last_epoch=last_epoch)\n    else:\n        local_logger.fatal(f\"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.\")\n        if local_rank == 0:\n            master_logger.fatal(f\"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.\")\n        raise NotImplementedError(f\"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.\")\n\n    if config.TRAIN.OPTIMIZER.NAME == \"SGD\":\n        if config.TRAIN.GRAD_CLIP:\n            clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)\n        else:\n            clip = None\n        optimizer = paddle.optimizer.Momentum(\n            parameters=model.parameters(),\n            learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,\n            weight_decay=config.TRAIN.WEIGHT_DECAY,\n            momentum=config.TRAIN.OPTIMIZER.MOMENTUM,\n            grad_clip=clip)\n    elif config.TRAIN.OPTIMIZER.NAME == \"AdamW\":\n        if config.TRAIN.GRAD_CLIP:\n            clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)\n        else:\n            clip = None\n        
optimizer = paddle.optimizer.AdamW(\n            parameters=model.parameters(),\n            learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,\n            beta1=config.TRAIN.OPTIMIZER.BETAS[0],\n            beta2=config.TRAIN.OPTIMIZER.BETAS[1],\n            weight_decay=config.TRAIN.WEIGHT_DECAY,\n            epsilon=config.TRAIN.OPTIMIZER.EPS,\n            grad_clip=clip,\n            apply_decay_param_fun=get_exclude_from_weight_decay_fn([\n                'absolute_pos_embed', 'relative_position_bias_table']),\n            )\n    else:\n        local_logger.fatal(f\"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.\")\n        if local_rank == 0:\n            master_logger.fatal(f\"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.\")\n        raise NotImplementedError(f\"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.\")\n\n    # STEP 6: Load pretrained model / load resume model and optimizer states\n    if config.MODEL.PRETRAINED:\n        if (config.MODEL.PRETRAINED).endswith('.pdparams'):\n            raise ValueError(f'{config.MODEL.PRETRAINED} should not contain .pdparams')\n        assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True\n        model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams')\n        model.set_dict(model_state)\n        local_logger.info(f\"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}\")\n        if local_rank == 0:\n            master_logger.info(\n                f\"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}\")\n\n    if config.MODEL.RESUME:\n        assert os.path.isfile(config.MODEL.RESUME + '.pdparams') is True\n        assert os.path.isfile(config.MODEL.RESUME + '.pdopt') is True\n        model_state = paddle.load(config.MODEL.RESUME + '.pdparams')\n        model.set_dict(model_state)\n        opt_state = paddle.load(config.MODEL.RESUME+'.pdopt')\n        optimizer.set_state_dict(opt_state)\n        local_logger.info(\n            f\"----- Resume Training: Load model and optimizer from {config.MODEL.RESUME}\")\n        if local_rank == 0:\n            master_logger.info(\n                f\"----- Resume Training: Load model and optimizer from {config.MODEL.RESUME}\")\n        # load ema model\n        if model_ema is not None and os.path.isfile(config.MODEL.RESUME + '-EMA.pdparams'):\n            model_ema_state = paddle.load(config.MODEL.RESUME + '-EMA.pdparams')\n            model_ema.module.set_state_dict(model_ema_state)\n            local_logger.info(f'----- Load model ema from {config.MODEL.RESUME}-EMA.pdparams')\n            if local_rank == 0:\n                master_logger.info(f'----- Load model ema from {config.MODEL.RESUME}-EMA.pdparams')\n    \n    # STEP 7: Validation (eval mode)\n    if config.EVAL:\n        local_logger.info('----- Start Validating')\n        if local_rank == 0:\n            master_logger.info('----- Start Validating')\n        val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n            dataloader=dataloader_val,\n            model=model,\n            criterion=criterion_val,\n            total_batch=total_batch_val,\n            debug_steps=config.REPORT_FREQ,\n            local_logger=local_logger,\n            master_logger=master_logger)\n        local_logger.info(f\"Validation Loss: {val_loss:.4f}, \" +\n                          f\"Validation Acc@1: {val_acc1:.4f}, \" +\n                          f\"Validation Acc@5: {val_acc5:.4f}, \" +\n                          f\"time: {val_time:.2f}\")\n        if local_rank == 0:\n            master_logger.info(f\"Validation Loss: {avg_loss:.4f}, \" +\n                               f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n                               f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n                               f\"time: {val_time:.2f}\")\n        return\n\n    # STEP 8: Start training and validation (train mode)\n    local_logger.info(f\"Start training from epoch {last_epoch+1}.\")\n    if local_rank == 0:\n        master_logger.info(f\"Start training from epoch {last_epoch+1}.\")\n    for epoch in range(last_epoch+1, config.TRAIN.NUM_EPOCHS+1):\n        # train\n        local_logger.info(f\"Now training epoch {epoch}. 
LR={optimizer.get_lr():.6f}\")\n if local_rank == 0:\n master_logger.info(f\"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}\")\n train_loss, train_acc, avg_loss, avg_acc, train_time = train(\n dataloader=dataloader_train,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n epoch=epoch,\n total_epochs=config.TRAIN.NUM_EPOCHS,\n total_batch=total_batch_train,\n debug_steps=config.REPORT_FREQ,\n accum_iter=config.TRAIN.ACCUM_ITER,\n model_ema=model_ema,\n mixup_fn=mixup_fn,\n amp=config.AMP,\n local_logger=local_logger,\n master_logger=master_logger)\n\n scheduler.step()\n\n local_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Train Loss: {train_loss:.4f}, \" +\n f\"Train Acc: {train_acc:.4f}, \" +\n f\"time: {train_time:.2f}\")\n if local_rank == 0:\n master_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Train Loss: {avg_loss:.4f}, \" +\n f\"Train Acc: {avg_acc:.4f}, \" +\n f\"time: {train_time:.2f}\")\n\n # validation\n if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n local_logger.info(f'----- Validation after Epoch: {epoch}')\n if local_rank == 0:\n master_logger.info(f'----- Validation after Epoch: {epoch}')\n val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n dataloader=dataloader_val,\n model=model,\n criterion=criterion_val,\n total_batch=total_batch_val,\n debug_steps=config.REPORT_FREQ,\n local_logger=local_logger,\n master_logger=master_logger)\n local_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {val_loss:.4f}, \" +\n f\"Validation Acc@1: {val_acc1:.4f}, \" +\n f\"Validation Acc@5: {val_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n if local_rank == 0:\n master_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {avg_loss:.4f}, \" +\n f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n # model save\n if local_rank == 0:\n if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n model_path = os.path.join(\n config.SAVE, f\"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}\")\n paddle.save(model.state_dict(), model_path + '.pdparams')\n paddle.save(optimizer.state_dict(), model_path + '.pdopt')\n master_logger.info(f\"----- Save model: {model_path}.pdparams\")\n master_logger.info(f\"----- Save optim: {model_path}.pdopt\")\n if model_ema is not None:\n model_ema_path = os.path.join(\n config.SAVE, f\"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}-EMA\")\n paddle.save(model_ema.state_dict(), model_ema_path + '.pdparams')\n master_logger.info(f\"----- Save ema model: {model_ema_path}.pdparams\")\n\n\ndef main():\n # config is updated by: (1) config.py, (2) yaml file, (3) arguments\n arguments = get_arguments()\n config = get_config()\n config = update_config(config, arguments)\n\n # set output folder\n if not config.EVAL:\n config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))\n else:\n config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))\n\n if not os.path.exists(config.SAVE):\n os.makedirs(config.SAVE, exist_ok=True)\n\n # get dataset and start DDP\n if not config.EVAL:\n dataset_train = get_dataset(config, mode='train')\n else:\n dataset_train = None\n dataset_val = get_dataset(config, mode='val')\n config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS\n 
dist.spawn(main_worker, args=(config, dataset_train, dataset_val, ), nprocs=config.NGPUS)\n\n\nif __name__ == \"__main__\":\n    main()\n", "# Copyright (c) 2021 PPViT Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Swin training/validation using multiple GPU \"\"\"\n\nimport sys\nimport os\nimport time\nimport logging\nimport argparse\nimport random\nimport numpy as np\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nimport paddle.distributed as dist\nfrom datasets import get_dataloader\nfrom datasets import get_dataset\nfrom utils import AverageMeter\nfrom utils import WarmupCosineScheduler\nfrom utils import get_exclude_from_weight_decay_fn\nfrom config import get_config\nfrom config import update_config\nfrom mixup import Mixup\nfrom losses import LabelSmoothingCrossEntropyLoss\nfrom losses import SoftTargetCrossEntropyLoss\nfrom losses import DistillationLoss\nfrom swin_transformer import build_swin as build_model\n\n\ndef get_arguments():\n    \"\"\"return arguments, this will overwrite the config after loading yaml file\"\"\"\n    parser = argparse.ArgumentParser('Swin')\n    parser.add_argument('-cfg', type=str, default=None)\n    parser.add_argument('-dataset', type=str, default=None)\n    parser.add_argument('-batch_size', type=int, default=None)\n    parser.add_argument('-image_size', type=int, default=None)\n    parser.add_argument('-data_path', type=str, default=None)\n    parser.add_argument('-ngpus', type=int, default=None)\n    parser.add_argument('-pretrained', type=str, default=None)\n    parser.add_argument('-resume', type=str, default=None)\n    parser.add_argument('-last_epoch', type=int, default=None)\n    parser.add_argument('-eval', action='store_true')\n    parser.add_argument('-amp', action='store_true')\n    arguments = parser.parse_args()\n    return arguments\n\n\ndef get_logger(filename, logger_name=None):\n    \"\"\"set logging file and format\n    Args:\n        filename: str, full path of the logger file to write\n        logger_name: str, the logger name, e.g., 'master_logger', 'local_logger'\n    Return:\n        logger: python logger\n    \"\"\"\n    log_format = \"%(asctime)s %(message)s\"\n    logging.basicConfig(stream=sys.stdout, level=logging.INFO,\n                        format=log_format, datefmt=\"%m%d %I:%M:%S %p\")\n    # different name is needed when creating multiple logger in one process\n    logger = logging.getLogger(logger_name)\n    fh = logging.FileHandler(os.path.join(filename))\n    fh.setFormatter(logging.Formatter(log_format))\n    logger.addHandler(fh)\n    return logger\n\n\ndef train(dataloader,\n          model,\n          criterion,\n          optimizer,\n          epoch,\n          total_epochs,\n          total_batch,\n          debug_steps=100,\n          accum_iter=1,\n          mixup_fn=None,\n          amp=False,\n          local_logger=None,\n          master_logger=None):\n    \"\"\"Training for one epoch\n    Args:\n        dataloader: paddle.io.DataLoader, dataloader instance\n        model: nn.Layer, a ViT model\n        criterion: nn.criterion\n        epoch: int, current epoch\n        total_epochs: int, total num of epochs\n        total_batch: int, total num of batches for one epoch\n        debug_steps: int, num of iters to log 
info, default: 100\n        accum_iter: int, num of iters for accumulating gradients, default: 1\n        mixup_fn: Mixup, mixup instance, default: None\n        amp: bool, if True, use mix precision training, default: False\n        local_logger: logger for local process/gpu, default: None\n        master_logger: logger for main process, default: None\n    Returns:\n        train_loss_meter.avg: float, average loss on current process/gpu\n        train_acc_meter.avg: float, average top1 accuracy on current process/gpu\n        master_train_loss_meter.avg: float, average loss on all processes/gpus\n        master_train_acc_meter.avg: float, average top1 accuracy on all processes/gpus\n        train_time: float, training time\n    \"\"\"\n    model.train()\n    train_loss_meter = AverageMeter()\n    train_acc_meter = AverageMeter()\n    master_train_loss_meter = AverageMeter()\n    master_train_acc_meter = AverageMeter()\n\n    if amp is True:\n        scaler = paddle.amp.GradScaler(init_loss_scaling=1024)\n    time_st = time.time()\n\n    for batch_id, data in enumerate(dataloader):\n        image = data[0]\n        label = data[1]\n        label_orig = label.clone()\n\n        if mixup_fn is not None:\n            image, label = mixup_fn(image, label_orig)\n        \n        if amp is True: # mixed precision training\n            with paddle.amp.auto_cast():\n                output = model(image)\n                loss = criterion(output, label)\n            scaled = scaler.scale(loss)\n            scaled.backward()\n            if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n                scaler.minimize(optimizer, scaled)\n                optimizer.clear_grad()\n        else: # full precision training\n            output = model(image)\n            loss = criterion(output, label)\n            #NOTE: division may be needed depending on the loss function\n            # Here no division is needed:\n            # default 'reduction' param in nn.CrossEntropyLoss is set to 'mean'\n            #loss =  loss / accum_iter\n            loss.backward()\n\n            if ((batch_id +1) % accum_iter == 0) or (batch_id + 1 == len(dataloader)):\n                optimizer.step()\n                optimizer.clear_grad()\n\n        pred = F.softmax(output)\n        if mixup_fn:\n            acc = paddle.metric.accuracy(pred, label_orig)\n        else:\n            acc = paddle.metric.accuracy(pred, label_orig.unsqueeze(1))\n\n        batch_size = paddle.to_tensor(image.shape[0])\n\n        # sync from other gpus for overall loss and acc\n        master_loss = loss.clone()\n        master_acc = acc.clone()\n        master_batch_size = batch_size.clone()\n        dist.all_reduce(master_loss)\n        dist.all_reduce(master_acc)\n        dist.all_reduce(master_batch_size)\n        master_loss = master_loss / dist.get_world_size()\n        master_acc = master_acc / dist.get_world_size()\n        master_train_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])\n        master_train_acc_meter.update(master_acc.numpy()[0], master_batch_size.numpy()[0])\n\n        train_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])\n        train_acc_meter.update(acc.numpy()[0], batch_size.numpy()[0])\n\n        if batch_id % debug_steps == 0:\n            if local_logger:\n                local_logger.info(\n                    f\"Epoch[{epoch:03d}/{total_epochs:03d}], \" +\n                    f\"Step[{batch_id:04d}/{total_batch:04d}], \" +\n                    f\"Avg Loss: {train_loss_meter.avg:.4f}, \" +\n                    f\"Avg Acc: {train_acc_meter.avg:.4f}\")\n            if master_logger and dist.get_rank() == 0:\n                master_logger.info(\n                    f\"Epoch[{epoch:03d}/{total_epochs:03d}], \" +\n                    f\"Step[{batch_id:04d}/{total_batch:04d}], \" +\n                    f\"Avg Loss: {master_train_loss_meter.avg:.4f}, \" +\n                    f\"Avg Acc: {master_train_acc_meter.avg:.4f}\")\n\n    train_time = time.time() - time_st\n    return (train_loss_meter.avg,\n            train_acc_meter.avg,\n            master_train_loss_meter.avg,\n            master_train_acc_meter.avg,\n            train_time)\n\n\ndef validate(dataloader,\n             model,\n             criterion,\n             total_batch,\n             debug_steps=100,\n             
local_logger=None,\n master_logger=None):\n \"\"\"Validation for whole dataset\n Args:\n dataloader: paddle.io.DataLoader, dataloader instance\n model: nn.Layer, a ViT model\n criterion: nn.criterion\n total_epoch: int, total num of epoch, for logging\n debug_steps: int, num of iters to log info, default: 100\n local_logger: logger for local process/gpu, default: None\n master_logger: logger for main process, default: None\n Returns:\n val_loss_meter.avg: float, average loss on current process/gpu\n val_acc1_meter.avg: float, average top1 accuracy on current process/gpu\n val_acc5_meter.avg: float, average top5 accuracy on current process/gpu\n master_val_loss_meter.avg: float, average loss on all processes/gpus\n master_val_acc1_meter.avg: float, average top1 accuracy on all processes/gpus\n master_val_acc5_meter.avg: float, average top5 accuracy on all processes/gpus\n val_time: float, validation time\n \"\"\"\n model.eval()\n val_loss_meter = AverageMeter()\n val_acc1_meter = AverageMeter()\n val_acc5_meter = AverageMeter()\n master_val_loss_meter = AverageMeter()\n master_val_acc1_meter = AverageMeter()\n master_val_acc5_meter = AverageMeter()\n time_st = time.time()\n\n with paddle.no_grad():\n for batch_id, data in enumerate(dataloader):\n image = data[0]\n label = data[1]\n\n output = model(image)\n loss = criterion(output, label)\n\n pred = F.softmax(output)\n acc1 = paddle.metric.accuracy(pred, label.unsqueeze(1))\n acc5 = paddle.metric.accuracy(pred, label.unsqueeze(1), k=5)\n\n batch_size = paddle.to_tensor(image.shape[0])\n\n master_loss = loss.clone()\n master_acc1 = acc1.clone()\n master_acc5 = acc5.clone()\n master_batch_size = batch_size.clone()\n\n dist.all_reduce(master_loss)\n dist.all_reduce(master_acc1)\n dist.all_reduce(master_acc5)\n dist.all_reduce(master_batch_size)\n master_loss = master_loss / dist.get_world_size()\n master_acc1 = master_acc1 / dist.get_world_size()\n master_acc5 = master_acc5 / dist.get_world_size()\n\n master_val_loss_meter.update(master_loss.numpy()[0], master_batch_size.numpy()[0])\n master_val_acc1_meter.update(master_acc1.numpy()[0], master_batch_size.numpy()[0])\n master_val_acc5_meter.update(master_acc5.numpy()[0], master_batch_size.numpy()[0])\n\n val_loss_meter.update(loss.numpy()[0], batch_size.numpy()[0])\n val_acc1_meter.update(acc1.numpy()[0], batch_size.numpy()[0])\n val_acc5_meter.update(acc5.numpy()[0], batch_size.numpy()[0])\n\n if batch_id % debug_steps == 0:\n if local_logger:\n local_logger.info(\n f\"Val Step[{batch_id:04d}/{total_batch:04d}], \" +\n f\"Avg Loss: {val_loss_meter.avg:.4f}, \" +\n f\"Avg Acc@1: {val_acc1_meter.avg:.4f}, \" +\n f\"Avg Acc@5: {val_acc5_meter.avg:.4f}\")\n if master_logger and dist.get_rank() == 0:\n master_logger.info(\n f\"Val Step[{batch_id:04d}/{total_batch:04d}], \" +\n f\"Avg Loss: {master_val_loss_meter.avg:.4f}, \" +\n f\"Avg Acc@1: {master_val_acc1_meter.avg:.4f}, \" +\n f\"Avg Acc@5: {master_val_acc5_meter.avg:.4f}\")\n val_time = time.time() - time_st\n return (val_loss_meter.avg,\n val_acc1_meter.avg,\n val_acc5_meter.avg,\n master_val_loss_meter.avg,\n master_val_acc1_meter.avg,\n master_val_acc5_meter.avg,\n val_time)\n\n\ndef main_worker(*args):\n # STEP 0: Preparation\n config = args[0]\n dist.init_parallel_env()\n last_epoch = config.TRAIN.LAST_EPOCH\n world_size = dist.get_world_size()\n local_rank = dist.get_rank()\n seed = config.SEED + local_rank\n paddle.seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n # logger for each process/gpu\n local_logger = get_logger(\n 
filename=os.path.join(config.SAVE, 'log_{}.txt'.format(local_rank)),\n logger_name='local_logger')\n # overall logger\n if local_rank == 0:\n master_logger = get_logger(\n filename=os.path.join(config.SAVE, 'log.txt'),\n logger_name='master_logger')\n master_logger.info(f'\\n{config}')\n else:\n master_logger = None\n local_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}')\n if local_rank == 0:\n master_logger.info(f'----- world_size = {world_size}, local_rank = {local_rank}')\n \n # STEP 1: Create model\n model = build_model(config)\n model = paddle.DataParallel(model)\n\n # STEP 2: Create train and val dataloader\n dataset_train, dataset_val = args[1], args[2]\n # Create training dataloader\n if not config.EVAL:\n dataloader_train = get_dataloader(config, dataset_train, 'train', True)\n total_batch_train = len(dataloader_train)\n local_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}')\n if local_rank == 0:\n master_logger.info(f'----- Total # of train batch (single gpu): {total_batch_train}')\n # Create validation dataloader\n dataloader_val = get_dataloader(config, dataset_val, 'test', True)\n total_batch_val = len(dataloader_val)\n local_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}')\n if local_rank == 0:\n master_logger.info(f'----- Total # of val batch (single gpu): {total_batch_val}')\n\n # STEP 3: Define Mixup function\n mixup_fn = None\n if config.TRAIN.MIXUP_PROB > 0 or config.TRAIN.CUTMIX_ALPHA > 0 or config.TRAIN.CUTMIX_MINMAX is not None:\n mixup_fn = Mixup(mixup_alpha=config.TRAIN.MIXUP_ALPHA,\n cutmix_alpha=config.TRAIN.CUTMIX_ALPHA,\n cutmix_minmax=config.TRAIN.CUTMIX_MINMAX,\n prob=config.TRAIN.MIXUP_PROB,\n switch_prob=config.TRAIN.MIXUP_SWITCH_PROB,\n mode=config.TRAIN.MIXUP_MODE,\n label_smoothing=config.TRAIN.SMOOTHING,\n num_classes=config.MODEL.NUM_CLASSES)\n\n # STEP 4: Define criterion\n if config.TRAIN.MIXUP_PROB > 0.:\n criterion = SoftTargetCrossEntropyLoss()\n elif config.TRAIN.SMOOTHING:\n criterion = LabelSmoothingCrossEntropyLoss()\n else:\n criterion = nn.CrossEntropyLoss()\n # only use cross entropy for val\n criterion_val = nn.CrossEntropyLoss()\n\n # STEP 5: Define optimizer and lr_scheduler\n # set lr according to batch size and world size (hacked from official code)\n linear_scaled_lr = (config.TRAIN.BASE_LR *\n config.DATA.BATCH_SIZE * dist.get_world_size()) / 512.0\n linear_scaled_warmup_start_lr = (config.TRAIN.WARMUP_START_LR *\n config.DATA.BATCH_SIZE * dist.get_world_size()) / 512.0\n linear_scaled_end_lr = (config.TRAIN.END_LR *\n config.DATA.BATCH_SIZE * dist.get_world_size()) / 512.0\n\n if config.TRAIN.ACCUM_ITER > 1:\n linear_scaled_lr = linear_scaled_lr * config.TRAIN.ACCUM_ITER\n linear_scaled_warmup_start_lr = linear_scaled_warmup_start_lr * config.TRAIN.ACCUM_ITER\n linear_scaled_end_lr = linear_scaled_end_lr * config.TRAIN.ACCUM_ITER\n \n config.TRAIN.BASE_LR = linear_scaled_lr\n config.TRAIN.WARMUP_START_LR = linear_scaled_warmup_start_lr\n config.TRAIN.END_LR = linear_scaled_end_lr\n\n scheduler = None\n if config.TRAIN.LR_SCHEDULER.NAME == \"warmupcosine\":\n scheduler = WarmupCosineScheduler(learning_rate=config.TRAIN.BASE_LR,\n warmup_start_lr=config.TRAIN.WARMUP_START_LR,\n start_lr=config.TRAIN.BASE_LR,\n end_lr=config.TRAIN.END_LR,\n warmup_epochs=config.TRAIN.WARMUP_EPOCHS,\n total_epochs=config.TRAIN.NUM_EPOCHS,\n last_epoch=config.TRAIN.LAST_EPOCH,\n )\n elif config.TRAIN.LR_SCHEDULER.NAME == \"cosine\":\n scheduler = 
paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=config.TRAIN.BASE_LR,\n T_max=config.TRAIN.NUM_EPOCHS,\n last_epoch=last_epoch)\n elif config.TRAIN.LR_SCHEDULER.NAME == \"multi-step\":\n milestones = [int(v.strip()) for v in config.TRAIN.LR_SCHEDULER.MILESTONES.split(\",\")]\n scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=config.TRAIN.BASE_LR,\n milestones=milestones,\n gamma=config.TRAIN.LR_SCHEDULER.DECAY_RATE,\n last_epoch=last_epoch)\n else:\n local_logger.fatal(f\"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.\")\n if local_rank == 0:\n master_logger.fatal(f\"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.\")\n raise NotImplementedError(f\"Unsupported Scheduler: {config.TRAIN.LR_SCHEDULER}.\")\n\n if config.TRAIN.OPTIMIZER.NAME == \"SGD\":\n if config.TRAIN.GRAD_CLIP:\n clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)\n else:\n clip = None\n optimizer = paddle.optimizer.Momentum(\n parameters=model.parameters(),\n learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,\n weight_decay=config.TRAIN.WEIGHT_DECAY,\n momentum=config.TRAIN.OPTIMIZER.MOMENTUM,\n grad_clip=clip)\n elif config.TRAIN.OPTIMIZER.NAME == \"AdamW\":\n if config.TRAIN.GRAD_CLIP:\n clip = paddle.nn.ClipGradByGlobalNorm(config.TRAIN.GRAD_CLIP)\n else:\n clip = None\n optimizer = paddle.optimizer.AdamW(\n parameters=model.parameters(),\n learning_rate=scheduler if scheduler is not None else config.TRAIN.BASE_LR,\n beta1=config.TRAIN.OPTIMIZER.BETAS[0],\n beta2=config.TRAIN.OPTIMIZER.BETAS[1],\n weight_decay=config.TRAIN.WEIGHT_DECAY,\n epsilon=config.TRAIN.OPTIMIZER.EPS,\n grad_clip=clip,\n apply_decay_param_fun=get_exclude_from_weight_decay_fn([\n 'absolute_pos_embed', 'relative_position_bias_table']),\n )\n else:\n local_logger.fatal(f\"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.\")\n if local_rank == 0:\n master_logger.fatal(f\"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.\")\n raise NotImplementedError(f\"Unsupported Optimizer: {config.TRAIN.OPTIMIZER.NAME}.\")\n\n # STEP 6: Load pretrained model / load resumed model and optimizer states\n if config.MODEL.PRETRAINED:\n if (config.MODEL.PRETRAINED).endswith('.pdparams'):\n raise ValueError(f'{config.MODEL.PRETRAINED} should not contain .pdparams')\n assert os.path.isfile(config.MODEL.PRETRAINED + '.pdparams') is True\n model_state = paddle.load(config.MODEL.PRETRAINED+'.pdparams')\n model.set_dict(model_state)\n local_logger.info(f\"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}\")\n if local_rank == 0:\n master_logger.info(\n f\"----- Pretrained: Load model state from {config.MODEL.PRETRAINED}\")\n\n if config.MODEL.RESUME:\n assert os.path.isfile(config.MODEL.RESUME+'.pdparams') is True\n assert os.path.isfile(config.MODEL.RESUME+'.pdopt') is True\n model_state = paddle.load(config.MODEL.RESUME+'.pdparams')\n model.set_dict(model_state)\n opt_state = paddle.load(config.MODEL.RESUME+'.pdopt')\n optimizer.set_state_dict(opt_state)\n local_logger.info(\n f\"----- Resume Training: Load model and optimizer from {config.MODEL.RESUME}\")\n if local_rank == 0:\n master_logger.info(\n f\"----- Resume Training: Load model and optimizer from {config.MODEL.RESUME}\")\n \n # STEP 7: Validation (eval mode)\n if config.EVAL:\n local_logger.info('----- Start Validating')\n if local_rank == 0:\n master_logger.info('----- Start Validating')\n val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n dataloader=dataloader_val,\n model=model,\n criterion=criterion_val,\n 
total_batch=total_batch_val,\n debug_steps=config.REPORT_FREQ,\n local_logger=local_logger,\n master_logger=master_logger)\n local_logger.info(f\"Validation Loss: {val_loss:.4f}, \" +\n f\"Validation Acc@1: {val_acc1:.4f}, \" +\n f\"Validation Acc@5: {val_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n if local_rank == 0:\n master_logger.info(f\"Validation Loss: {avg_loss:.4f}, \" +\n f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n return\n\n # STEP 8: Start training and validation (train mode)\n local_logger.info(f\"Start training from epoch {last_epoch+1}.\")\n if local_rank == 0:\n master_logger.info(f\"Start training from epoch {last_epoch+1}.\")\n for epoch in range(last_epoch+1, config.TRAIN.NUM_EPOCHS+1):\n # train\n local_logger.info(f\"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}\")\n if local_rank == 0:\n master_logger.info(f\"Now training epoch {epoch}. LR={optimizer.get_lr():.6f}\")\n train_loss, train_acc, avg_loss, avg_acc, train_time = train(\n dataloader=dataloader_train,\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n epoch=epoch,\n total_epochs=config.TRAIN.NUM_EPOCHS,\n total_batch=total_batch_train,\n debug_steps=config.REPORT_FREQ,\n accum_iter=config.TRAIN.ACCUM_ITER,\n mixup_fn=mixup_fn,\n amp=config.AMP,\n local_logger=local_logger,\n master_logger=master_logger)\n\n scheduler.step()\n\n local_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Train Loss: {train_loss:.4f}, \" +\n f\"Train Acc: {train_acc:.4f}, \" +\n f\"time: {train_time:.2f}\")\n if local_rank == 0:\n master_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Train Loss: {avg_loss:.4f}, \" +\n f\"Train Acc: {avg_acc:.4f}, \" +\n f\"time: {train_time:.2f}\")\n\n # validation\n if epoch % config.VALIDATE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n local_logger.info(f'----- Validation after Epoch: {epoch}')\n if local_rank == 0:\n master_logger.info(f'----- Validation after Epoch: {epoch}')\n val_loss, val_acc1, val_acc5, avg_loss, avg_acc1, avg_acc5, val_time = validate(\n dataloader=dataloader_val,\n model=model,\n criterion=criterion_val,\n total_batch=total_batch_val,\n debug_steps=config.REPORT_FREQ,\n local_logger=local_logger,\n master_logger=master_logger)\n local_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {val_loss:.4f}, \" +\n f\"Validation Acc@1: {val_acc1:.4f}, \" +\n f\"Validation Acc@5: {val_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n if local_rank == 0:\n master_logger.info(f\"----- Epoch[{epoch:03d}/{config.TRAIN.NUM_EPOCHS:03d}], \" +\n f\"Validation Loss: {avg_loss:.4f}, \" +\n f\"Validation Acc@1: {avg_acc1:.4f}, \" +\n f\"Validation Acc@5: {avg_acc5:.4f}, \" +\n f\"time: {val_time:.2f}\")\n # model save\n if local_rank == 0:\n if epoch % config.SAVE_FREQ == 0 or epoch == config.TRAIN.NUM_EPOCHS:\n model_path = os.path.join(\n config.SAVE, f\"{config.MODEL.TYPE}-Epoch-{epoch}-Loss-{train_loss}\")\n paddle.save(model.state_dict(), model_path + '.pdparams')\n paddle.save(optimizer.state_dict(), model_path + '.pdopt')\n master_logger.info(f\"----- Save model: {model_path}.pdparams\")\n master_logger.info(f\"----- Save optim: {model_path}.pdopt\")\n\n\ndef main():\n # config is updated by: (1) config.py, (2) yaml file, (3) arguments\n arguments = get_arguments()\n config = get_config()\n config = update_config(config, arguments)\n\n # set output folder\n if not 
config.EVAL:\n config.SAVE = '{}/train-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))\n else:\n config.SAVE = '{}/eval-{}'.format(config.SAVE, time.strftime('%Y%m%d-%H-%M-%S'))\n\n if not os.path.exists(config.SAVE):\n os.makedirs(config.SAVE, exist_ok=True)\n\n # get dataset and start DDP\n if not config.EVAL:\n dataset_train = get_dataset(config, mode='train')\n else:\n dataset_train = None\n dataset_val = get_dataset(config, mode='val')\n config.NGPUS = len(paddle.static.cuda_places()) if config.NGPUS == -1 else config.NGPUS\n dist.spawn(main_worker, args=(config, dataset_train, dataset_val, ), nprocs=config.NGPUS)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.random.seed" ], [ "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marielacour81/CBIG
[ "511af756c6ddabbd3a9681ce3514b79ef5aaaf3f", "511af756c6ddabbd3a9681ce3514b79ef5aaaf3f", "511af756c6ddabbd3a9681ce3514b79ef5aaaf3f" ]
[ "stable_projects/predict_phenotypes/He2019_KRDNN/cbig/He2019/config.py", "stable_projects/fMRI_dynamics/Kong2021_pMFM/part2_pMFM_control_analysis/Primary_gradients/scripts/CBIG_pMFM_step19_training_Struct.py", "stable_projects/fMRI_dynamics/Kong2021_pMFM/part1_pMFM_main/scripts/CBIG_pMFM_step4_generate_simulated_fc_fcd.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nWritten by Tong He and CBIG under MIT license:\nhttps://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md\n\"\"\"\n\nimport os\nimport numpy as np\n\n\nclass config:\n BASE_DIR = '../../../../../../data/fmri_predict_behavior'\n CUR_DIR = os.getcwd()\n INTER_DIR = os.path.join(BASE_DIR, 'He2019_data')\n GRAPH_FOLDER = os.path.join(INTER_DIR, 'graph')\n RAMDOM_SEED = 42\n OUT_PATH = 'log'\n\n # Config for HCP\n HCP_CORR_MAT = 'FC_subject_953.mat'\n HCP_SUBJECT_LIST = 'He2019_hcp_953_split.mat'\n HCP_ORIG_DIR = os.path.join(BASE_DIR, 'original_data_953')\n HCP_INTER_DIR = os.path.join(INTER_DIR, 'HCP')\n HCP_MEASURE_SETS = ['Cognitive', 'Personality_Task', 'Social_Emotion']\n HCP_NUM_FOLD = 20\n HCP_NUM_SUBJECT = 953\n HCP_N_DIMENSION = 419\n HCP_BATCH_SIZE = 128\n HCP_MEASURE_SETS_NUM = [13, 22, 23]\n HCP_N_MEASURE = int(np.sum(HCP_MEASURE_SETS_NUM))\n\n # Config for UKBB\n UKBB_CORR_MAT = 'ukbb_ht_180205_FC_55.mat'\n UKBB_SUBJECT_LIST = 'ukbb_subject_split.mat'\n UKBB_ORIG_DIR = os.path.join(BASE_DIR, 'original_data_ukbb_8868')\n UKBB_INTER_DIR = os.path.join(INTER_DIR, 'UKBB')\n UKBB_MEASURE_SETS = ['1802_8868']\n UKBB_NUM_SUBJECT = 8868\n UKBB_RUNS = 5\n UKBB_BATCH_SIZE = 128\n UKBB_EPOCHS = 200\n UKBB_EPOCHS_GCNN = 2000\n UKBB_N_DIM = 55\n\n # Config for example\n EXAMPLE_N_SUBJECT = 40\n EXAMPLE_N_FOLDS = 4\n", "# /usr/bin/env python\n'''\nWritten by Kong Xiaolu and CBIG under MIT license:\nhttps://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md\n'''\n\nimport os\nimport scipy.io as sio\nimport numpy as np\nimport time\nimport torch\nimport CBIG_pMFM_basic_functions as fc\n\n\ndef get_init(gradient_data, highest_order, init_para):\n '''\n This function is implemented to calculate the initial parametrized coefficients\n '''\n\n n_node = gradient_data.shape[0]\n amatrix = np.zeros((n_node, highest_order + 1))\n for i in range(highest_order + 1):\n amatrix[:, i] = gradient_data**(i)\n para = np.linalg.inv(amatrix.T @ amatrix) @ amatrix.T @ init_para\n return para, amatrix\n\n\ndef CBIG_mfm_optimization_desikan_main(random_seed=1, gpu_index=0):\n '''\n This function is to implement the optimization processes of mean field model.\n The objective function is the summation of FC correlation cost and FCD KS statistics cost.\n The optimization process is highly automatic and generate 500 candidate parameter sets for\n main results.\n\n Args:\n gpu_index: index of gpu used for optimization\n random_seed: random seed for optimization\n Returns:\n None\n '''\n\n output_path = '../output/struct/training'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n\n # Setting random seed and GPU\n torch.cuda.set_device(gpu_index)\n random_seed_cuda = random_seed\n random_seed_np = random_seed\n torch.manual_seed(random_seed_cuda)\n rng = np.random.Generator(np.random.PCG64(random_seed_np))\n\n # Initializing input parameters\n highest_order = 1\n N = 3 * (highest_order + 1) + 1\n gradient_data = sio.loadmat(\n '../../../input/Desikan_input/structual_gradient1.mat')\n gradient_data = gradient_data['structural_grad1']\n gradient_data = gradient_data[:, 0]\n n_node = gradient_data.shape[0]\n dim = n_node * 3 + 1\n\n search_range = np.zeros((dim, 2))\n search_range[0:n_node, :] = [0, 1]\n search_range[n_node:n_node * 2, :] = [0, 0.5]\n search_range[n_node * 2, :] = [1, 10]\n search_range[n_node * 2 + 1:dim, :] = [0.0005, 0.01]\n init_para = rng.uniform(0, 1, dim) * (\n search_range[:, 1] - search_range[:, 0]) + search_range[:, 0]\n 
start_point_w, template_mat = get_init(gradient_data, highest_order,\n init_para[0:n_node])\n start_point_i, template_mat = get_init(gradient_data, highest_order,\n init_para[n_node:n_node * 2])\n start_point_sigma, template_mat = get_init(gradient_data, highest_order,\n init_para[n_node * 2 + 1:dim])\n\n # Initializing the distribution mean\n xmean = np.zeros(N)\n xmean[0:highest_order + 1] = start_point_w\n xmean[highest_order + 1:2 * (highest_order + 1)] = start_point_i\n xmean[2 * (highest_order + 1)] = init_para[2 * n_node]\n xmean[2 * (highest_order + 1) + 1:N] = start_point_sigma\n\n # Initializing optimization hyper-parameters\n sigma = 0.15\n sigmaS = 0.15\n stoppoint = 0.3\n maxloop = 400\n n_dup = 3\n\n # CMA-ES parameters setting\n Lambda = 500\n mu = 40\n weights = np.log(mu + 1 / 2) - np.log(np.arange(1, mu + 1))\n weights = weights / np.sum(weights)\n mueff = 1 / np.sum(weights**2)\n\n # Strategy parameter setting: adaptation\n cc = (4 + mueff / N) / (N + 4 + 2 * mueff / N)\n cs = (mueff + 2) / (N + mueff + 5)\n c1 = 2 / ((N + 1.3)**2 + mueff)\n cmu = np.minimum(1 - c1,\n 2 * (mueff - 2 + 1 / mueff) / ((N + 2)**2 + mueff))\n damps = 1 + 2 * np.maximum(0, np.sqrt((mueff - 1) / (N + 1)) - 1) + cs\n\n # Initializing dynamic strategy parameters and constants\n pc = np.zeros(N)\n ps = np.zeros(N)\n B = np.eye(N)\n D = np.zeros(N)\n D[0:highest_order + 1] = start_point_w[0] / 2\n D[highest_order + 1:2 * (highest_order + 1)] = start_point_i[0] / 2\n D[2 * (highest_order + 1)] = 0.4\n D[2 * (highest_order + 1) + 1:N] = 0.001 / 2\n C = np.dot(np.dot(B, np.diag(np.power(D, 2))), B.T)\n invsqrtC = np.dot(np.dot(B, np.diag(np.power(D, -1))), B.T)\n chiN = N**0.5 * (1 - 1 / (4 * N) + 1 / (21 * N**2))\n\n # Evolution loop\n countloop = 0\n arx = np.zeros([N, Lambda])\n input_para = np.zeros((dim, Lambda))\n xmin = np.zeros([N + 3, maxloop])\n stop_count = 0\n while countloop < maxloop:\n\n start_time = time.time()\n\n # Generating lambda offspring\n arx[:, 0] = xmean\n j = 0\n while j < Lambda:\n arx[:, j] = xmean + sigma * np.dot(B, (D * rng.standard_normal(N)))\n input_para[0:n_node, j] = template_mat @ arx[0:highest_order +\n 1, j]\n input_para[n_node:2 *\n n_node, j] = template_mat @ arx[highest_order + 1:2 *\n (highest_order + 1), j]\n input_para[2 * n_node:2 * n_node +\n 1, j] = arx[2 * (highest_order + 1), j]\n input_para[2 * n_node +\n 1:dim, j] = template_mat @ arx[2 * (highest_order + 1) +\n 1:N, j]\n if (input_para[:, j] < search_range[:, 0]).any() or (\n input_para[:, j] > search_range[:, 1]).any():\n j = j - 1\n j = j + 1\n\n # Calculating costs of offspring\n total_cost, fc_cost, fcd_cost = fc.CBIG_combined_cost_train(\n input_para, n_dup)\n countloop = countloop + 1\n\n # Sort by total cost and compute weighted mean\n arfitsort = np.sort(total_cost)\n arindex = np.argsort(total_cost)\n xold = xmean\n xmean = np.dot(arx[:, arindex[0:mu]], weights)\n xshow = xmean - xold\n\n # Cumulation\n ps = (1 - cs) * ps + np.sqrt(cs * (2 - cs) * mueff) * np.dot(\n invsqrtC, xshow) / sigma\n hsig = (np.linalg.norm(ps) / np.sqrt(1 - (1 - cs)**\n (2 * countloop)) / chiN <\n (1.4 + 2 / (N + 1))) * 1\n pc = (1 - cc) * pc + hsig * np.sqrt(cc *\n (2 - cc) * mueff) * xshow / sigma\n\n # Adapting covariance matrix C\n artmp = (1 / sigma) * (\n arx[:, arindex[0:mu]] - np.tile(xold, [mu, 1]).T)\n C = (1 - c1 - cmu) * C + c1 * (\n np.outer(pc, pc) + (1 - hsig) * cc * (2 - cc) * C) + cmu * np.dot(\n artmp, np.dot(np.diag(weights), artmp.T))\n\n # Adapting step size\n sigma = sigma * np.exp((cs / damps) * 
(np.linalg.norm(ps) / chiN - 1))\n sigma = min(sigma, sigmaS)\n\n # Decomposition\n if 1 > 1 / (c1 + cmu) / N / 10:\n C = np.triu(C, k=1) + np.triu(C).T\n D, B = np.linalg.eigh(C)\n D = D.real\n B = B.real\n D = np.sqrt(D)\n invsqrtC = np.dot(B, np.dot(np.diag(D**(-1)), B.T))\n\n # Monitoring the evolution status\n ps_norm = np.linalg.norm(ps)\n print('******** Generation: ' + str(countloop) + ' ********')\n print('Norm of P-sigma: ', ps_norm)\n print('The mean of total cost: ', np.mean(arfitsort[0:mu]))\n print('Sigma: ', sigma)\n\n xmin[0:N, countloop - 1] = arx[:, arindex[0]]\n xmin[N, countloop - 1] = fc_cost[arindex[0]]\n xmin[N + 1, countloop - 1] = fcd_cost[arindex[0]]\n xmin[N + 2, countloop - 1] = np.min(total_cost)\n print('Best total cost: ', np.min(total_cost))\n print('FC correlation cost: ', fc_cost[arindex[0]])\n print('FCD KS statistics cost: ', fcd_cost[arindex[0]])\n\n elapsed_time = time.time() - start_time\n print('Elapsed time for this evolution is : ', elapsed_time)\n print('******************************************')\n\n # break\n if arfitsort[0] < stoppoint and ps_norm < 11:\n stop_count = stop_count + 1\n if stop_count >= 5 or sigma < 0.001:\n break\n\n save_name = [output_path] + ['/random_seed_', str(random_seed), '.csv']\n np.savetxt(''.join(save_name), xmin, delimiter=',')\n\n\nif __name__ == \"__main__\":\n CBIG_mfm_optimization_desikan_main(random_seed=1, gpu_index=0)\n", "# /usr/bin/env python\n'''\nWritten by Kong Xiaolu and CBIG under MIT license:\nhttps://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md\n'''\n\nimport os\nimport numpy as np\nimport torch\nimport scipy.io as sio\nimport CBIG_pMFM_basic_functions_main as fc\nimport warnings\n\n\ndef CBIG_pMFM_generate_simualted_fc_fcd(gpu_index=0):\n '''\n This function is to generate the simulated fc and fcd based on test set\n The simulated fc and fcd are used in the analysis shown in the paper\n\n Args:\n gpu_index: index of gpu used for optimization\n Returns:\n None\n '''\n\n # Setting random seed and GPU\n torch.cuda.set_device(gpu_index)\n torch.cuda.manual_seed(1)\n\n # Create output folder\n test_file = '../output/step3_test_results/test_all.csv'\n output_path = '../output/step4_MFM_simulated_data'\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n\n n_set = 2000\n result_all = fc.csv_matrix_read(test_file)\n parameter = result_all[11:, 0]\n parameter = np.tile(parameter, [n_set, 1]).T\n parameter = torch.from_numpy(parameter).type(torch.FloatTensor).cuda()\n\n # Load data\n emp_fcd = sio.loadmat('../../input/Desikan_input/fcd_test.mat')\n emp_fcd = np.array(emp_fcd['test_aveM'])\n\n sc_mat_raw = fc.csv_matrix_read('../../input/Desikan_input/sc_test.csv')\n sc_mat = sc_mat_raw / sc_mat_raw.max() * 0.2\n sc_mat = torch.from_numpy(sc_mat).type(torch.FloatTensor).cuda()\n\n emp_fc = fc.csv_matrix_read('../../input/Desikan_input/fc_test.csv')\n emp_fc = torch.from_numpy(emp_fc).type(torch.FloatTensor).cuda()\n\n # Calculating simualted BOLD signal using MFM\n bold_d = fc.CBIG_mfm_single_simulation(parameter, sc_mat, 14.4)\n\n # Initializing the FC and FCD masks\n n_set = bold_d.shape[1]\n n_nodes = bold_d.shape[0]\n window_size = 83\n time_length = 1200 - window_size + 1\n sub_num = 10\n fc_edgenum = int(n_nodes * (n_nodes - 1) / 2)\n fc_mask = torch.triu(torch.ones(n_nodes, n_nodes), 1) == 1\n fc_maskm = torch.zeros(n_nodes * sub_num,\n n_nodes * sub_num).type(torch.cuda.ByteTensor)\n\n for i in range(sub_num):\n fc_maskm[n_nodes * i:n_nodes * (i + 1), n_nodes * i:n_nodes *\n (i + 
1)] = fc_mask\n\n # Calculating CDF for simualted FCD matrices\n fcd_all = torch.ones(time_length, time_length, n_set).cpu()\n fc_mat = torch.zeros(fc_edgenum, sub_num, time_length)\n batch_num = int(n_set / sub_num)\n\n for b in range(batch_num):\n bold_temp = bold_d[:, b * sub_num:(b + 1) * sub_num, :]\n bold_tempm = bold_temp.transpose(0, 1).contiguous().view(-1, 1200)\n for i in range(0, time_length):\n bold_fc = fc.torch_corr(bold_tempm[:, i:i + window_size])\n cor_temp = bold_fc[fc_maskm]\n fc_mat[:, :, i] = torch.transpose(\n cor_temp.view(sub_num, fc_edgenum), 0, 1)\n\n for j in range(0, sub_num):\n fcd_all[:, :, j + b * sub_num] = fc.torch_corr(\n torch.transpose(fc_mat[:, j, :], 0, 1))\n\n bold_numpy = bold_d.cpu().numpy()\n fcd_numpy = fcd_all.numpy()\n\n fcd_dir = os.path.join(output_path, 'FCD')\n if not os.path.isdir(fcd_dir):\n os.makedirs(fcd_dir)\n tc_dir = os.path.join(output_path, 'TC')\n if not os.path.isdir(tc_dir):\n os.makedirs(tc_dir)\n\n count = 1\n for i in range(n_set):\n print('Generating simualted TC and FCD number: ' + str(count))\n fcd = fcd_numpy[:, :, i]\n bold = bold_numpy[:, i, :]\n if (fcd == fcd).all():\n FCD = {'FCD_mat': fcd}\n sio.savemat(\n os.path.join(fcd_dir, 'FCD_' + str(count) + '.mat'), FCD)\n BOLD = {'TC': bold}\n sio.savemat(\n os.path.join(tc_dir, 'TC_' + str(count) + '.mat'), BOLD)\n count += 1\n if count > 1000:\n break\n\n\nif __name__ == '__main__':\n warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n CBIG_pMFM_generate_simualted_fc_fcd()\n" ]
[ [ "numpy.sum" ], [ "numpy.diag", "numpy.dot", "numpy.minimum", "numpy.sqrt", "numpy.mean", "numpy.arange", "numpy.eye", "scipy.io.loadmat", "numpy.outer", "numpy.triu", "numpy.zeros", "numpy.random.PCG64", "numpy.log", "numpy.min", "numpy.linalg.inv", "numpy.power", "numpy.linalg.eigh", "numpy.argsort", "numpy.sum", "torch.cuda.set_device", "torch.manual_seed", "numpy.linalg.norm", "numpy.sort", "numpy.tile" ], [ "torch.transpose", "torch.ones", "torch.cuda.manual_seed", "torch.cuda.set_device", "torch.zeros", "scipy.io.loadmat", "numpy.tile", "torch.from_numpy", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
merz9b/FinanceHub
[ "a8e1b0e645f5811a5cf3b178a75f718ff6871769" ]
[ "trackers/Commodities/comm_futures_tracker.py" ]
[ "\"\"\"\nAuthor: Gustavo Soares\n\"\"\"\n\nimport math\nimport pandas as pd\nfrom bloomberg import BBG\nfrom pandas.tseries.offsets import BDay\nfrom datetime import timedelta\n\n\nclass CommFutureTracker(object):\n \"\"\"\n Class for creating excess return indices for commodity futures using data from bloomberg.\n A default front-month roll schedule is assumed but it can be provided by the user\n At the start date, we assume we trade 100 units of the commodity in the contract defined by the roll schedule\n We MtM the position over the month and then roll it into the next contracts as defined by the roll schedule\n Commodities belonging to the Bloomberg Commodity Index (BCOM) and the S&P GSCI Commodity Index are covered\n The S&P GSCI Commodity Index is the default roll schedule but BCOM and used-defined are also supported\n\n ROLL SCHEDULE synthax:\n The roll schedule is a list of size 12, each element correponding to a month of the year in their natural order\n The list should contain a month code refering to the maturity of the contract to be held in that month according\n to the table below:\n\n Month\t Month Code\n January\t F\n February\tG\n March\t H\n April\t J\n May\t K\n June\t M\n July\t N\n August\t Q\n September\tU\n October\t V\n November\tX\n December\tZ\n\n when the letter is followed by a + sign, it means that the maturity of the contract is in the following year\n\n Example: The roll schedule [N, N, N, N, N, Z, Z, Z, H+, H+, H+, H+] does the following:\n Holds the contracting maturinig in July of the same year for the first five months of the year,\n then rolls that position into the December contract maturinig in the same year\n and holds that position for the next three months,\n then rolls that position into the March contract maturing the following year\n and holds that position until the end of the year\n rolls that position into the March contract maturing next year,\n then rolls that position into the July contract in January\n \"\"\"\n\n # TODO: Generalize this class to incorporate the case covered by the BondFutureTracker\n # TODO: Generalize this class to cover FX futures\n # TODO: Generalize this class to cover Index futures\n\n # These are the roll schedules followed by the commodities in the Bloomberg Commodity Index\n # See https://data.bloomberglp.com/indices/sites/2/2018/02/BCOM-Methodology-January-2018_FINAL-2.pdf\n bcom_roll_schedules = {\n 'C': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'S': ['H', 'H', 'K', 'K', 'N', 'N', 'X', 'X', 'X', 'X', 'F+', 'F+'],\n 'SM': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'F+', 'F+'],\n 'BO': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'F+', 'F+'],\n 'W': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'KW': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'CC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'CT': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'Z', 'H+'],\n 'KC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'LC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],\n 'LH': ['G', 'J', 'J', 'M', 'M', 'N', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],\n 'SB': ['H', 'H', 'K', 'K', 'N', 'N', 'V', 'V', 'V', 'H+', 'H+', 'H+'],\n 'CL': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'CO': ['H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+', 'H+'],\n 'HO': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'QS': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 
'F+'],\n 'XB': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'NG': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'HG': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'LN': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LX': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LA': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'GC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'Z', 'Z', 'Z', 'Z', 'G+'],\n 'SI': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n }\n\n # These are the roll schedules followed by the commodities in the S&P GSCI Commodity Index\n # See https://www.spindices.com/documents/methodologies/methodology-sp-gsci.pdf\n gsci_roll_schedules = {\n 'C': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'S': ['H', 'H', 'K', 'K', 'N', 'N', 'X', 'X', 'X', 'X', 'F+', 'F+'],\n 'W': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'KW': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'SB': ['H', 'H', 'K', 'K', 'N', 'N', 'V', 'V', 'V', 'H+', 'H+', 'H+'],\n 'CC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'CT': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'Z', 'H+'],\n 'KC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'OJ': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'FC': ['H', 'H', 'K', 'K', 'Q', 'Q', 'Q', 'V', 'V', 'F+', 'F+', 'F+'],\n 'LC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],\n 'LH': ['G', 'J', 'J', 'M', 'M', 'N', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],\n 'CL': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'CO': ['H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+', 'H+'],\n 'HO': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'QS': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'XB': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'NG': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LX': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LL': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LN': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LT': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LP': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'LA': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],\n 'GC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'Z', 'Z', 'Z', 'Z', 'G+'],\n 'SI': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],\n 'PL': ['J', 'J', 'J', 'N', 'N', 'N', 'V', 'V', 'V', 'F+', 'F+', 'F+'],\n }\n\n def __init__(self, comm_bbg_code, start_date = '2004-01-05', end_date = 'today',\n roll_schedule = None, roll_start_bday = 5, roll_window_size = 5):\n \"\"\"\n Returns an object with the many attributes including a data frame\n - tickers: list with 2 strs with Bloomberg ticker for the spot rates and 1M forward rates\n - spot_rate: Series with the spot rate data\n - fwd: Series with the 1M fwd rate data\n - er_index: Series with the excess return index\n - ts_df: DataFrame with columns 'Spot', 'Fwd', and 'Excess Return Index'\n :param ccy_symbol: str, Currency symbol from Bloomberg\n :param start_date: str, when the tracker should start\n :param end_date: str, when the tracker should end\n \"\"\"\n\n try:\n if roll_schedule == 'BCOM':\n self.roll_schedule = self.bcom_roll_schedules[comm_bbg_code]\n else:\n 
self.roll_schedule = self.gsci_roll_schedules[comm_bbg_code]\n print('Assuming S&P GSCI roll schedule for %s' % comm_bbg_code)\n except:\n if type(roll_schedule) == list:\n self.roll_schedule = roll_schedule\n else:\n raise KeyError('Commodity not yet supported, please include roll schedule for %s' % comm_bbg_code)\n\n self.comm_bbg_code = comm_bbg_code\n self.roll_start_bday = roll_start_bday\n self.roll_window_size = roll_window_size\n self.start_date = (pd.to_datetime(start_date) + BDay(1)).date()\n self.end_date = pd.to_datetime(end_date).date()\n\n self._grab_bbg_data()\n self._initialize()\n self._calculate_tr_index()\n\n\n\n def _grab_bbg_data(self):\n bbg = BBG()\n self.contract_list = bbg.fetch_futures_list(generic_ticker=self.comm_bbg_code + '1 Comdty')\n self.first_notice_dates = bbg.fetch_contract_parameter(securities=self.contract_list,\n field='FUT_NOTICE_FIRST').sort_values('FUT_NOTICE_FIRST')\n\n # Grab all contract series\n df_prices = bbg.fetch_series(securities=self.contract_list,\n fields='PX_LAST',\n startdate=self.start_date,\n enddate=self.end_date)\n self.prices = df_prices.fillna(method='ffill')\n\n def _initialize(self):\n # start on 1st bday of month\n back_start_date = self.prices.loc[self.prices.index[0].replace(day=28) +\n timedelta(days=4):].index[0]\n\n self._get_contracts_for_date(back_start_date)\n self._get_contract_weights(back_start_date)\n self.price_out = self.prices.loc[back_start_date, self.contract_rolling_out]\n self.price_in = self.prices.loc[back_start_date, self.contract_rolling_in]\n\n df_tracker = pd.DataFrame(index=self.prices.loc[back_start_date:].index,\n columns=['contract_rolling_out', 'contract_rolling_in',\n 'price_out_today', 'price_in_today', 'price_out_yst', 'price_in_yst',\n 'w_out', 'w_in',\n 'holdings_out', 'holdings_in',\n 'er_index'])\n\n df_tracker.loc[back_start_date, 'er_index'] = 100\n\n df_tracker.loc[back_start_date, 'contract_rolling_out'] = self.contract_rolling_out\n df_tracker.loc[back_start_date, 'contract_rolling_in'] = self.contract_rolling_in\n df_tracker.loc[back_start_date, 'price_out_today'] = self.price_out\n df_tracker.loc[back_start_date, 'price_in_today'] = self.price_in\n df_tracker.loc[back_start_date, 'w_out'] = self.weight_out\n df_tracker.loc[back_start_date, 'w_in'] = self.weight_in\n\n holdings_out = self.weight_out * df_tracker.loc[back_start_date, 'er_index'] / self.price_out\n holdings_in = self.weight_in * df_tracker.loc[back_start_date, 'er_index'] / self.price_in\n self.holdings_out = 0 if math.isnan(holdings_out) else holdings_out\n self.holdings_in = 0 if math.isnan(holdings_in) else holdings_in\n\n df_tracker.loc[back_start_date, 'holdings_out'] = self.holdings_out\n df_tracker.loc[back_start_date, 'holdings_in'] = self.holdings_in\n\n self.df_tracker = df_tracker\n\n def _get_contracts_for_date(self,d):\n month_letter = self.roll_schedule[d.month - 1] if self.roll_schedule[d.month - 1].find('+') == -1 else \\\n self.roll_schedule[d.month - 1][0]\n year_int = d.year if self.roll_schedule[d.month - 1].find('+') == -1 else d.year + 1\n contract_rolling_out = self.comm_bbg_code + month_letter + str(year_int)[-2:] + ' Comdty'\n if contract_rolling_out not in self.contract_list:\n contract_rolling_out = self.comm_bbg_code + month_letter + str(year_int)[-1] + ' Comdty'\n\n d2 = d.replace(day=28) + timedelta(days=4)\n month_letter = self.roll_schedule[d2.month - 1] if self.roll_schedule[d2.month - 1].find('+') == -1 else \\\n self.roll_schedule[d2.month - 1][0]\n year_int = d2.year if 
self.roll_schedule[d2.month - 1].find('+') == -1 else d2.year + 1\n contract_rolling_in = self.comm_bbg_code + month_letter + str(year_int)[-2:] + ' Comdty'\n if contract_rolling_in not in self.contract_list:\n contract_rolling_in = self.comm_bbg_code + month_letter + str(year_int)[-1] + ' Comdty'\n self.contract_rolling_out = contract_rolling_out\n self.contract_rolling_in = contract_rolling_in\n\n def _get_contract_weights(self, d, roll_type='standard'):\n days_in_the_month = [x for x in self.prices.index if x.month == d.month and x.year == d.year]\n if roll_type == 'standard':\n start_idx = self.roll_start_bday - 1\n end_idx = self.roll_start_bday + self.roll_window_size - 2\n roll_start_date = days_in_the_month[start_idx] if len(days_in_the_month) > start_idx else days_in_the_month[\n -1]\n roll_end_date = days_in_the_month[end_idx] if len(days_in_the_month) > end_idx else days_in_the_month[-1]\n elif roll_type == 'backward_from_month_end':\n roll_start_date = days_in_the_month[self.roll_start_bday]\n roll_end_date = days_in_the_month[-1]\n else:\n raise KeyError('Roll type not supported')\n\n if d < roll_start_date:\n weight_out = 1\n elif d > roll_end_date:\n weight_out = 0\n else:\n weight_out = float(len([x for x in days_in_the_month if x > d\n and x <= roll_end_date])) / float(self.roll_window_size )\n self.weight_out = weight_out\n self.weight_in = 1 - weight_out\n\n def _calculate_tr_index(self):\n for d, dm1 in zip(self.df_tracker.index[1:], self.df_tracker.index[:-1]):\n\n self.df_tracker.loc[d, 'w_out'] = self.weight_out\n self.df_tracker.loc[d, 'w_in'] = self.weight_in\n\n self.df_tracker.loc[d, 'contract_rolling_out'] = self.contract_rolling_out\n self.df_tracker.loc[d, 'contract_rolling_in'] = self.contract_rolling_in\n\n price_out_d = self.prices.loc[:d,self.contract_rolling_out].iloc[-1]\n price_out_dm1 = self.prices.loc[:d,self.contract_rolling_out].iloc[-2]\n price_in_d = self.prices.loc[:d,self.contract_rolling_in].iloc[-1]\n price_in_dm1 = self.prices.loc[:d,self.contract_rolling_in].iloc[-2]\n\n self.df_tracker.loc[d, 'price_out_today'] = price_out_d\n self.df_tracker.loc[d, 'price_in_today'] = price_in_d\n\n self.df_tracker.loc[d, 'price_out_yst'] = price_out_dm1\n self.df_tracker.loc[d, 'price_in_yst'] = price_in_dm1\n\n self.df_tracker.loc[d, 'holdings_out'] = self.holdings_out\n self.df_tracker.loc[d, 'holdings_in'] = self.holdings_in\n\n if self.weight_in == 1:\n pnl = self.holdings_in * (price_in_d - price_in_dm1)\n else:\n pnl = self.holdings_in * (price_in_d - price_in_dm1) + self.holdings_out * (price_out_d - price_out_dm1)\n\n self.df_tracker.loc[d, 'er_index'] = self.df_tracker.loc[dm1, 'er_index'] + pnl\n\n self._get_contracts_for_date(d)\n\n if d.month != dm1.month:\n self.holdings_out = self.holdings_in\n self.holdings_in = 0\n self.weight_out = 1\n self.weight_in = 0\n\n price_out_d = self.prices.loc[:d, self.contract_rolling_out].iloc[-1]\n price_out_dm1 = self.prices.loc[:d, self.contract_rolling_out].iloc[-2]\n price_in_d = self.prices.loc[:d, self.contract_rolling_in].iloc[-1]\n price_in_dm1 = self.prices.loc[:d, self.contract_rolling_in].iloc[-2]\n\n self.df_tracker.loc[d, 'holdings_out'] = self.holdings_out\n self.df_tracker.loc[d, 'holdings_in'] = self.holdings_in\n self.df_tracker.loc[d, 'w_out'] = self.weight_out\n self.df_tracker.loc[d, 'w_in'] = self.weight_in\n self.df_tracker.loc[d, 'price_out_today'] = price_out_d\n self.df_tracker.loc[d, 'price_in_today'] = price_in_d\n self.df_tracker.loc[d, 'price_out_yst'] = price_out_dm1\n 
self.df_tracker.loc[d, 'price_in_yst'] = price_in_dm1\n self.df_tracker.loc[d, 'contract_rolling_out'] = self.contract_rolling_out\n self.df_tracker.loc[d, 'contract_rolling_in'] = self.contract_rolling_in\n\n else:\n self._get_contract_weights(d)\n\n holdings_out = self.weight_out * self.df_tracker.loc[d, 'er_index'] / price_out_d\n holdings_in = self.weight_in * self.df_tracker.loc[d, 'er_index'] / price_in_d\n self.holdings_out = 0 if math.isnan(holdings_out) else holdings_out\n self.holdings_in = 0 if math.isnan(holdings_in) else holdings_in\n" ]
[ [ "pandas.to_datetime", "pandas.DataFrame", "pandas.tseries.offsets.BDay" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "0.19", "0.24", "0.20", "1.0", "0.25" ], "scipy": [], "tensorflow": [] } ]
Trevor-Waite/vecstack
[ "e9185909e8ece6500139e0f4305635fc1fc9fb50" ]
[ "tests/test_func_api_classification_multiclass.py" ]
[ "#-------------------------------------------------------------------------------\n# !!! cross_val_predict uses stratified split\n#-------------------------------------------------------------------------------\n# Main concept for testing returned arrays:\n# 1). create ground truth e.g. with cross_val_predict\n# 2). run vecstack\n# 3). compare returned arrays with ground truth \n# 4). compare arrays from file with ground truth \n#-------------------------------------------------------------------------------\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport unittest\nfrom numpy.testing import assert_array_equal\n# from numpy.testing import assert_allclose\nfrom numpy.testing import assert_equal\n\nimport os\nimport glob\nimport numpy as np\nimport scipy.stats as st\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.model_selection import cross_val_score\n# from sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.datasets import make_classification\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import log_loss\nfrom sklearn.metrics import make_scorer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom vecstack import stacking\n\nn_classes = 3\nn_folds = 5\ntemp_dir = 'tmpdw35lg54ms80eb42'\n\nX, y = make_classification(n_samples = 500, n_features = 5, n_informative = 3, n_redundant = 1, \n n_classes = n_classes, flip_y = 0, random_state = 0)\n# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n\n# Make train/test split by hand to avoid strange errors probably related to testing suit:\n# https://github.com/scikit-learn/scikit-learn/issues/1684\n# https://github.com/scikit-learn/scikit-learn/issues/1704\n# Note: Python 2.7, 3.4 - OK, but 3.5, 3.6 - error\n\nnp.random.seed(0)\nind = np.arange(500)\nnp.random.shuffle(ind)\n\nind_train = ind[:400]\nind_test = ind[400:]\n\nX_train = X[ind_train]\nX_test = X[ind_test]\n\ny_train = y[ind_train]\ny_test = y[ind_test]\n\n\n# Create 4-dim data\nnp.random.seed(42)\nX_train_4d = np.random.normal(size=(400, 8, 8, 3))\nX_test_4d = np.random.normal(size=(100, 8, 8, 3))\ny_train_4d = np.random.randint(n_classes, size=400)\n\n# Reshape 4-dim to 2-dim\nX_train_4d_unrolled = X_train_4d.reshape(X_train_4d.shape[0], -1)\nX_test_4d_unrolled = X_test_4d.reshape(X_test_4d.shape[0], -1)\n\n#------------------------------------------------------------------------------\n#------------------------------------------------------------------------------\n\nclass LogisticRegressionUnrolled(LogisticRegression):\n \"\"\"\n For tests related to N-dim input.\n Estimator accepts N-dim array and reshape it to 2-dim array\n \"\"\"\n def fit(self, X, y):\n return super(LogisticRegressionUnrolled, self).fit(X.reshape(X.shape[0], -1), y)\n\n def predict(self, X):\n return super(LogisticRegressionUnrolled, self).predict(X.reshape(X.shape[0], -1))\n\n def predict_proba(self, X):\n return super(LogisticRegressionUnrolled, self).predict_proba(X.reshape(X.shape[0], -1))\n\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\nclass TestFuncClassificationMulticlass(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n try:\n os.mkdir(temp_dir)\n except:\n print('Unable to create temp dir')\n \n @classmethod\n def tearDownClass(cls):\n try:\n 
os.rmdir(temp_dir)\n except:\n print('Unable to remove temp dir')\n\n def tearDown(self):\n # Remove files after each test\n files = glob.glob(os.path.join(temp_dir, '*.npy'))\n files.extend(glob.glob(os.path.join(temp_dir, '*.log.txt')))\n try:\n for file in files:\n os.remove(file)\n except:\n print('Unable to remove temp file')\n \n #---------------------------------------------------------------------------\n # Test returned and saved arrays in each mode (parameter <mode>)\n # Here we also test parameter <stratified> \n #---------------------------------------------------------------------------\n\n #---------------------------------------------------------------------------\n # Predict labels\n #---------------------------------------------------------------------------\n\n def test_oof_pred_mode(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n _ = model.fit(X_train, y_train)\n S_test_1 = model.predict(X_test).reshape(-1, 1)\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, \n mode = 'oof_pred', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_oof_mode(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n S_test_1 = None\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, \n mode = 'oof', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_pred_mode(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = None\n _ = model.fit(X_train, y_train)\n S_test_1 = model.predict(X_test).reshape(-1, 1)\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, \n mode = 
'pred', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_oof_pred_bag_mode(self):\n \n S_test_temp = np.zeros((X_test.shape[0], n_folds))\n # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n S_test_temp[:, fold_counter] = model.predict(X_test)\n S_test_1 = st.mode(S_test_temp, axis = 1)[0]\n \n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir,\n mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_pred_bag_mode(self):\n \n S_test_temp = np.zeros((X_test.shape[0], n_folds))\n # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n S_test_temp[:, fold_counter] = model.predict(X_test)\n S_test_1 = st.mode(S_test_temp, axis = 1)[0]\n \n S_train_1 = None\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir,\n mode = 'pred_bag', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the 
latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n #---------------------------------------------------------------------------\n # Predict proba\n #---------------------------------------------------------------------------\n \n def test_oof_pred_mode_proba(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict_proba')\n _ = model.fit(X_train, y_train)\n S_test_1 = model.predict_proba(X_test)\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, stratified = True,\n mode = 'oof_pred', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_oof_mode_proba(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict_proba')\n S_test_1 = None\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, stratified = True, \n mode = 'oof', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_pred_mode_proba(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = None\n _ = model.fit(X_train, y_train)\n S_test_1 = model.predict_proba(X_test)\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, stratified = True, \n mode = 'pred', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more 
then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_oof_pred_bag_mode_proba(self):\n \n S_test_1 = np.zeros((X_test.shape[0], n_classes))\n S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes))\n # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes)\n S_test_temp[:, col_slice_fold] = model.predict_proba(X_test)\n for class_id in range(n_classes):\n S_test_1[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1)\n \n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict_proba')\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir,\n mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True, needs_proba = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n \n #@@@@\n # Look at proba\n # print('\\nOne model')\n # print('etalon')\n # print(S_test_1[:2])\n # print('vecstack')\n # print(S_test_2[:2])\n #@@@@\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_pred_bag_mode_proba(self):\n \n S_test_1 = np.zeros((X_test.shape[0], n_classes))\n S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes))\n # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes)\n S_test_temp[:, col_slice_fold] = model.predict_proba(X_test)\n for class_id in range(n_classes):\n S_test_1[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1)\n \n S_train_1 = None\n\n models = [LogisticRegression(random_state=0, 
solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir,\n mode = 'pred_bag', random_state = 0, verbose = 0, stratified = True, needs_proba = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n \n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n #---------------------------------------------------------------------------\n # Test <shuffle> and <random_state> parameters\n #---------------------------------------------------------------------------\n \n def test_oof_pred_bag_mode_shuffle(self):\n \n S_test_temp = np.zeros((X_test.shape[0], n_folds))\n # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n S_test_temp[:, fold_counter] = model.predict(X_test)\n S_test_1 = st.mode(S_test_temp, axis = 1)[0]\n \n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n # !!! Important. Here we pass CV-generator not number of folds <cv = kf>\n S_train_1 = cross_val_predict(model, X_train, y = y_train, cv = kf, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = True, save_dir=temp_dir,\n mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n #---------------------------------------------------------------------------\n # Test <metric> parameter and its default values depending on <regression> parameter\n # Labels\n # Important. 
We use <greater_is_better = True> in <make_scorer> for any error function\n # because we need raw scores (without minus sign)\n #---------------------------------------------------------------------------\n def test_oof_mode_metric(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n scorer = make_scorer(accuracy_score)\n scores = cross_val_score(model, X_train, y = y_train, cv = n_folds, \n scoring = scorer, n_jobs = 1, verbose = 0)\n mean_str_1 = '%.8f' % np.mean(scores)\n std_str_1 = '%.8f' % np.std(scores)\n \n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train, S_test = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, save_dir=temp_dir, \n mode = 'oof', random_state = 0, verbose = 0, stratified = True)\n \n # Load mean score and std from file\n # Normally if cleaning is performed there is only one .log.txt file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.log.txt')))[-1] # take the latest file\n with open(file_name) as f:\n for line in f:\n if 'MEAN' in line:\n split = line.strip().split()\n break\n\n mean_str_2 = split[1][1:-1]\n std_str_2 = split[3][1:-1]\n\n assert_equal(mean_str_1, mean_str_2)\n assert_equal(std_str_1, std_str_2)\n \n #---------------------------------------------------------------------------\n # Test <metric> parameter and its default values depending on <regression> parameter\n # Proba\n # Important. We use <greater_is_better = True> in <make_scorer> for any error function\n # because we need raw scores (without minus sign)\n #---------------------------------------------------------------------------\n def test_oof_mode_metric_proba(self):\n\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n scorer = make_scorer(log_loss, needs_proba = True)\n scores = cross_val_score(model, X_train, y = y_train, cv = n_folds, \n scoring = scorer, n_jobs = 1, verbose = 0)\n mean_str_1 = '%.8f' % np.mean(scores)\n std_str_1 = '%.8f' % np.std(scores)\n \n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train, S_test = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, save_dir=temp_dir, \n mode = 'oof', random_state = 0, verbose = 0, stratified = True, \n needs_proba = True)\n \n # Load mean score and std from file\n # Normally if cleaning is performed there is only one .log.txt file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.log.txt')))[-1] # take the latest file\n with open(file_name) as f:\n for line in f:\n if 'MEAN' in line:\n split = line.strip().split()\n break\n\n mean_str_2 = split[1][1:-1]\n std_str_2 = split[3][1:-1]\n\n assert_equal(mean_str_1, mean_str_2)\n assert_equal(std_str_1, std_str_2)\n \n #-------------------------------------------------------------------------------\n # Test several mdels in one run\n #-------------------------------------------------------------------------------\n \n def test_oof_pred_mode_2_models(self):\n\n # Model a\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n _ = model.fit(X_train, y_train)\n 
S_test_1_a = model.predict(X_test).reshape(-1, 1)\n \n # Model b\n model = GaussianNB()\n S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n _ = model.fit(X_train, y_train)\n S_test_1_b = model.predict(X_test).reshape(-1, 1)\n \n S_train_1 = np.c_[S_train_1_a, S_train_1_b]\n S_test_1 = np.c_[S_test_1_a, S_test_1_b]\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'),\n GaussianNB()]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir, \n mode = 'oof_pred', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n def test_oof_pred_bag_mode_2_models(self):\n \n # Model a\n S_test_temp = np.zeros((X_test.shape[0], n_folds))\n # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n S_test_temp[:, fold_counter] = model.predict(X_test)\n S_test_1_a = st.mode(S_test_temp, axis = 1)[0]\n \n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n \n # Model b\n S_test_temp = np.zeros((X_test.shape[0], n_folds))\n # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = GaussianNB()\n _ = model.fit(X_tr, y_tr)\n S_test_temp[:, fold_counter] = model.predict(X_test)\n S_test_1_b = st.mode(S_test_temp, axis = 1)[0]\n \n model = GaussianNB()\n S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n \n S_train_1 = np.c_[S_train_1_a, S_train_1_b]\n S_test_1 = np.c_[S_test_1_a, S_test_1_b]\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'),\n GaussianNB()]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir,\n mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then 
one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n \n def test_oof_pred_mode_proba_2_models(self):\n\n # Model a\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict_proba')\n _ = model.fit(X_train, y_train)\n S_test_1_a = model.predict_proba(X_test)\n \n # Model b\n model = GaussianNB()\n S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict_proba')\n _ = model.fit(X_train, y_train)\n S_test_1_b = model.predict_proba(X_test)\n \n S_train_1 = np.c_[S_train_1_a, S_train_1_b]\n S_test_1 = np.c_[S_test_1_a, S_test_1_b]\n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'),\n GaussianNB()]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, stratified = True,\n mode = 'oof_pred', random_state = 0, verbose = 0, needs_proba = True, save_dir=temp_dir)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n \n \n def test_oof_pred_bag_mode_proba_2_models(self):\n \n # Model a\n S_test_1_a = np.zeros((X_test.shape[0], n_classes))\n S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes))\n # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes)\n S_test_temp[:, col_slice_fold] = model.predict_proba(X_test)\n for class_id in range(n_classes):\n S_test_1_a[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1)\n \n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1_a = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict_proba')\n \n # Model b\n S_test_1_b = np.zeros((X_test.shape[0], n_classes))\n S_test_temp = np.zeros((X_test.shape[0], n_folds * n_classes))\n # Using StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train, y_train)):\n # Split data and target\n X_tr = 
X_train[tr_index]\n y_tr = y_train[tr_index]\n X_te = X_train[te_index]\n y_te = y_train[te_index]\n model = GaussianNB()\n _ = model.fit(X_tr, y_tr)\n col_slice_fold = slice(fold_counter * n_classes, fold_counter * n_classes + n_classes)\n S_test_temp[:, col_slice_fold] = model.predict_proba(X_test)\n for class_id in range(n_classes):\n S_test_1_b[:, class_id] = np.mean(S_test_temp[:, class_id::n_classes], axis = 1)\n \n model = GaussianNB()\n S_train_1_b = cross_val_predict(model, X_train, y = y_train, cv = n_folds, \n n_jobs = 1, verbose = 0, method = 'predict_proba')\n \n S_train_1 = np.c_[S_train_1_a, S_train_1_b]\n S_test_1 = np.c_[S_test_1_a, S_test_1_b]\n \n \n\n models = [LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr'),\n GaussianNB()]\n S_train_2, S_test_2 = stacking(models, X_train, y_train, X_test, \n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir,\n mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True, needs_proba = True)\n \n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n \n #@@@@\n # Look at proba\n # print('\\nTwo models')\n # print('etalon')\n # print(S_test_1[:2])\n # print('vecstack')\n # print(S_test_2[:2])\n #@@@@\n\n assert_array_equal(S_train_1, S_train_2)\n assert_array_equal(S_test_1, S_test_2)\n \n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n\n\n def test_N_dim_input(self):\n \"\"\"\n This is `test_oof_pred_bag_mode` function with `LogisticRegressionUnrolled` estimator\n \"\"\"\n S_test_temp = np.zeros((X_test_4d_unrolled.shape[0], n_folds))\n # Usind StratifiedKFold because by defauld cross_val_predict uses StratifiedKFold\n kf = StratifiedKFold(n_splits = n_folds, shuffle = False, random_state = 0)\n for fold_counter, (tr_index, te_index) in enumerate(kf.split(X_train_4d_unrolled, y_train_4d)):\n # Split data and target\n X_tr = X_train_4d_unrolled[tr_index]\n y_tr = y_train_4d[tr_index]\n X_te = X_train_4d_unrolled[te_index]\n y_te = y_train_4d[te_index]\n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n _ = model.fit(X_tr, y_tr)\n S_test_temp[:, fold_counter] = model.predict(X_test_4d_unrolled)\n S_test_1 = st.mode(S_test_temp, axis = 1)[0]\n \n model = LogisticRegression(random_state=0, solver='liblinear', multi_class='ovr')\n S_train_1 = cross_val_predict(model, X_train_4d_unrolled, y = y_train_4d, cv = n_folds,\n n_jobs = 1, verbose = 0, method = 'predict').reshape(-1, 1)\n\n models = [LogisticRegressionUnrolled(random_state=0, solver='liblinear', multi_class='ovr')]\n S_train_2, S_test_2 = stacking(models, X_train_4d, y_train_4d, X_test_4d,\n regression = False, n_folds = n_folds, shuffle = False, save_dir=temp_dir,\n mode = 'oof_pred_bag', random_state = 0, verbose = 0, stratified = True)\n\n # Load OOF from file\n # Normally if cleaning is performed there is only one .npy file at given moment\n # But if we have no cleaning there may be more then one file so we take the latest\n file_name = sorted(glob.glob(os.path.join(temp_dir, '*.npy')))[-1] # take the latest file\n S = np.load(file_name, allow_pickle=True)\n S_train_3 = S[0]\n S_test_3 = S[1]\n\n assert_array_equal(S_train_1, S_train_2)\n 
assert_array_equal(S_test_1, S_test_2)\n\n assert_array_equal(S_train_1, S_train_3)\n assert_array_equal(S_test_1, S_test_3)\n\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n unittest.main()\n\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\n" ]
[ [ "sklearn.datasets.make_classification", "numpy.mean", "numpy.random.randint", "numpy.testing.assert_equal", "numpy.arange", "sklearn.model_selection.StratifiedKFold", "numpy.std", "numpy.load", "numpy.zeros", "sklearn.naive_bayes.GaussianNB", "sklearn.metrics.make_scorer", "sklearn.model_selection.cross_val_predict", "sklearn.linear_model.LogisticRegression", "numpy.random.seed", "sklearn.model_selection.cross_val_score", "numpy.random.shuffle", "numpy.testing.assert_array_equal", "numpy.random.normal", "scipy.stats.mode" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
EGAlberts/some_bandits
[ "99ee49e74c85ede2d941932396245c441a4b7e9b" ]
[ "some_bandits/bandits/EXP3.py" ]
[ "import numpy as np\r\nfrom random import sample\r\nfrom some_bandits.bandit_options import bandit_args\r\nfrom some_bandits.utilities import convert_conf, save_to_pickle, calculate_utility\r\nfrom some_bandits.bandits.Bandit import Bandit\r\nfrom some_bandits.bandits.Expert import Expert\r\nfrom statistics import mean\r\n\r\nACTION = 0\r\nREWARD = 1\r\nN_K = 2\r\n\r\n#ETA = 1\r\n\r\n\r\nclass EXP3(Bandit, Expert):\r\n def __init__(self, formula): \r\n super().__init__(\"EXP3-\" + formula)\r\n self.weights, self.distribution = self.exp3_initialize(len(self.arms))\r\n self.num_arms = len(self.arms)\r\n \r\n #np.random.seed(1337)\r\n trace_len = 20000 #the total time of chosen trace in SWIM in seconds\r\n total_count = round(trace_len / 60) \r\n self.eta = 0.1 #np.sqrt(np.log(len(self.arms)) / (len(self.arms) * total_count) ) #0.1\r\n \r\n self.last_action = bandit_args[\"initial_configuration\"]\r\n self.distr_func()\r\n\r\n \r\n \r\n \r\n def exp3_initialize(self, num_arms):\r\n return [0] * num_arms, []\r\n\r\n \r\n\r\n\r\n def start_strategy(self, reward):\r\n #print(\"received this \" + str(reward))\r\n\r\n #print(\"my distribution is \")\r\n #print(self.distribution) \r\n \r\n self.update_func(reward, self.arms.index(self.last_action)) #Update weights\r\n\r\n ##print(\"now my weights are\")\r\n #print(self.weights)\r\n self.distr_func() #(re-)calculate Pt\r\n \r\n #print(\"now my distribution is \")\r\n #print(self.distribution) \r\n\r\n new_action = self.sample_action()\r\n \r\n self.last_action = self.arms[new_action]\r\n\r\n return self.arms[new_action]\r\n\r\n def propagate_reward(self, reward, chosen_action):\r\n self.update_func(reward, chosen_action)\r\n\r\n self.distr_func()\r\n\r\n # def formula_to_function(self, choice):\r\n # funcs = {\r\n # \"FH\": (fixed_horizon_Pt, fixed_horizon_up),\r\n # \"anytime\": (anytime_Pt, anytime_up)\r\n # }\r\n \r\n # func = funcs.get(choice)\r\n # ###print(func.__doc__)\r\n # return func\r\n\r\n\r\n\r\n def distr_func(self):\r\n # exp(eta * S^_t-1i) / SUMkj=1 exp(eta * S^_t-1j)\r\n\r\n sum_weights = sum([np.exp(self.eta * weight) for weight in self.weights])\r\n\r\n self.distribution.clear()\r\n #P_t = \r\n self.distribution.extend([np.exp(self.eta * weight)/sum_weights for weight in self.weights])\r\n\r\n def update_func(self, payoff, action):\r\n #S^_ti = S^_t-1i + 1 - I{A_t = i}(1 - X_t) / P_ti\r\n for weight_i in range(len(self.weights)):\r\n if(weight_i == action):\r\n self.weights[action] = self.weights[action] + 1 - ((1-payoff)/self.distribution[action]) \r\n else:\r\n self.weights[weight_i] = self.weights[weight_i] + 1\r\n return\r\n" ]
[ [ "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Zdddzz/smartcontract
[ "e587c72cb76883437cd653a47cf14f038ab72f74" ]
[ "utils.py" ]
[ "# -*- coding: utf-8 -*- \n# @Time : 2020-12-25 22:28 \n# @Author : Di Zhu\n\n\nimport json\nimport logging\nimport os\nimport shutil\n\nimport torch\n\n\nclass Params(object):\n \"\"\"Class that loads hyperparameters from a json file.\n\n Example:\n ```\n params = Params(json_path)\n print(params.learning_rate)\n params.learning_rate = 0.5 # change the value of learning_rate in params\n ```\n \"\"\"\n\n def __init__(self, json_path):\n with open(json_path) as f:\n params = json.load(f)\n self.__dict__.update(params)\n\n def save(self, json_path):\n with open(json_path, 'w') as f:\n json.dump(self.__dict__, f, indent=4)\n\n def update(self, json_path):\n \"\"\"Loads parameters from json file\"\"\"\n with open(json_path) as f:\n params = json.load(f)\n self.__dict__.update(params)\n\n @property\n def dict(self):\n \"\"\"Gives dict-like access to Params instance by `params.dict['learning_rate']\"\"\"\n return self.__dict__\n\n\nclass RunningAverage(object):\n \"\"\"A simple class that maintains the running average of a quantity\n\n Example:\n ```\n loss_avg = RunningAverage()\n loss_avg.update(2)\n loss_avg.update(4)\n loss_avg() = 3\n ```\n \"\"\"\n\n def __init__(self):\n self.steps = 0\n self.total = 0\n\n def update(self, val):\n self.total += val\n self.steps += 1\n\n def __call__(self):\n return self.total / float(self.steps)\n\n\ndef set_logger(log_path):\n \"\"\"Set the logger to log info in terminal and file `log_path`.\n\n In general, it is useful to have a logger so that every output to the terminal is saved\n in a permanent file. Here we save it to `model_dir/train.log`.\n\n Example:\n ```\n logging.info(\"Starting training...\")\n ```\n\n Args:\n log_path: (string) where to log\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)\n\n\ndef save_checkpoint(state, config, is_best, checkpoint):\n \"\"\"Saves model and training parameters at checkpoint + 'last.pth.tar'. If is_best==True, also saves\n checkpoint + 'best.pth.tar'\n\n Args:\n state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict\n is_best: (bool) True if it is the best model seen till now\n checkpoint: (string) folder where parameters are to be saved\n config: (dict) contains configurations of the model\n \"\"\"\n checkpoint_filepath = os.path.join(checkpoint, 'last.pth.tar')\n config_filepath = os.path.join(checkpoint, 'last_config.json')\n\n if not os.path.exists(checkpoint):\n print(\"Checkpoint Directory does not exist! Making directory {}\".format(checkpoint))\n os.makedirs(checkpoint)\n torch.save(state, checkpoint_filepath)\n with open(config_filepath, 'w') as f:\n json.dump(config, f)\n if is_best:\n shutil.copyfile(checkpoint_filepath, os.path.join(checkpoint, 'best.pth.tar'))\n shutil.copyfile(config_filepath, os.path.join(checkpoint, 'best_config.json'))\n\n\ndef load_checkpoint(checkpoint, model, optimizer=None):\n \"\"\"Loads model parameters (state_dict) from file_path. 
If optimizer is provided, loads state_dict of\n    optimizer assuming it is present in checkpoint.\n\n    Args:\n        checkpoint: (string) filename which needs to be loaded\n        model: (torch.nn.Module) model for which the parameters are loaded\n        optimizer: (torch.optim) optional: resume optimizer from checkpoint\n    \"\"\"\n    if not os.path.exists(checkpoint):\n        # raising a bare string is a TypeError in Python 3; raise a real exception instead\n        raise FileNotFoundError(\"File doesn't exist {}\".format(checkpoint))\n    checkpoint = torch.load(checkpoint)\n    model.load_state_dict(checkpoint['state_dict'])\n\n    if optimizer:\n        optimizer.load_state_dict(checkpoint['optim_dict'])\n\n    return checkpoint\n" ]
[ [ "torch.load", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zhaonat/RCWA
[ "15f4300601899d08f57c95863df88280ab6f0d21", "15f4300601899d08f57c95863df88280ab6f0d21" ]
[ "TMM_examples/TMM_bragg_multilayer.py", "convolution_matrices/fft_investigation.py" ]
[ "import os\nimport sys\nmodule_path = os.path.abspath(os.path.join('..'))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nimport numpy as np\nimport matplotlib.pyplot as plt;\nimport cmath;\nfrom TMM_functions import run_TMM_simulation as rTMM\n\n## GOAL: simulate a BRAGG MIRROR at some wavelength (1 micron)\n\n#%% DEFINE SIMULATION PARAMETers\n#% General Units\ndegrees = np.pi/180;\nL0 = 1e-6; #units of microns;\neps0 = 8.854e-12;\nmu0 = 4*np.pi*10**-7;\nc0 = 1/(np.sqrt(mu0*eps0))\nI = np.matrix(np.eye(2,2)); #unit 2x2 matrix\n\n\n## normalized units\n#z' = k0*z;\n#k = k/k0;\n\n## REFLECTION AND TRANSMSSION SPACE epsilon and mu PARAMETERS\nm_r = 1; e_r = 1; incident_medium = [e_r, m_r];\nm_t = 1; e_t = 1; transmission_medium = [e_t, m_t];\n\n## set wavelength scanning range\nwavelengths = np.linspace(0.5,1.6,500); #500 nm to 1000 nm\nkmagnitude_scan = 2 * np.pi / wavelengths; #no\nomega = c0 * kmagnitude_scan; #using the dispersion wavelengths\n\n#source parameters\ntheta = 10 * degrees; #%elevation angle\nphi = 0 * degrees; #%azimuthal angle\n\n## incident wave properties, at this point, everything is in units of k_0\nn_i = np.sqrt(e_r*m_r);\n\n#k0 = np.sqrt(kx**2+ky**2+kz**2); we know k0, theta, and phi\n\n#actually, in the definitions here, kx = k0*sin(theta)*cos(phi), so kx, ky here are normalized\nkx = n_i*np.sin(theta)*np.cos(phi); #constant in ALL LAYERS; kx = 0 for normal incidence\nky = n_i*np.sin(theta)*np.sin(phi); #constant in ALL LAYERS; ky = 0 for normal incidence\n\nkz_inc = cmath.sqrt(e_r * m_r - kx ** 2 - ky ** 2);\n\nnormal_vector = np.array([0, 0, -1]) #positive z points down;\nate_vector = np.matrix([0, 1, 0]); #vector for the out of plane E-field\n\n#ampltidue of the te vs tm modes (which are decoupled)\npte = 1; #1/np.sqrt(2);\nptm = 0; #cmath.sqrt(-1)/np.sqrt(2);\npolarization_amplitudes = [pte, ptm]\nk_inc = [kx, ky];\nprint('--------incident wave paramters----------------')\nprint('incident n_i: '+str(n_i))\nprint('kx_inc: '+str(kx)+' ky_inc: '+str(ky))\nprint('kz_inc: ' + str(kz_inc));\nprint('-----------------------------------------------')\n\n\n#thickness 0 means L = 0, which only pops up in the xponential part of the expression\nnum_pairs = 5;\nER = np.tile([12,2], num_pairs);\nUR = np.tile([1],num_pairs*2);\nbragg_wavelength = 1;\nlayer_thicknesses = 0.25*np.tile([bragg_wavelength /np.sqrt(12), bragg_wavelength /np.sqrt(2)], num_pairs); #this retains SI unit convention\nref = list(); trans = list();\n\n## run simulation\nRef, Tran = rTMM.run_TMM_simulation(wavelengths, polarization_amplitudes, theta, phi, ER, UR, layer_thicknesses,\\\n transmission_medium, incident_medium)\n\nplt.figure();\nplt.plot(wavelengths/L0, Ref);\nplt.plot(wavelengths/L0, Tran);\nplt.title('Spectrum of a Bragg Mirror')\nplt.xlabel('wavelength ($\\mu m$)')\nplt.ylabel('R/T')\nplt.legend(('Ref','Tran'))\nplt.savefig('bragg_TMM.png');\nplt.show();", "## investigating how ffts work in numpy\nimport numpy as np\nfrom numpy import fft\nimport matplotlib.pyplot as plt\n\neps_grid = np.ones((20,20));\neps_grid[5:15, 5:15] = 12;\n\ntest1 = fft.fftshift(fft.fft2(eps_grid)); #this fortunately agrees with matlab...\nplt.figure();\nplt.imshow(abs(test1))\nplt.colorbar()\nplt.show()\n\nNx = 512; Ny = 512;\ne_r = 6;\na =1; radius = 0.35;\nA = e_r*np.ones((Nx,Ny));\nci = int(Nx/2); cj= int(Ny/2);\ncr = (radius/a)*Nx;\nI,J=np.meshgrid(np.arange(A.shape[0]),np.arange(A.shape[1]));\n\ndist = np.sqrt((I-ci)**2 + (J-cj)**2);\nA[np.where(dist<cr)] = 1;\n\nAfc = 
np.fft.fftshift(np.fft.fft2(A));\nplt.figure();\nplt.imshow(np.log(abs(Afc)))\nplt.show()\n\n\n" ]
[ [ "numpy.matrix", "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.title", "numpy.eye", "numpy.tile", "matplotlib.pyplot.savefig", "numpy.cos", "matplotlib.pyplot.plot", "numpy.sin", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.fft.fft2", "numpy.sqrt", "numpy.arange", "numpy.ones", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.show", "numpy.where", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GeirOwe/adventOfCode
[ "fee1420cb8ecce8b7aaf9d48472364be191ca2a2" ]
[ "day20_2021.py" ]
[ "# Day20 - 2021 Advent of code\r\n# source: https://adventofcode.com/2021/day/20\r\n\r\nimport os\r\nimport numpy as np\r\n\r\ndef clear_console():\r\n os.system('clear')\r\n print('< .... AoC 2021 Day 20, part 1 .... >')\r\n print()\r\n return\r\n\r\n# convert from pixels to binary \"...#...#.\" -> \"000100010\"\r\ndef pix_2_binary(the_9_pixels):\r\n i = 0\r\n binaryX = \"\"\r\n pix = the_9_pixels\r\n #check all fields in list. \".\" -> 0 and \"#\" -> 1\r\n while i < 9:\r\n if pix[i] == \".\": binaryX += \"0\"\r\n else: binaryX += \"1\"\r\n i += 1\r\n return binaryX\r\n\r\n# find all adjacent cells and add content of all cells to a string\r\ndef find_adjacent_pixels(grid, element):\r\n #the 9 pixels - around and including the pos - visualised here with a zero:\r\n # # . . # .\r\n # #[. . .].\r\n # #[# 0 .]#\r\n # .[. # .].\r\n # . . # # #\r\n the_9_pixels = \"\"\r\n maxRow, maxCol = grid.shape\r\n maxRow -= 1 #starting from zero\r\n maxCol -= 1 #starting from zero\r\n elementProcessed = False\r\n\r\n # check all 4 corners - use dots for all positions \"outside the grid\"\r\n if element == [0,0]: \r\n the_9_pixels = the_9_pixels + \"....\" + grid[0,0] + grid[0, 1] + \".\" + grid[1, 0] + grid[1, 1]\r\n elementProcessed = True\r\n if element == [0,maxCol]: \r\n the_9_pixels = the_9_pixels + \"...\" + grid[0, maxCol-1] + grid[0,maxCol] + \".\" + grid[1, maxCol-1] + grid[1, maxCol] + \".\"\r\n elementProcessed = True\r\n if element == [maxRow,maxCol]: \r\n the_9_pixels = the_9_pixels + grid[maxRow-1, maxCol-1] + grid[maxRow-1, maxCol] + \".\" + grid[maxRow, maxCol-1] + grid[maxRow,maxCol] + \"....\"\r\n elementProcessed = True\r\n if element == [maxRow,0]: \r\n the_9_pixels = the_9_pixels + \".\" + grid[maxRow-1, 0] + grid[maxRow-1, 1] + \".\" + grid[maxRow, 0] + grid[maxRow, 1] + \"...\"\r\n elementProcessed = True\r\n \r\n #check if first column or last column\r\n if elementProcessed != True:\r\n if element[1] == 0: #first column\r\n row = element[0]\r\n the_9_pixels = the_9_pixels + \".\"+grid[row-1, 0]+grid[row-1, 1] + \".\"+grid[row, 0]+grid[row, 1] + \".\"+grid[row+1, 0]+grid[row+1, 1]\r\n elementProcessed = True\r\n elif element[1] == maxCol: #last column\r\n row = element[0]\r\n the_9_pixels = the_9_pixels + grid[row-1, maxCol-1]+grid[row-1, maxCol]+\".\"+ grid[row, maxCol-1]+grid[row, maxCol]+\".\" + grid[row+1, maxCol-1]+grid[row+1, maxCol]+\".\"\r\n elementProcessed = True\r\n\r\n #check if first row or last row\r\n if elementProcessed != True:\r\n if element[0] == 0: #first row\r\n col = element[1]\r\n the_9_pixels = the_9_pixels + \"...\"+grid[0, col-1]+grid[0, col]+grid[0, col+1]+grid[1, col-1]+grid[1, col]+grid[1, col+1]\r\n elementProcessed = True\r\n elif element[0] == maxRow: #last row\r\n col = element[1]\r\n the_9_pixels = the_9_pixels + grid[maxRow-1, col-1]+grid[maxRow-1, col]+grid[maxRow-1, col+1]+grid[maxRow, col-1]+grid[maxRow, col]+grid[maxRow, col+1] + \"...\"\r\n elementProcessed = True\r\n \r\n #the position is in the middle if still false\r\n if elementProcessed != True:\r\n row = element[0]\r\n col = element[1]\r\n the_9_pixels = the_9_pixels + grid[row-1, col-1]+grid[row-1, col]+grid[row-1, col+1]+grid[row, col-1]+grid[row, col]+grid[row, col+1] + grid[row+1, col-1]+grid[row+1, col]+grid[row+1, col+1]\r\n\r\n return the_9_pixels\r\n\r\n# do a new step\r\ndef step_away(input_image, image_algo, valueX):\r\n maxRow, maxCol = input_image.shape\r\n #start with an empty output image\r\n data_list = [\".\"] * (maxRow * maxCol)\r\n output_image = np.array(data_list, 
dtype=\"str\").reshape(maxRow, maxCol)\r\n\r\n row = 0\r\n while row < maxRow: \r\n #REPEAT for all pixels i input picture\r\n # for each dot in input image - find the 9 pixels around a dot -> e.g. [...#...#.]\r\n col = 0\r\n while col < maxCol:\r\n cell = [row, col]\r\n the_9_pixels = find_adjacent_pixels(input_image, cell)\r\n # transform from pixel to binary -> e.g. [000100010]\r\n binaryX = pix_2_binary(the_9_pixels)\r\n # convert from binary to decimal -> e.g. 34\r\n numX = int(binaryX, 2)\r\n # find the symbol in that specific pos in image algo -> image_algo[34] -> \"#\"\r\n outSymbol = image_algo[numX]\r\n # add that symbol to the OUTPUT PICTURE in same position as in input image\r\n output_image[row, col] = outSymbol\r\n # if pixel is lit in output picture (i.e. \"#\") -> , valueX += 1\r\n if outSymbol == \"#\": valueX += 1\r\n #next column\r\n col += 1\r\n \r\n #next row\r\n row += 1\r\n\r\n return valueX, output_image\r\n\r\n# process the data with all indicated steps\r\ndef process_the_data(input_image, image_algo):\r\n #number of steps\r\n noSteps = 2\r\n i = 0\r\n #list_of_10 = []\r\n while i < noSteps:\r\n #valueX holds the number of lit pixels\r\n valueX = 0\r\n #do a step and see what changes are done to the input image\r\n #the resulting image from the setp is the new input image for next cycle\r\n valueX, input_image = step_away(input_image, image_algo, valueX)\r\n\r\n i += 1\r\n #print(input_image, \"\\n\")\r\n return valueX\r\n\r\n#add some empty columns\r\ndef add_cols(data_list, cols):\r\n n = 0\r\n while n < cols:\r\n data_list.append(\".\")\r\n n += 1\r\n return data_list\r\n\r\n#add some empty rows\r\ndef add_rows(data_list, rows, cols):\r\n n = 0\r\n while n < rows*cols:\r\n data_list.append(\".\")\r\n n += 1\r\n return data_list\r\n\r\ndef get_the_data():\r\n #read the test puzzle input \r\n theData = open('day20_test_puzzle_input.txt', 'r')\r\n #read the puzzle input \r\n #theData = open('day20_puzzle_input.txt', 'r')\r\n\r\n #move data into a list - read a line and remove lineshift\r\n data_list = []\r\n rows = 0 # in image\r\n firstRow = True\r\n emptyRows = 5\r\n\r\n #process each row in the data\r\n for element in theData:\r\n elementTrimmed = element.strip()\r\n if firstRow:\r\n image_algo = elementTrimmed\r\n firstRow = False\r\n elif elementTrimmed != \"\": \r\n #add X empty rows and X empty cols before or after data; to simulate the infinity outwards of the grid\r\n if rows == 0:\r\n data_list = add_rows(data_list, emptyRows, len(elementTrimmed)+emptyRows*2) # *2 -> i begynnelsen og slutten av linjen\r\n\r\n rows += 1 #one more row in input image\r\n #add each single char to a list - reformat to numpy array later\r\n i = 0\r\n while i < len(elementTrimmed):\r\n #add 5 empty cols before the data; to simulate the infinity outwards of the grid\r\n if i == 0: \r\n data_list = add_cols(data_list, emptyRows)\r\n \r\n data_list.append(elementTrimmed[i])\r\n i += 1\r\n \r\n #add 5 empty cols after the data; to simulate the infinity outwards of the grid\r\n if i == len(elementTrimmed): \r\n data_list = add_cols(data_list, emptyRows)\r\n\r\n #add 5 empty rows and 5 empty cols before or after data; to simulate the infinity outwards of the grid\r\n data_list = add_rows(data_list, emptyRows, len(elementTrimmed)+emptyRows*2)\r\n\r\n maxCols = len(elementTrimmed) + emptyRows*2\r\n maxRows = rows + emptyRows*2\r\n #create a numpy array\r\n input_image = np.array(data_list, dtype=\"str\").reshape(maxRows, maxCols)\r\n return input_image, image_algo\r\n\r\ndef 
start_the_engine():\r\n    #get the data and read them into a list\r\n    input_image, image_algo = get_the_data()\r\n    \r\n    #process the data and return the answer\r\n    valueX = process_the_data(input_image, image_algo)\r\n\r\n    #print the answer\r\n    print('\\nHow many pixels are lit in the resulting image ->', valueX, '\\n')\r\n    return \r\n\r\n#let's start\r\nif __name__ == '__main__':\r\n    clear_console()\r\n    start_the_engine()" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BowenBao/fairseq
[ "a9f5741f58c05c581686b73465d7e3f9df5528f3", "a9f5741f58c05c581686b73465d7e3f9df5528f3", "a9f5741f58c05c581686b73465d7e3f9df5528f3", "a9f5741f58c05c581686b73465d7e3f9df5528f3" ]
[ "fairseq/data/multilingual/sampled_multi_dataset.py", "fairseq/modules/conv_tbc.py", "fairseq/checkpoint_utils.py", "fairseq/models/distributed_fairseq_model.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport datetime\nimport hashlib\nimport logging\nimport time\nfrom bisect import bisect_right\nfrom collections import OrderedDict, defaultdict\nfrom enum import Enum\nfrom typing import List\n\nimport numpy as np\nimport torch\nfrom fairseq import distributed_utils\nfrom fairseq.data import FairseqDataset, data_utils\n\n\ndef get_time_gap(s, e):\n return (\n datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)\n ).__str__()\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef default_virtual_size_func(datasets, ratios, max_scale_up=1.5):\n sizes = [len(d) for d in datasets]\n if ratios is None:\n return sum(sizes)\n largest_idx = np.argmax(sizes)\n largest_r = ratios[largest_idx]\n largest_s = sizes[largest_idx]\n # set virtual sizes relative to the largest dataset\n virtual_sizes = [(r / largest_r) * largest_s for r in ratios]\n vsize = sum(virtual_sizes)\n max_size = sum(sizes) * max_scale_up\n return int(vsize if vsize < max_size else max_size)\n\n\nclass CollateFormat(Enum):\n single = 1\n ordered_dict = 2\n\n\nclass SampledMultiDataset(FairseqDataset):\n \"\"\"Samples from multiple sub-datasets according to given sampling ratios.\n Args:\n datasets (\n List[~torch.utils.data.Dataset]\n or OrderedDict[str, ~torch.utils.data.Dataset]\n ): datasets\n sampling_ratios (List[float]): list of probability of each dataset to be sampled\n (default: None, which corresponds to concatenating all dataset together).\n seed (int): RNG seed to use (default: 2).\n epoch (int): starting epoch number (default: 1).\n eval_key (str, optional): a key used at evaluation time that causes\n this instance to pass-through batches from *datasets[eval_key]*.\n collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or\n CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures\n the collater to output batches of data mixed from all sub-datasets,\n and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys\n of sub-datasets.\n Note that not all sub-datasets will present in a single batch in both formats.\n virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func).\n split (str): the split of the data, e.g. 
'train', 'valid' or 'test'.\n shared_collater (bool): whether or not to all sub-datasets have the same collater.\n shuffle (bool): whether or not to shuffle data (default: True).\n \"\"\"\n\n def __init__(\n self,\n datasets,\n sampling_ratios=None,\n seed=2,\n epoch=1,\n eval_key=None,\n collate_format=CollateFormat.single,\n virtual_size=default_virtual_size_func,\n split=\"\",\n shared_collater=False,\n shuffle=True,\n ):\n super().__init__()\n self.shared_collater = shared_collater\n self.shuffle = shuffle\n\n if isinstance(datasets, OrderedDict):\n self.keys = list(datasets.keys())\n datasets = list(datasets.values())\n elif isinstance(datasets, List):\n self.keys = list(range(len(datasets)))\n else:\n raise AssertionError()\n self.datasets = datasets\n self.split = split\n\n self.eval_key = eval_key\n if self.eval_key is not None:\n self.collate_format = CollateFormat.single\n else:\n self.collate_format = collate_format\n\n self.seed = seed\n self._cur_epoch = None\n\n self.cumulated_sizes = None\n # self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset\n # namely, data item i is sampled from the kth sub-dataset self.datasets[k]\n # where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k]\n self._cur_indices = None\n\n self._sizes = None\n self.virtual_size_per_dataset = None\n # caching properties\n self._reset_cached_properties()\n self.setup_sampling(sampling_ratios, virtual_size)\n self.set_epoch(epoch)\n\n def _clean_if_not_none(self, var_list):\n for v in var_list:\n if v is not None:\n del v\n\n def _reset_cached_properties(self):\n self._clean_if_not_none([self._sizes, self._cur_indices])\n self._sizes = None\n self._cur_indices = None\n\n def setup_sampling(self, sample_ratios, virtual_size):\n sizes = [len(d) for d in self.datasets]\n if sample_ratios is None:\n # default back to concating datasets\n self.sample_ratios = None\n self.virtual_size = sum(sizes)\n else:\n if not isinstance(sample_ratios, np.ndarray):\n sample_ratios = np.array(sample_ratios)\n self.sample_ratios = sample_ratios\n virtual_size = (\n default_virtual_size_func if virtual_size is None else virtual_size\n )\n self.virtual_size = (\n virtual_size(self.datasets, self.sample_ratios)\n if callable(virtual_size)\n else virtual_size\n )\n\n def adjust_sampling(self, epoch, sampling_ratios, virtual_size):\n if sampling_ratios is not None:\n sampling_ratios = self._sync_sample_ratios(sampling_ratios)\n self.setup_sampling(sampling_ratios, virtual_size)\n\n def _sync_sample_ratios(self, ratios):\n # in case the ratios are not precisely the same across processes\n # also to ensure every procresses update the ratios in the same pace\n ratios = torch.DoubleTensor(ratios)\n if torch.distributed.is_initialized():\n if torch.cuda.is_available():\n distributed_utils.all_reduce(\n ratios.cuda(), group=distributed_utils.get_data_parallel_group()\n )\n else:\n distributed_utils.all_reduce(\n ratios, group=distributed_utils.get_data_parallel_group()\n )\n ret = ratios.cpu()\n ret = ret.numpy()\n return ret\n\n def random_choice_in_dataset(self, rng, dataset, choice_size):\n if hasattr(dataset, \"random_choice_in_dataset\"):\n return dataset.random_choice_in_dataset(rng, choice_size)\n dataset_size = len(dataset)\n return rng.choice(\n dataset_size, choice_size, replace=(choice_size > dataset_size)\n )\n\n def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size):\n def get_counts(sample_ratios):\n counts = np.array([virtual_size * r for r in sample_ratios], 
dtype=np.int64)\n diff = virtual_size - counts.sum()\n assert diff >= 0\n # due to round-offs, the size might not match the desired sizes\n if diff > 0:\n dataset_indices = rng.choice(\n len(sample_ratios), size=diff, p=sample_ratios\n )\n for i in dataset_indices:\n counts[i] += 1\n return counts\n\n def get_in_dataset_indices(datasets, sizes, sample_ratios):\n counts = get_counts(sample_ratios)\n # uniformally sample desired counts for each dataset\n # if the desired counts are large, sample with replacement:\n indices = [\n self.random_choice_in_dataset(rng, d, c)\n for c, d in zip(counts, datasets)\n ]\n return indices\n\n sizes = [len(d) for d in datasets]\n if sample_ratios is None:\n # default back to concating datasets\n in_dataset_indices = [list(range(s)) for s in sizes]\n virtual_sizes_per_dataset = sizes\n else:\n ratios = sample_ratios / sample_ratios.sum()\n in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios)\n virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices]\n virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64)\n cumulative_sizes = np.cumsum(virtual_sizes_per_dataset)\n assert sum(virtual_sizes_per_dataset) == virtual_size\n assert cumulative_sizes[-1] == virtual_size\n if virtual_size < sum(sizes):\n logger.warning(\n f\"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)}).\"\n \" If virtual size << real data size, there could be data coverage issue.\"\n )\n in_dataset_indices = np.hstack(in_dataset_indices)\n return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset\n\n def _get_dataset_and_index(self, index):\n i = bisect_right(self.cumulated_sizes, index)\n return i, self._cur_indices[index]\n\n def __getitem__(self, index):\n # self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]]\n # where k satisfies self.cumulated_sizes[k - 1] <= k < self.cumulated_sizes[k]\n ds_idx, ds_sample_idx = self._get_dataset_and_index(index)\n ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx])\n return ret\n\n def num_tokens(self, index):\n return self.sizes[index].max()\n\n def num_tokens_vec(self, indices):\n sizes_vec = self.sizes[np.array(indices)]\n # max across all dimensions but first one\n return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape))))\n\n def size(self, index):\n return self.sizes[index]\n\n def __len__(self):\n return self.virtual_size\n\n def collater(self, samples, **extra_args):\n \"\"\"Merge a list of samples to form a mini-batch.\"\"\"\n if len(samples) == 0:\n return None\n if self.collate_format == \"ordered_dict\":\n collect_samples = [[] for _ in range(len(self.datasets))]\n for (i, sample) in samples:\n collect_samples[i].append(sample)\n batch = OrderedDict(\n [\n (self.keys[i], dataset.collater(collect_samples[i]))\n for i, (key, dataset) in enumerate(zip(self.keys, self.datasets))\n if len(collect_samples[i]) > 0\n ]\n )\n elif self.shared_collater:\n batch = self.datasets[0].collater([s for _, s in samples])\n else:\n samples_dict = defaultdict(list)\n pad_to_length = (\n defaultdict(int)\n if \"pad_to_length\" not in extra_args\n else extra_args[\"pad_to_length\"]\n )\n for ds_idx, s in samples:\n pad_to_length[\"source\"] = max(\n pad_to_length[\"source\"], s[\"source\"].size(0)\n )\n if s[\"target\"] is not None:\n pad_to_length[\"target\"] = max(\n pad_to_length[\"target\"], s[\"target\"].size(0)\n )\n samples_dict[ds_idx].append(s)\n batches = [\n self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length)\n for i 
in range(len(self.datasets))\n if len(samples_dict[i]) > 0\n ]\n\n def straight_data(tensors):\n batch = torch.cat(tensors, dim=0)\n return batch\n\n src_lengths = straight_data(\n [b[\"net_input\"][\"src_lengths\"] for b in batches]\n )\n src_lengths, sort_order = src_lengths.sort(descending=True)\n\n def straight_order(tensors):\n batch = straight_data(tensors)\n return batch.index_select(0, sort_order)\n\n batch = {\n \"id\": straight_order([b[\"id\"] for b in batches]),\n \"nsentences\": sum(b[\"nsentences\"] for b in batches),\n \"ntokens\": sum(b[\"ntokens\"] for b in batches),\n \"net_input\": {\n \"src_tokens\": straight_order(\n [b[\"net_input\"][\"src_tokens\"] for b in batches]\n ),\n \"src_lengths\": src_lengths,\n },\n \"target\": straight_order([b[\"target\"] for b in batches])\n if batches[0][\"target\"] is not None\n else None,\n }\n if \"prev_output_tokens\" in batches[0][\"net_input\"]:\n batch[\"net_input\"][\"prev_output_tokens\"] = straight_order(\n [b[\"net_input\"][\"prev_output_tokens\"] for b in batches]\n )\n if \"src_lang_id\" in batches[0][\"net_input\"]:\n batch[\"net_input\"][\"src_lang_id\"] = straight_order(\n [b[\"net_input\"][\"src_lang_id\"] for b in batches]\n )\n if \"tgt_lang_id\" in batches[0]:\n batch[\"tgt_lang_id\"] = straight_order(\n [b[\"tgt_lang_id\"] for b in batches]\n )\n return batch\n\n @property\n def sizes(self):\n if self._sizes is not None:\n return self._sizes\n start_time = time.time()\n in_sub_dataset_indices = [\n self._cur_indices[\n 0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i]\n ]\n for i in range(len(self.datasets))\n ]\n sub_dataset_sizes = [\n d.sizes[indices]\n for d, indices in zip(self.datasets, in_sub_dataset_indices)\n ]\n self._sizes = np.vstack(sub_dataset_sizes)\n logger.info(f\"sizes() calling time: {get_time_gap(start_time, time.time())}\")\n return self._sizes\n\n def ordered_indices(self):\n if self.shuffle:\n indices = np.random.permutation(len(self))\n else:\n indices = np.arange(len(self))\n\n sizes = self.sizes\n tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None\n src_sizes = (\n sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes\n )\n\n # sort by target length, then source length\n if tgt_sizes is not None:\n indices = indices[np.argsort(tgt_sizes[indices], kind=\"mergesort\")]\n sort_indices = indices[np.argsort(src_sizes[indices], kind=\"mergesort\")]\n return sort_indices\n\n def prefetch(self, indices):\n prefetch_indices = [[] for _ in range(len(self.datasets))]\n for i in indices:\n ds_idx, ds_sample_idx = self._get_dataset_and_index(i)\n prefetch_indices[ds_idx].append(ds_sample_idx)\n for i in range(len(prefetch_indices)):\n self.datasets[i].prefetch(prefetch_indices[i])\n\n @property\n def can_reuse_epoch_itr_across_epochs(self):\n return False\n\n def set_epoch(self, epoch):\n super().set_epoch(epoch)\n if epoch == self._cur_epoch:\n # re-enter so return\n return\n for d in self.datasets:\n if hasattr(d, \"set_epoch\"):\n d.set_epoch(epoch)\n self._cur_epoch = epoch\n self._establish_virtual_datasets()\n\n def _establish_virtual_datasets(self):\n if self.sample_ratios is None and self._cur_indices is not None:\n # not a samping dataset, no need to resample if indices are already established\n return\n self._reset_cached_properties()\n\n start_time = time.time()\n # Generate a weighted sample of indices as a function of the\n # random seed and the current epoch.\n rng = np.random.RandomState(\n [\n int(\n hashlib.sha1(\n 
str(self.__class__.__name__).encode(\"utf-8\")\n ).hexdigest(),\n 16,\n )\n % (2 ** 32),\n self.seed % (2 ** 32), # global seed\n self._cur_epoch, # epoch index,\n ]\n )\n self._clean_if_not_none(\n [self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes]\n )\n self._sizes = None\n\n indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices(\n rng, self.datasets, self.sample_ratios, self.virtual_size\n )\n self._cur_indices = indices\n self.cumulated_sizes = cumulated_sizes\n self.virtual_size_per_dataset = virtual_size_per_dataset\n\n raw_sizes = [len(d) for d in self.datasets]\n sampled_sizes = self.virtual_size_per_dataset\n logger.info(\n f\"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; \"\n f\"raw total size: {sum(raw_sizes)}\"\n )\n logger.info(\n f\"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; \"\n f\"resampled total size: {sum(sampled_sizes)}\"\n )\n if self.sample_ratios is not None:\n logger.info(\n f\"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}\"\n )\n else:\n logger.info(f\"[{self.split}] A concat dataset\")\n logger.info(\n f\"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}\"\n )\n\n def filter_indices_by_size(self, indices, max_sizes):\n \"\"\"Filter a list of sample indices. Remove those that are longer\n than specified in max_sizes.\n\n Args:\n indices (np.array): original array of sample indices\n max_sizes (int or list[int] or tuple[int]): max sample size,\n can be defined separately for src and tgt (then list or tuple)\n\n Returns:\n np.array: filtered sample array\n list: list of removed indices\n \"\"\"\n sizes = self.sizes\n tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None\n src_sizes = (\n sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes\n )\n\n return data_utils.filter_paired_dataset_indices_by_size(\n src_sizes, tgt_sizes, indices, max_sizes\n )\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch.nn.modules.utils import _single\nfrom torch import Tensor\n\n\nclass ConvTBC(torch.nn.Module):\n \"\"\"1D convolution over an input of shape (time x batch x channel)\n\n The implementation uses gemm to perform the convolution. This implementation\n is faster than cuDNN for small kernel sizes.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, padding=0):\n super(ConvTBC, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = _single(kernel_size)\n self.padding = _single(padding)\n\n self.weight = torch.nn.Parameter(\n torch.Tensor(self.kernel_size[0], in_channels, out_channels)\n )\n self.bias = torch.nn.Parameter(torch.Tensor(out_channels))\n\n def conv_tbc(self, input: Tensor):\n return torch.conv_tbc(\n input.contiguous(), self.weight, self.bias, self.padding[0]\n )\n\n def forward(self, input: Tensor):\n return self.conv_tbc(input)\n\n def __repr__(self):\n s = (\n \"{name}({in_channels}, {out_channels}, kernel_size={kernel_size}\"\n \", padding={padding}\"\n )\n if self.bias is None:\n s += \", bias=False\"\n s += \")\"\n return s.format(name=self.__class__.__name__, **self.__dict__)\n", "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport ast\nimport collections\nimport contextlib\nimport logging\nimport os\nimport re\nimport traceback\nfrom collections import OrderedDict\nfrom typing import Any, Dict, Optional, Union\n\nimport torch\nfrom fairseq.dataclass.configs import CheckpointConfig, FairseqConfig\nfrom fairseq.dataclass.utils import (\n convert_namespace_to_omegaconf,\n overwrite_args_by_name,\n)\nfrom fairseq.file_io import PathManager\nfrom fairseq.models import FairseqDecoder, FairseqEncoder\nfrom omegaconf import DictConfig, open_dict\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef save_checkpoint(cfg: CheckpointConfig, trainer, epoch_itr, val_loss):\n from fairseq import meters\n\n # only one worker should attempt to create the required dir\n if cfg.distributed_rank == 0:\n os.makedirs(cfg.save_dir, exist_ok=True)\n\n prev_best = getattr(save_checkpoint, \"best\", val_loss)\n if val_loss is not None:\n best_function = max if cfg.maximize_best_checkpoint_metric else min\n save_checkpoint.best = best_function(val_loss, prev_best)\n\n if cfg.no_save:\n return\n\n trainer.consolidate_optimizer()\n\n if not trainer.is_data_parallel_master:\n return\n\n write_timer = meters.StopwatchMeter()\n write_timer.start()\n\n epoch = epoch_itr.epoch\n end_of_epoch = epoch_itr.end_of_epoch()\n updates = trainer.get_num_updates()\n\n logger.info(f\"Preparing to save checkpoint for epoch {epoch} @ {updates} updates\")\n\n def is_better(a, b):\n return a >= b if cfg.maximize_best_checkpoint_metric else a <= b\n\n suffix = cfg.checkpoint_suffix or \"\"\n checkpoint_conds = collections.OrderedDict()\n checkpoint_conds[\"checkpoint{}{}.pt\".format(epoch, suffix)] = (\n end_of_epoch and not cfg.no_epoch_checkpoints and epoch % cfg.save_interval == 0\n )\n checkpoint_conds[\"checkpoint_{}_{}{}.pt\".format(epoch, updates, suffix)] = (\n not end_of_epoch\n and cfg.save_interval_updates > 0\n and updates % cfg.save_interval_updates == 0\n )\n checkpoint_conds[\"checkpoint_best{}.pt\".format(suffix)] = val_loss is not None and (\n not hasattr(save_checkpoint, \"best\")\n or is_better(val_loss, save_checkpoint.best)\n )\n if val_loss is not None and cfg.keep_best_checkpoints > 0:\n checkpoint_conds[\n \"checkpoint.best_{}_{:.2f}.pt\".format(cfg.best_checkpoint_metric, val_loss)\n ] = not hasattr(save_checkpoint, \"best\") or is_better(\n val_loss, save_checkpoint.best\n )\n checkpoint_conds[\n \"checkpoint_last{}.pt\".format(suffix)\n ] = not cfg.no_last_checkpoints\n\n extra_state = {\"train_iterator\": epoch_itr.state_dict(), \"val_loss\": val_loss}\n if hasattr(save_checkpoint, \"best\"):\n extra_state.update({\"best\": save_checkpoint.best})\n\n checkpoints = [\n os.path.join(cfg.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond\n ]\n if len(checkpoints) > 0:\n trainer.save_checkpoint(checkpoints[0], extra_state)\n for cp in checkpoints[1:]:\n assert PathManager.copy(\n checkpoints[0], cp, overwrite=True\n ), f\"Failed to copy {checkpoints[0]} to {cp}\"\n\n write_timer.stop()\n logger.info(\n \"Saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)\".format(\n checkpoints[0], epoch, updates, val_loss, write_timer.sum\n )\n )\n\n if not end_of_epoch and cfg.keep_interval_updates > 0:\n # remove old checkpoints; checkpoints are sorted in descending order\n checkpoints = checkpoint_paths(\n cfg.save_dir, 
pattern=r\"checkpoint_\\d+_(\\d+)\\.pt\"\n )\n for old_chk in checkpoints[cfg.keep_interval_updates :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n\n if cfg.keep_last_epochs > 0:\n # remove old epoch checkpoints; checkpoints are sorted in descending order\n checkpoints = checkpoint_paths(cfg.save_dir, pattern=r\"checkpoint(\\d+)\\.pt\")\n for old_chk in checkpoints[cfg.keep_last_epochs :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n\n if cfg.keep_best_checkpoints > 0:\n # only keep the best N checkpoints according to validation metric\n checkpoints = checkpoint_paths(\n cfg.save_dir,\n pattern=r\"checkpoint\\.best_{}_(\\d+\\.?\\d*)\\.pt\".format(\n cfg.best_checkpoint_metric\n ),\n )\n if not cfg.maximize_best_checkpoint_metric:\n checkpoints = checkpoints[::-1]\n for old_chk in checkpoints[cfg.keep_best_checkpoints :]:\n if os.path.lexists(old_chk):\n os.remove(old_chk)\n\n\ndef load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):\n \"\"\"\n Load a checkpoint and restore the training iterator.\n\n *passthrough_args* will be passed through to\n ``trainer.get_train_iterator``.\n \"\"\"\n\n reset_optimizer = cfg.reset_optimizer\n reset_lr_scheduler = cfg.reset_lr_scheduler\n optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)\n reset_meters = cfg.reset_meters\n reset_dataloader = cfg.reset_dataloader\n\n if cfg.finetune_from_model is not None and (\n reset_optimizer or reset_lr_scheduler or reset_meters or reset_dataloader\n ):\n raise ValueError(\n \"--finetune-from-model can not be set together with either --reset-optimizer\"\n \" or reset_lr_scheduler or reset_meters or reset_dataloader\"\n )\n\n suffix = cfg.checkpoint_suffix\n if (\n cfg.restore_file == \"checkpoint_last.pt\"\n ): # default value of restore_file is 'checkpoint_last.pt'\n checkpoint_path = os.path.join(\n cfg.save_dir, \"checkpoint_last{}.pt\".format(suffix)\n )\n first_launch = not PathManager.exists(checkpoint_path)\n if cfg.finetune_from_model is not None and first_launch:\n # if there is no last checkpoint to restore, start the finetune from pretrained model\n # else just use usual logic to load checkpoint, e.g. 
restart from last checkpoint, etc.\n            if PathManager.exists(cfg.finetune_from_model):\n                checkpoint_path = cfg.finetune_from_model\n                reset_optimizer = True\n                reset_lr_scheduler = True\n                reset_meters = True\n                reset_dataloader = True\n                logger.info(\n                    f\"loading pretrained model from {checkpoint_path}: \"\n                    \"optimizer, lr scheduler, meters, dataloader will be reset\"\n                )\n            else:\n                raise ValueError(\n                    f\"--finetune-from-model {cfg.finetune_from_model} does not exist\"\n                )\n    elif cfg.model_parallel_size > 1:\n        checkpoint_path = cfg.restore_file.replace(\".pt\", suffix + \".pt\")\n    else:\n        checkpoint_path = cfg.restore_file\n\n    if cfg.restore_file != \"checkpoint_last.pt\" and cfg.finetune_from_model:\n        raise ValueError(\n            \"--finetune-from-model and --restore-file (non-default value) \"\n            \"can not be specified together: \" + str(cfg)\n        )\n\n    extra_state = trainer.load_checkpoint(\n        checkpoint_path,\n        reset_optimizer,\n        reset_lr_scheduler,\n        optimizer_overrides,\n        reset_meters=reset_meters,\n    )\n\n    if (\n        extra_state is not None\n        and \"best\" in extra_state\n        and not reset_optimizer\n        and not reset_meters\n    ):\n        save_checkpoint.best = extra_state[\"best\"]\n\n    if extra_state is not None and not reset_dataloader:\n        # restore iterator from checkpoint\n        itr_state = extra_state[\"train_iterator\"]\n        epoch_itr = trainer.get_train_iterator(\n            epoch=itr_state[\"epoch\"], load_dataset=True, **passthrough_args\n        )\n        epoch_itr.load_state_dict(itr_state)\n    else:\n        epoch_itr = trainer.get_train_iterator(\n            epoch=1, load_dataset=True, **passthrough_args\n        )\n\n    trainer.lr_step(epoch_itr.epoch)\n\n    return extra_state, epoch_itr\n\n\ndef load_checkpoint_to_cpu(path, arg_overrides=None, load_on_all_ranks=False):\n    \"\"\"Loads a checkpoint to CPU (with upgrading for backward compatibility).\n\n    If doing single-GPU training or if the checkpoint is only being loaded by at\n    most one process on each node (current default behavior is for only rank 0\n    to read the checkpoint from disk), load_on_all_ranks should be False to\n    avoid errors from torch.distributed not having been initialized or\n    torch.distributed.barrier() hanging.\n\n    If all processes on each node may be loading the checkpoint\n    simultaneously, load_on_all_ranks should be set to True to avoid I/O\n    conflicts.\n\n    There's currently no support for > 1 but < all processes loading the\n    checkpoint on each node.\n    \"\"\"\n    local_path = PathManager.get_local_path(path)\n    # The locally cached file returned by get_local_path() may be stale for\n    # remote files that are periodically updated/overwritten (ex:\n    # checkpoint_last.pt) - so we remove the local copy, sync across processes\n    # (if needed), and then download a fresh copy.\n    if local_path != path and PathManager.path_requires_pathmanager(path):\n        try:\n            os.remove(local_path)\n        except FileNotFoundError:\n            # With potentially multiple processes removing the same file, the\n            # file being missing is benign (missing_ok isn't available until\n            # Python 3.8).\n            pass\n        if load_on_all_ranks:\n            torch.distributed.barrier()\n        local_path = PathManager.get_local_path(path)\n\n    with open(local_path, \"rb\") as f:\n        state = torch.load(f, map_location=torch.device(\"cpu\"))\n\n    if \"args\" in state and state[\"args\"] is not None and arg_overrides is not None:\n        args = state[\"args\"]\n        for arg_name, arg_val in arg_overrides.items():\n            setattr(args, arg_name, arg_val)\n\n    if \"cfg\" in state and state[\"cfg\"] is not None and arg_overrides is not None:\n        overwrite_args_by_name(state[\"cfg\"], arg_overrides)\n\n    state
= _upgrade_state_dict(state)\n return state\n\n\ndef load_model_ensemble(\n filenames,\n arg_overrides: Optional[Dict[str, Any]] = None,\n task=None,\n strict=True,\n suffix=\"\",\n num_shards=1,\n state=None,\n):\n \"\"\"Loads an ensemble of models.\n\n Args:\n filenames (List[str]): checkpoint files to load\n arg_overrides (Dict[str,Any], optional): override model args that\n were used during model training\n task (fairseq.tasks.FairseqTask, optional): task to use for loading\n \"\"\"\n assert not (\n strict and num_shards > 1\n ), \"Cannot load state dict with strict=True and checkpoint shards > 1\"\n ensemble, args, _task = load_model_ensemble_and_task(\n filenames,\n arg_overrides,\n task,\n strict,\n suffix,\n num_shards,\n state,\n )\n return ensemble, args\n\n\ndef load_model_ensemble_and_task(\n filenames,\n arg_overrides: Optional[Dict[str, Any]] = None,\n task=None,\n strict=True,\n suffix=\"\",\n num_shards=1,\n state=None,\n):\n assert state is None or len(filenames) == 1\n\n from fairseq import tasks\n\n assert not (\n strict and num_shards > 1\n ), \"Cannot load state dict with strict=True and checkpoint shards > 1\"\n ensemble = []\n cfg = None\n for filename in filenames:\n orig_filename = filename\n assert num_shards > 0\n for shard_idx in range(num_shards):\n if num_shards == 1:\n filename = filename.replace(\".pt\", suffix + \".pt\")\n else:\n filename = orig_filename[:-3] + f\"_part{shard_idx}.pt\"\n\n if not PathManager.exists(filename):\n raise IOError(\"Model file not found: {}\".format(filename))\n if state is None:\n state = load_checkpoint_to_cpu(filename, arg_overrides)\n if \"args\" in state and state[\"args\"] is not None:\n cfg = convert_namespace_to_omegaconf(state[\"args\"])\n elif \"cfg\" in state and state[\"cfg\"] is not None:\n cfg = state[\"cfg\"]\n else:\n raise RuntimeError(\n f\"Neither args nor cfg exist in state keys = {state.keys()}\"\n )\n\n if task is None:\n task = tasks.setup_task(cfg.task)\n\n # build model for ensemble\n model = task.build_model(cfg.model)\n\n model.load_state_dict(state[\"model\"], strict=strict, model_cfg=cfg.model)\n\n # reset state so it gets loaded for the next model in ensemble\n state = None\n\n ensemble.append(model)\n return ensemble, cfg, task\n\n\ndef checkpoint_paths(path, pattern=r\"checkpoint(\\d+)\\.pt\"):\n \"\"\"Retrieves all checkpoints found in `path` directory.\n\n Checkpoints are identified by matching filename to the specified pattern. 
If\n the pattern contains groups, the result will be sorted by the first group in\n descending order.\n \"\"\"\n pt_regexp = re.compile(pattern)\n files = os.listdir(path)\n\n entries = []\n for i, f in enumerate(files):\n m = pt_regexp.fullmatch(f)\n if m is not None:\n idx = float(m.group(1)) if len(m.groups()) > 0 else i\n entries.append((idx, m.group(0)))\n return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]\n\n\ndef torch_persistent_save(obj, f):\n if isinstance(f, str):\n with PathManager.open(f, \"wb\") as h:\n torch_persistent_save(obj, h)\n return\n for i in range(3):\n try:\n return torch.save(obj, f)\n except Exception:\n if i == 2:\n logger.error(traceback.format_exc())\n\n\ndef save_state(\n filename,\n cfg: FairseqConfig,\n model_state_dict,\n criterion,\n optimizer,\n lr_scheduler,\n num_updates,\n optim_history=None,\n extra_state=None,\n **kwargs,\n):\n from fairseq import utils\n\n if optim_history is None:\n optim_history = []\n if extra_state is None:\n extra_state = {}\n state_dict = {\n \"cfg\": cfg,\n \"args\": kwargs.get(\"args\", None),\n \"model\": model_state_dict or {},\n \"optimizer_history\": optim_history\n + [\n {\n \"criterion_name\": criterion.__class__.__name__,\n \"optimizer_name\": optimizer.__class__.__name__,\n \"lr_scheduler_state\": lr_scheduler.state_dict(),\n \"num_updates\": num_updates,\n }\n ],\n \"extra_state\": extra_state,\n }\n if utils.has_parameters(criterion):\n state_dict[\"criterion\"] = criterion.state_dict()\n\n if cfg is None:\n cfg = state_dict[\"args\"]\n assert cfg is not None, \"must provide cfg or args\"\n\n if isinstance(cfg, DictConfig):\n no_save_optimizer_state = cfg.checkpoint.no_save_optimizer_state\n else:\n no_save_optimizer_state = cfg.no_save_optimizer_state\n if not no_save_optimizer_state:\n state_dict[\"last_optimizer_state\"] = optimizer.state_dict()\n\n # keep everything on CPU\n state_dict = utils.move_to_cpu(state_dict)\n\n if PathManager.supports_rename(filename):\n # do atomic save\n with PathManager.open(filename + \".tmp\", \"wb\") as f:\n torch_persistent_save(state_dict, f)\n PathManager.rename(filename + \".tmp\", filename)\n else:\n # fallback to non-atomic save\n with PathManager.open(filename, \"wb\") as f:\n torch_persistent_save(state_dict, f)\n\n\ndef _upgrade_state_dict(state):\n \"\"\"Helper for upgrading old model checkpoints.\"\"\"\n from fairseq import models, registry, tasks\n\n # add optimizer_history\n if \"optimizer_history\" not in state:\n state[\"optimizer_history\"] = [\n {\"criterion_name\": \"CrossEntropyCriterion\", \"best_loss\": state[\"best_loss\"]}\n ]\n state[\"last_optimizer_state\"] = state[\"optimizer\"]\n del state[\"optimizer\"]\n del state[\"best_loss\"]\n # move extra_state into sub-dictionary\n if \"epoch\" in state and \"extra_state\" not in state:\n state[\"extra_state\"] = {\n \"epoch\": state[\"epoch\"],\n \"batch_offset\": state[\"batch_offset\"],\n \"val_loss\": state[\"val_loss\"],\n }\n del state[\"epoch\"]\n del state[\"batch_offset\"]\n del state[\"val_loss\"]\n # reduce optimizer history's memory usage (only keep the last state)\n if \"optimizer\" in state[\"optimizer_history\"][-1]:\n state[\"last_optimizer_state\"] = state[\"optimizer_history\"][-1][\"optimizer\"]\n for optim_hist in state[\"optimizer_history\"]:\n del optim_hist[\"optimizer\"]\n # record the optimizer class name\n if \"optimizer_name\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"optimizer_name\"] = \"FairseqNAG\"\n # move best_loss 
into lr_scheduler_state\n if \"lr_scheduler_state\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"lr_scheduler_state\"] = {\n \"best\": state[\"optimizer_history\"][-1][\"best_loss\"]\n }\n del state[\"optimizer_history\"][-1][\"best_loss\"]\n # keep track of number of updates\n if \"num_updates\" not in state[\"optimizer_history\"][-1]:\n state[\"optimizer_history\"][-1][\"num_updates\"] = 0\n # old model checkpoints may not have separate source/target positions\n if hasattr(state[\"args\"], \"max_positions\") and not hasattr(\n state[\"args\"], \"max_source_positions\"\n ):\n state[\"args\"].max_source_positions = state[\"args\"].max_positions\n state[\"args\"].max_target_positions = state[\"args\"].max_positions\n # use stateful training data iterator\n if \"train_iterator\" not in state[\"extra_state\"]:\n state[\"extra_state\"][\"train_iterator\"] = {\n \"epoch\": state[\"extra_state\"][\"epoch\"],\n \"iterations_in_epoch\": state[\"extra_state\"].get(\"batch_offset\", 0),\n }\n\n # backward compatibility, cfg updates\n if \"args\" in state and state[\"args\"] is not None:\n # default to translation task\n if not hasattr(state[\"args\"], \"task\"):\n state[\"args\"].task = \"translation\"\n # --raw-text and --lazy-load are deprecated\n if getattr(state[\"args\"], \"raw_text\", False):\n state[\"args\"].dataset_impl = \"raw\"\n elif getattr(state[\"args\"], \"lazy_load\", False):\n state[\"args\"].dataset_impl = \"lazy\"\n # epochs start at 1\n if state[\"extra_state\"][\"train_iterator\"] is not None:\n state[\"extra_state\"][\"train_iterator\"][\"epoch\"] = max(\n state[\"extra_state\"][\"train_iterator\"].get(\"epoch\", 1), 1\n )\n # --remove-bpe ==> --postprocess\n if hasattr(state[\"args\"], \"remove_bpe\"):\n state[\"args\"].post_process = state[\"args\"].remove_bpe\n # --min-lr ==> --stop-min-lr\n if hasattr(state[\"args\"], \"min_lr\"):\n state[\"args\"].stop_min_lr = state[\"args\"].min_lr\n del state[\"args\"].min_lr\n # binary_cross_entropy => wav2vec criterion\n if (\n hasattr(state[\"args\"], \"criterion\")\n and state[\"args\"].criterion == \"binary_cross_entropy\"\n ):\n state[\"args\"].criterion = \"wav2vec\"\n # speech_pretraining => audio pretraining\n if (\n hasattr(state[\"args\"], \"task\")\n and state[\"args\"].task == \"speech_pretraining\"\n ):\n state[\"args\"].task = \"audio_pretraining\"\n # audio_cpc => wav2vec\n if hasattr(state[\"args\"], \"arch\") and state[\"args\"].arch == \"audio_cpc\":\n state[\"args\"].arch = \"wav2vec\"\n # convert legacy float learning rate to List[float]\n if hasattr(state[\"args\"], \"lr\") and isinstance(state[\"args\"].lr, float):\n state[\"args\"].lr = [state[\"args\"].lr]\n\n state[\"cfg\"] = convert_namespace_to_omegaconf(state[\"args\"])\n\n if \"cfg\" in state and state[\"cfg\"] is not None:\n with open_dict(state[\"cfg\"]):\n # any upgrades for Hydra-based configs\n pass\n\n return state\n\n\ndef prune_state_dict(state_dict, model_cfg: Optional[DictConfig]):\n \"\"\"Prune the given state_dict if desired for LayerDrop\n (https://arxiv.org/abs/1909.11556).\n\n Training with LayerDrop allows models to be robust to pruning at inference\n time. 
This function prunes state_dict to allow smaller models to be loaded\n from a larger model and re-maps the existing state_dict for this to occur.\n\n It's called by functions that load models from checkpoints and does not\n need to be called directly.\n \"\"\"\n arch = None\n if model_cfg is not None:\n arch = (\n model_cfg._name\n if isinstance(model_cfg, DictConfig)\n else getattr(model_cfg, \"arch\", None)\n )\n\n if not model_cfg or arch is None or arch == \"ptt_transformer\":\n # args should not be none, but don't crash if it is.\n return state_dict\n\n encoder_layers_to_keep = getattr(model_cfg, \"encoder_layers_to_keep\", None)\n decoder_layers_to_keep = getattr(model_cfg, \"decoder_layers_to_keep\", None)\n\n if not encoder_layers_to_keep and not decoder_layers_to_keep:\n return state_dict\n\n # apply pruning\n logger.info(\n \"Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop\"\n )\n\n def create_pruning_pass(layers_to_keep, layer_name):\n keep_layers = sorted(\n int(layer_string) for layer_string in layers_to_keep.split(\",\")\n )\n mapping_dict = {}\n for i in range(len(keep_layers)):\n mapping_dict[str(keep_layers[i])] = str(i)\n\n regex = re.compile(r\"^{layer}.*\\.layers\\.(\\d+)\".format(layer=layer_name))\n return {\"substitution_regex\": regex, \"mapping_dict\": mapping_dict}\n\n pruning_passes = []\n if encoder_layers_to_keep:\n pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, \"encoder\"))\n if decoder_layers_to_keep:\n pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, \"decoder\"))\n\n new_state_dict = {}\n for layer_name in state_dict.keys():\n match = re.search(r\"\\.layers\\.(\\d+)\\.\", layer_name)\n # if layer has no number in it, it is a supporting layer, such as an\n # embedding\n if not match:\n new_state_dict[layer_name] = state_dict[layer_name]\n continue\n\n # otherwise, layer should be pruned.\n original_layer_number = match.group(1)\n # figure out which mapping dict to replace from\n for pruning_pass in pruning_passes:\n if original_layer_number in pruning_pass[\"mapping_dict\"] and pruning_pass[\n \"substitution_regex\"\n ].search(layer_name):\n new_layer_number = pruning_pass[\"mapping_dict\"][original_layer_number]\n substitution_match = pruning_pass[\"substitution_regex\"].search(\n layer_name\n )\n new_state_key = (\n layer_name[: substitution_match.start(1)]\n + new_layer_number\n + layer_name[substitution_match.end(1) :]\n )\n new_state_dict[new_state_key] = state_dict[layer_name]\n\n # Since layers are now pruned, *_layers_to_keep are no longer needed.\n # This is more of \"It would make it work fix\" rather than a proper fix.\n if isinstance(model_cfg, DictConfig):\n context = open_dict(model_cfg)\n else:\n context = contextlib.ExitStack()\n with context:\n if hasattr(model_cfg, \"encoder_layers_to_keep\"):\n model_cfg.encoder_layers_to_keep = None\n if hasattr(model_cfg, \"decoder_layers_to_keep\"):\n model_cfg.decoder_layers_to_keep = None\n\n return new_state_dict\n\n\ndef load_pretrained_component_from_model(\n component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str\n):\n \"\"\"\n Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the\n provided `component` object. 
If state_dict fails to load, there may be a\n mismatch in the architecture of the corresponding `component` found in the\n `checkpoint` file.\n \"\"\"\n if not PathManager.exists(checkpoint):\n raise IOError(\"Model file not found: {}\".format(checkpoint))\n state = load_checkpoint_to_cpu(checkpoint)\n if isinstance(component, FairseqEncoder):\n component_type = \"encoder\"\n elif isinstance(component, FairseqDecoder):\n component_type = \"decoder\"\n else:\n raise ValueError(\n \"component to load must be either a FairseqEncoder or \"\n \"FairseqDecoder. Loading other component types are not supported.\"\n )\n component_state_dict = OrderedDict()\n for key in state[\"model\"].keys():\n if key.startswith(component_type):\n # encoder.input_layers.0.0.weight --> input_layers.0.0.weight\n component_subkey = key[len(component_type) + 1 :]\n component_state_dict[component_subkey] = state[\"model\"][key]\n component.load_state_dict(component_state_dict, strict=True)\n return component\n\n\ndef verify_checkpoint_directory(save_dir: str) -> None:\n if not os.path.exists(save_dir):\n os.makedirs(save_dir, exist_ok=True)\n temp_file_path = os.path.join(save_dir, \"dummy\")\n try:\n with open(temp_file_path, \"w\"):\n pass\n except OSError as e:\n logger.warning(\n \"Unable to access checkpoint save directory: {}\".format(save_dir)\n )\n raise e\n else:\n os.remove(temp_file_path)\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport inspect\nimport logging\nimport os\nimport signal\nimport threading\n\nimport torch\nimport torch.nn as nn\n\nfrom fairseq import distributed_utils\nfrom fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel\n\n\nlogger = logging.getLogger(__name__)\n\n\n_GOSSIP_DISABLED = False\ntry:\n import gossip\nexcept ImportError:\n _GOSSIP_DISABLED = True\n\n\ndef DistributedFairseqModel(args, model, process_group):\n \"\"\"\n Wrap a *model* to support distributed data parallel training.\n\n This is similar to the built-in DistributedDataParallel, but allows\n additional configuration of the DistributedDataParallel class to\n use, and also provides easier access to the wrapped model by\n forwarding requests for missing attributes to the wrapped model.\n\n Args:\n args (argparse.Namespace): fairseq args\n model (BaseFairseqModel): model to wrap\n process_group: the c10d process group to be used for distributed data\n parallel all-reduction.\n \"\"\"\n # determine which DDP class to extend\n assert isinstance(model, nn.Module)\n if args.tpu:\n ddp_class = TPUDistributedDataParallel\n init_kwargs = dict(\n module=model,\n process_group=process_group,\n )\n elif args.distributed_wrapper == \"DDP\" and args.ddp_backend == \"c10d\":\n ddp_class = nn.parallel.DistributedDataParallel\n init_kwargs = dict(\n module=model,\n device_ids=[args.device_id],\n output_device=args.device_id,\n broadcast_buffers=args.broadcast_buffers,\n bucket_cap_mb=args.bucket_cap_mb,\n process_group=process_group,\n )\n # Maintain backward compatibility\n if \"find_unused_parameters\" in inspect.getargspec(ddp_class)[0]:\n init_kwargs[\"find_unused_parameters\"] = args.find_unused_parameters\n elif args.distributed_wrapper == \"DDP\" and args.ddp_backend == \"no_c10d\":\n ddp_class = LegacyDistributedDataParallel\n init_kwargs = dict(\n module=model,\n buffer_size=2 ** 28,\n process_group=process_group,\n )\n elif args.distributed_wrapper == 
\"SlowMo\":\n if _GOSSIP_DISABLED:\n raise ImportError(\n \"Cannot find gossip library. Please install from: \"\n \"github.com/facebookresearch/stochastic_gradient_push\"\n )\n ddp_class = gossip.GossipDataParallel\n\n # The values of slowmo_momentum below were obtained by tuning on the\n # En-De 16 dataset by training the transformer_wmt_en_de_large model\n if args.slowmo_momentum is None:\n if args.distributed_world_size <= 16:\n args.slowmo_momentum = 0.0\n elif args.distributed_world_size <= 32:\n args.slowmo_momentum = 0.2\n elif args.distributed_world_size <= 64:\n args.slowmo_momentum = 0.5\n else:\n args.slowmo_momentum = 0.6\n\n init_kwargs = dict(\n module=model,\n device_ids=[args.device_id],\n output_device=args.device_id,\n broadcast_buffers=args.broadcast_buffers,\n nprocs_per_node=args.nprocs_per_node,\n slowmo_momentum=args.slowmo_momentum,\n localsgd=(args.slowmo_algorithm == \"LocalSGD\"),\n localsgd_frequency=args.localsgd_frequency,\n )\n else:\n raise ValueError(\"Unknown --ddp-backend: \" + args.ddp_backend)\n\n heartbeat_timeout = getattr(args, \"heartbeat_timeout\", -1)\n\n class _DistributedFairseqModel(ddp_class):\n \"\"\"\n Extend DistributedDataParallel to check for missing attributes in the\n wrapped module and to add a timeout to kill the job if no progress is\n made (--heartbeat-timeout).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._heartbeat_timeout = heartbeat_timeout\n if self._heartbeat_timeout > 0:\n self._heartbeat = threading.Event()\n self._heartbeat_thread = threading.Thread(\n target=self._check_heartbeat,\n args=(os.getpid(),),\n daemon=True,\n )\n self._heartbeat_thread.start()\n else:\n self._heartbeat = None\n\n def _check_heartbeat(self, parent_pid):\n self._heartbeat.wait() # wait for the first forward pass\n while True:\n self._heartbeat.clear()\n success = self._heartbeat.wait(timeout=self._heartbeat_timeout)\n if not success:\n logger.error((\n \"Killing job for not making progress in {} seconds. \"\n \"Set --heartbeat-timeout=-1 to disable this timeout.\"\n ).format(int(self._heartbeat_timeout)))\n os.kill(parent_pid, signal.SIGKILL)\n return\n\n def __getattr__(self, name):\n wrapped_module = super().__getattr__(\"module\")\n if hasattr(wrapped_module, name):\n return getattr(wrapped_module, name)\n return super().__getattr__(name)\n\n def forward(self, *args, **kwargs):\n if self._heartbeat is not None:\n self._heartbeat.set()\n return super().forward(*args, **kwargs)\n\n return _DistributedFairseqModel(**init_kwargs)\n\n\nclass TPUDistributedDataParallel(nn.Module):\n\n def __init__(self, module, process_group):\n super().__init__()\n self.module = module\n self.process_group = process_group\n self.world_size = distributed_utils.get_world_size(self.process_group)\n\n def forward(self, *inputs, **kwargs):\n return self.module(*inputs, **kwargs)\n\n def all_reduce_grads(self):\n gradients = []\n for p in self.parameters():\n if not p.requires_grad:\n continue\n if p.grad is None:\n p.grad = torch.zeros_like(p)\n if p.grad.requires_grad:\n raise RuntimeError(\n \"TPUDistributedDataParallel only works with gradients that don't \"\n \"require grad\"\n )\n gradients.append(p.grad)\n\n import torch_xla.core.xla_model as xm\n xm.all_reduce(\n 'sum',\n gradients,\n scale=1. / self.world_size,\n groups=self.process_group[1],\n )\n" ]
[ [ "numpy.hstack", "torch.cat", "torch.distributed.is_initialized", "numpy.cumsum", "numpy.argmax", "torch.cuda.is_available", "numpy.argsort", "numpy.array", "torch.DoubleTensor", "numpy.vstack" ], [ "torch.nn.modules.utils._single", "torch.Tensor" ], [ "torch.device", "torch.distributed.barrier", "torch.save" ], [ "torch.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jihyunbak/rec_to_nwb
[ "6e65f8bf0a4faa4d986483ec2442ba19d70c92a9", "6e65f8bf0a4faa4d986483ec2442ba19d70c92a9" ]
[ "rec_to_nwb/processing/nwb/components/position/time/invalid/fl_pos_invalid_time_manager.py", "rec_to_nwb/test/processing/mda/time/valid/test_flMdaValidTimeManager.py" ]
[ "import numpy as np\nfrom pynwb import NWBFile\n\nfrom rec_to_nwb.processing.exceptions.missing_data_exception import MissingDataException\nfrom rec_to_nwb.processing.nwb.components.position.time.invalid.fl_pos_invalid_time_builder import \\\n FlPosInvalidTimeBuilder\nfrom rec_to_nwb.processing.tools.beartype.beartype import beartype\nfrom rec_to_nwb.processing.tools.get_times_period_multiplier import get_times_period_multiplier\n\n\nclass FlPosInvalidTimeManager:\n \"\"\"\" Manage POS data and call FLPosInvalidTimeBuilder to create list of FLPosInvalidTime objects.\n\n Args:\n metadata (dict): Project metadata\n\n Methods:\n get_fl_pos_invalid_times()\n \"\"\"\n\n @beartype\n def __init__(self, metadata: dict):\n self.period_multiplier = get_times_period_multiplier(metadata)\n\n @beartype\n def get_fl_pos_invalid_times(self, nwb_content: NWBFile, gaps_margin: float = 0.000001) -> list:\n \"\"\" Manage POS data and call FlPosInvalidTimeBuilder for every invalid gap.\n\n Args:\n nwb_content (NWBFile): NWBFile object with MDA timestamps inside\n gaps_margin (float): Error margin for invalid gaps\n\n Raises:\n MissingDataException: If timestamps are empty\n\n Returns:\n list of FlPosInvalidTime objects\n \"\"\"\n\n timestamps = self.__get_pos_timestamps(nwb_content)\n pos_period = self.__calculate_pos_period(timestamps)\n invalid_times = self.__get_pos_invalid_times(timestamps, pos_period, gaps_margin)\n return self.__build_pos_invalid_times(invalid_times)\n\n @staticmethod\n def __get_pos_timestamps(nwb_content):\n timestamps = [\n np.array(spatial_series.timestamps)\n for spatial_series in\n nwb_content.processing['behavior'].data_interfaces['position'].spatial_series.values()\n ]\n timestamp = np.hstack(timestamps)\n\n if timestamp.any():\n return timestamp\n raise MissingDataException('POS timestamps are not found!')\n\n @staticmethod\n def __calculate_pos_period(timestamps):\n number_of_invalid_records_at_start_of_a_file = 0\n number_of_invalid_records_at_end_of_a_file = 0\n\n first_timestamp = timestamps[0]\n last_timestamp = timestamps[-1]\n\n len_of_timestamps = len(timestamps)\n while not first_timestamp >= 0:\n number_of_invalid_records_at_start_of_a_file += 1\n first_timestamp = timestamps[number_of_invalid_records_at_start_of_a_file]\n while not last_timestamp >= 0:\n number_of_invalid_records_at_end_of_a_file += 1\n last_timestamp = timestamps[(-1 - number_of_invalid_records_at_end_of_a_file)]\n return (last_timestamp - first_timestamp) / \\\n (len_of_timestamps - number_of_invalid_records_at_end_of_a_file -\n number_of_invalid_records_at_start_of_a_file)\n\n def __get_pos_invalid_times(self, timestamps, period, gaps_margin):\n min_valid_len = 3 * gaps_margin\n valid_times = self.__get_pos_valid_times(timestamps, period, gaps_margin)\n\n start_times = np.append(np.asarray(timestamps[0] + gaps_margin), (valid_times[:, 1] + 2 * gaps_margin))\n stop_times = np.append(valid_times[:, 0] - 2 * gaps_margin, np.asarray(timestamps[-1] - gaps_margin))\n\n invalid_times = (np.vstack([start_times, stop_times])).transpose()\n invalid_intervals = [invalid_time > min_valid_len for invalid_time in invalid_times[:, 1] - invalid_times[:, 0]]\n\n return invalid_times[invalid_intervals, :]\n\n def __get_pos_valid_times(self, timestamps, period, gaps_margin):\n min_valid_len = 3 * gaps_margin\n timestamps = timestamps[~np.isnan(timestamps)]\n\n gaps = np.diff(timestamps) > period * self.period_multiplier\n gap_indexes = np.asarray(np.where(gaps))\n gap_start = np.insert(gap_indexes + 1, 0, 0)\n 
gap_end = np.append(gap_indexes, np.asarray(len(timestamps) - 1))\n\n valid_indices = np.vstack([gap_start, gap_end]).transpose()\n valid_times = timestamps[valid_indices]\n valid_times[:, 0] = valid_times[:, 0] + gaps_margin\n valid_times[:, 1] = valid_times[:, 1] - gaps_margin\n valid_intervals = [valid_time > min_valid_len for valid_time in valid_times[:, 1] - valid_times[:, 0]]\n return valid_times[valid_intervals, :]\n\n @staticmethod\n def __build_pos_invalid_times(invalid_times):\n return [FlPosInvalidTimeBuilder.build(gap[0], gap[1]) for gap in invalid_times]\n", "from unittest import TestCase\nfrom unittest.mock import MagicMock\n\nimport numpy as np\nfrom pynwb import NWBFile\nfrom testfixtures import should_raise\n\nfrom rec_to_nwb.processing.exceptions.missing_data_exception import MissingDataException\nfrom rec_to_nwb.processing.nwb.components.mda.time.valid.fl_mda_valid_time_manager import FlMdaValidTimeManager\n\n\nclass TestMdaValidTimeManager(TestCase):\n\n def test_fl_mda_valid_time_manager_not_initialized_due_to_None_param(self):\n with self.assertRaises(TypeError):\n FlMdaValidTimeManager(None)\n\n def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_in_middle(self):\n sampling_rate = 1.0\n gaps_margin = 0.0001\n mock_array = np.ndarray(dtype='float', shape=[10,])\n array = [1, 2, 3, 4, 5, 7, 9, 10, 11, 12]\n for i, number in enumerate(array):\n mock_array[i] = number\n mock_nwb = MagicMock(spec=NWBFile)\n mock_nwb.acquisition['e-series'].timestamps = mock_array\n mock_metadata = {}\n\n fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)\n fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(\n nwb_content=mock_nwb,\n gaps_margin=gaps_margin\n )\n\n self.assertEqual(len(fl_mda_valid_times), 2)\n self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)\n self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 4.9999)\n self.assertEqual(round(fl_mda_valid_times[1].start_time, 4), 9.0001)\n self.assertEqual(round(fl_mda_valid_times[1].stop_time, 4), 11.9999)\n\n def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_without_gap(self):\n sampling_rate = 1.0\n gaps_margin = 0.0001\n mock_array = np.ndarray(dtype='float', shape=[10,])\n array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n for i, number in enumerate(array):\n mock_array[i] = number\n mock_nwb = MagicMock(spec=NWBFile)\n mock_nwb.acquisition['e-series'].timestamps = mock_array\n mock_metadata = {'times_period_multiplier': 1.5}\n\n fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)\n fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(\n nwb_content=mock_nwb,\n gaps_margin=gaps_margin\n )\n\n self.assertEqual(len(fl_mda_valid_times), 1)\n self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)\n self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 9.9999)\n\n def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_at_start(self):\n sampling_rate = 1.0\n gaps_margin = 0.0001\n mock_array = np.ndarray(dtype='float', shape=[10,])\n array = [1, 3, 5, 6, 7, 8, 9, 10, 11, 12]\n for i, number in enumerate(array):\n mock_array[i] = number\n mock_nwb = MagicMock(spec=NWBFile)\n mock_nwb.acquisition['e-series'].timestamps = mock_array\n mock_metadata = {'times_period_multiplier': 1.5}\n\n fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)\n fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(\n nwb_content=mock_nwb,\n 
gaps_margin=gaps_margin\n )\n\n self.assertEqual(len(fl_mda_valid_times), 1)\n self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 5.0001)\n self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 11.9999)\n\n def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_with_gap_at_end(self):\n sampling_rate = 1.0\n gaps_margin = 0.0001\n mock_array = np.ndarray(dtype='float', shape=[10, ])\n array = [1, 2, 3, 4, 5, 6, 7, 8, 10, 12]\n for i, number in enumerate(array):\n mock_array[i] = number\n mock_nwb = MagicMock(spec=NWBFile)\n mock_nwb.acquisition['e-series'].timestamps = mock_array\n mock_metadata = {'times_period_multiplier': 1.5}\n\n fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)\n fl_mda_valid_times = fl_mda_valid_time_manager.get_fl_mda_valid_times(\n nwb_content=mock_nwb,\n gaps_margin=gaps_margin\n )\n\n self.assertEqual(len(fl_mda_valid_times), 1)\n self.assertEqual(round(fl_mda_valid_times[0].start_time, 4), 1.0001)\n self.assertEqual(round(fl_mda_valid_times[0].stop_time, 4), 7.9999)\n\n @should_raise(TypeError)\n def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_failed_due_to_None_param(self):\n gaps_margin = 0.0001\n sampling_rate = 1.0\n mock_metadata = {'times_period_multiplier': 1.5}\n\n fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)\n fl_mda_valid_time_manager.get_fl_mda_valid_times(\n nwb_content=None,\n gaps_margin=gaps_margin\n )\n\n @should_raise(MissingDataException)\n def test_fl_mda_valid_time_manager_get_fl_mda_valid_times_failed_due_to_lack_of_timestamps(self):\n gaps_margin = 0.0001\n sampling_rate = 1.0\n mock_nwb = MagicMock(spec=NWBFile)\n mock_nwb.acquisition['e-series'].timestamps = None\n mock_metadata = {'times_period_multiplier': 1.5}\n\n fl_mda_valid_time_manager = FlMdaValidTimeManager(sampling_rate, mock_metadata)\n fl_mda_valid_time_manager.get_fl_mda_valid_times(\n nwb_content=mock_nwb,\n gaps_margin=gaps_margin\n )" ]
[ [ "numpy.hstack", "numpy.asarray", "numpy.isnan", "numpy.diff", "numpy.insert", "numpy.array", "numpy.where", "numpy.vstack" ], [ "numpy.ndarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AHEAD-IntelligentCamera/deep-person-reid
[ "a412f8e11f54f78f7bbef9e60dbb2159eed09917" ]
[ "torchreid/data/transforms.py" ]
[ "from __future__ import division, print_function, absolute_import\nimport math\nimport random\nfrom collections import deque\nimport torch\nfrom PIL import Image\nfrom torchvision.transforms import (\n Resize, Compose, ToTensor, Normalize, ColorJitter, RandomHorizontalFlip, Pad\n)\n\n\nclass Random2DTranslation(object):\n \"\"\"Randomly translates the input image with a probability.\n\n Specifically, given a predefined shape (height, width), the input is first\n resized with a factor of 1.125, leading to (height*1.125, width*1.125), then\n a random crop is performed. Such operation is done with a probability.\n\n Args:\n height (int): target image height.\n width (int): target image width.\n p (float, optional): probability that this operation takes place.\n Default is 0.5.\n interpolation (int, optional): desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n \"\"\"\n\n def __init__(self, height, width, p=0.5, interpolation=Image.BILINEAR):\n self.height = height\n self.width = width\n self.p = p\n self.interpolation = interpolation\n\n def __call__(self, img):\n if random.uniform(0, 1) > self.p:\n return img.resize((self.width, self.height), self.interpolation)\n\n new_width, new_height = int(round(self.width * 1.125)\n ), int(round(self.height * 1.125))\n resized_img = img.resize((new_width, new_height), self.interpolation)\n x_maxrange = new_width - self.width\n y_maxrange = new_height - self.height\n x1 = int(round(random.uniform(0, x_maxrange)))\n y1 = int(round(random.uniform(0, y_maxrange)))\n croped_img = resized_img.crop(\n (x1, y1, x1 + self.width, y1 + self.height)\n )\n return croped_img\n\n\nclass BatchConstantErasing(object):\n \"\"\"Randomly erases an image strip from entire batch.\n\n This is a hack - transforms the image after batching\n \"\"\"\n\n def __init__(\n self,\n probability=1,\n sl=0.1,\n sh=0.4,\n mean=[0.4914, 0.4822, 0.4465]\n ):\n self.probability = probability\n self.mean = mean\n self.sl = sl\n self.sh = sh\n\n def __call__(self, batch):\n if random.uniform(0, 1) > self.probability:\n return batch\n\n # Erase a strip\n h = random.uniform(self.sl, self.sh) * batch.size()[2]\n \n x1 = random.randint(0, batch.size()[2] - h)\n if batch.size()[1] == 3:\n batch[:, 0, x1:x1 + h, :] = self.mean[0]\n batch[:, 1, x1:x1 + h, :] = self.mean[1]\n batch[:, 2, x1:x1 + h, :] = self.mean[2]\n else:\n batch[:, 0, x1:x1 + h, :] = self.mean[0]\n return batch\n\n\nclass RandomErasing(object):\n \"\"\"Randomly erases an image patch.\n\n Origin: `<https://github.com/zhunzhong07/Random-Erasing>`_\n\n Reference:\n Zhong et al. 
Random Erasing Data Augmentation.\n\n    Args:\n        probability (float, optional): probability that this operation takes place.\n            Default is 0.5.\n        sl (float, optional): min erasing area.\n        sh (float, optional): max erasing area.\n        r1 (float, optional): min aspect ratio.\n        mean (list, optional): erasing value.\n    \"\"\"\n\n    def __init__(\n        self,\n        probability=0.5,\n        sl=0.02,\n        sh=0.4,\n        r1=0.3,\n        mean=[0.4914, 0.4822, 0.4465]\n    ):\n        self.probability = probability\n        self.mean = mean\n        self.sl = sl\n        self.sh = sh\n        self.r1 = r1\n\n    def __call__(self, img):\n        if random.uniform(0, 1) > self.probability:\n            return img\n\n        for attempt in range(100):\n            area = img.size()[1] * img.size()[2]\n\n            target_area = random.uniform(self.sl, self.sh) * area\n            aspect_ratio = random.uniform(self.r1, 1 / self.r1)\n\n            h = int(round(math.sqrt(target_area * aspect_ratio)))\n            w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n            if w < img.size()[2] and h < img.size()[1]:\n                x1 = random.randint(0, img.size()[1] - h)\n                y1 = random.randint(0, img.size()[2] - w)\n                if img.size()[0] == 3:\n                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]\n                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]\n                else:\n                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n                return img\n\n        return img\n\n\nclass ColorAugmentation(object):\n    \"\"\"Randomly alters the intensities of RGB channels.\n\n    Reference:\n        Krizhevsky et al. ImageNet Classification with Deep Convolutional Neural\n        Networks. NIPS 2012.\n\n    Args:\n        p (float, optional): probability that this operation takes place.\n            Default is 0.5.\n    \"\"\"\n\n    def __init__(self, p=0.5):\n        self.p = p\n        self.eig_vec = torch.Tensor(\n            [\n                [0.4009, 0.7192, -0.5675],\n                [-0.8140, -0.0045, -0.5808],\n                [0.4203, -0.6948, -0.5836],\n            ]\n        )\n        self.eig_val = torch.Tensor([[0.2175, 0.0188, 0.0045]])\n\n    def _check_input(self, tensor):\n        assert tensor.dim() == 3 and tensor.size(0) == 3\n\n    def __call__(self, tensor):\n        if random.uniform(0, 1) > self.p:\n            return tensor\n        alpha = torch.normal(mean=torch.zeros_like(self.eig_val)) * 0.1\n        quantity = torch.mm(self.eig_val * alpha, self.eig_vec)\n        tensor = tensor + quantity.view(3, 1, 1)\n        return tensor\n\n\nclass RandomPatch(object):\n    \"\"\"Random patch data augmentation.\n\n    There is a patch pool that stores randomly extracted patches from person images.\n\n    For each input image, RandomPatch\n        1) extracts a random patch and stores the patch in the patch pool;\n        2) randomly selects a patch from the patch pool and pastes it on the\n           input (at random position) to simulate occlusion.\n\n    Reference:\n        - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.\n        - Zhou et al. Learning Generalisable Omni-Scale Representations\n          for Person Re-Identification.
TPAMI, 2021.\n \"\"\"\n\n def __init__(\n self,\n prob_happen=0.5,\n pool_capacity=50000,\n min_sample_size=100,\n patch_min_area=0.1,\n patch_max_area=0.6,\n patch_min_ratio=0.1,\n prob_rotate=0.5,\n prob_flip_leftright=0.5,\n ):\n self.prob_happen = prob_happen\n\n self.patch_min_area = patch_min_area\n self.patch_max_area = patch_max_area\n self.patch_min_ratio = patch_min_ratio\n\n self.prob_rotate = prob_rotate\n self.prob_flip_leftright = prob_flip_leftright\n\n self.patchpool = deque(maxlen=pool_capacity)\n self.min_sample_size = min_sample_size\n\n def generate_wh(self, W, H):\n area = W * H\n for attempt in range(100):\n target_area = random.uniform(\n self.patch_min_area, self.patch_max_area\n ) * area\n aspect_ratio = random.uniform(\n self.patch_min_ratio, 1. / self.patch_min_ratio\n )\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if w < W and h < H:\n return w, h\n return None, None\n\n def transform_patch(self, patch):\n if random.uniform(0, 1) > self.prob_flip_leftright:\n patch = patch.transpose(Image.FLIP_LEFT_RIGHT)\n if random.uniform(0, 1) > self.prob_rotate:\n patch = patch.rotate(random.randint(-10, 10))\n return patch\n\n def __call__(self, img):\n W, H = img.size # original image size\n\n # collect new patch\n w, h = self.generate_wh(W, H)\n if w is not None and h is not None:\n x1 = random.randint(0, W - w)\n y1 = random.randint(0, H - h)\n new_patch = img.crop((x1, y1, x1 + w, y1 + h))\n self.patchpool.append(new_patch)\n\n if len(self.patchpool) < self.min_sample_size:\n return img\n\n if random.uniform(0, 1) > self.prob_happen:\n return img\n\n # paste a randomly selected patch on a random position\n patch = random.sample(self.patchpool, 1)[0]\n patchW, patchH = patch.size\n x1 = random.randint(0, W - patchW)\n y1 = random.randint(0, H - patchH)\n patch = self.transform_patch(patch)\n img.paste(patch, (x1, y1))\n\n return img\n\n\ndef build_transforms(\n height,\n width,\n transforms='random_flip',\n norm_mean=[0.485, 0.456, 0.406],\n norm_std=[0.229, 0.224, 0.225],\n **kwargs\n):\n \"\"\"Builds train and test transform functions.\n\n Args:\n height (int): target image height.\n width (int): target image width.\n transforms (str or list of str, optional): transformations applied to model training.\n Default is 'random_flip'.\n norm_mean (list or None, optional): normalization mean values. Default is ImageNet means.\n norm_std (list or None, optional): normalization standard deviation values. 
Default is\n ImageNet standard deviation values.\n \"\"\"\n if transforms is None:\n transforms = []\n\n if isinstance(transforms, str):\n transforms = [transforms]\n\n if not isinstance(transforms, list):\n raise ValueError(\n 'transforms must be a list of strings, but found to be {}'.format(\n type(transforms)\n )\n )\n\n if len(transforms) > 0:\n transforms = [t.lower() for t in transforms]\n\n if norm_mean is None or norm_std is None:\n norm_mean = [0.485, 0.456, 0.406] # imagenet mean\n norm_std = [0.229, 0.224, 0.225] # imagenet std\n normalize = Normalize(mean=norm_mean, std=norm_std)\n\n print('Building train transforms ...')\n transform_tr = []\n\n print('+ resize to {}x{}'.format(height, width))\n transform_tr += [Resize((height, width))]\n\n if 'random_flip' in transforms:\n print('+ random flip')\n transform_tr += [RandomHorizontalFlip()]\n\n if 'random_crop' in transforms:\n print(\n '+ random crop (enlarge to {}x{} and '\n 'crop {}x{})'.format(\n int(round(height * 1.125)), int(round(width * 1.125)), height,\n width\n )\n )\n transform_tr += [Random2DTranslation(height, width)]\n\n if 'random_patch' in transforms:\n print('+ random patch')\n transform_tr += [RandomPatch()]\n\n if 'color_jitter' in transforms:\n print('+ color jitter')\n transform_tr += [\n ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0)\n ]\n\n if 'pad' in transforms:\n print('+ pad')\n transform_tr += [Pad(10)]\n\n print('+ to torch tensor of range [0, 1]')\n transform_tr += [ToTensor()]\n\n print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))\n transform_tr += [normalize]\n\n if 'random_erase' in transforms:\n print('+ random erase')\n transform_tr += [RandomErasing(mean=norm_mean)]\n\n transform_tr = Compose(transform_tr)\n\n print('Building test transforms ...')\n print('+ resize to {}x{}'.format(height, width))\n print('+ to torch tensor of range [0, 1]')\n print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))\n\n transform_te = Compose([\n Resize((height, width)),\n ToTensor(),\n normalize,\n ])\n\n return transform_tr, transform_te\n" ]
[ [ "torch.mm", "torch.zeros_like", "torch.Tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andreas-paul/map-of-mars
[ "71205dafdafccdeac76f05aff1ede5fb1911a5a2" ]
[ "main.py" ]
[ "#%%\nimport rasterio\nfrom pathlib import Path\nimport matplotlib.pyplot as plt\nfrom blend_modes import blending_functions\nfrom utils import get_dem_data, load_shapefile, crop_raster, save_raster, make_ramp\n\n\n\ndem_path: Path = Path(\"https://planetarymaps.usgs.gov/mosaic/Mars_MGS_MOLA_DEM_mosaic_global_463m.tif\")\ncropped_raster_path: Path = Path(\"data/RGB-byte-masked-mola-dem.tif\")\noutput_raster_path: Path = Path(\"data/mola_dem.tif\")\nshp_path: Path = Path(\"data/extent.shp\")\n\n\nget_dem_data(dem_path, output_raster_path)\nshapes = load_shapefile(shp_path)\nimg, transform, meta = crop_raster(output_raster_path, shapes)\nsave_raster(cropped_raster_path, meta, img)\n\ncm_reds = make_ramp(['#bd4628', '#c15033', '#c45a3f', '#c8644b', '#cc6e56', '#cf7962', \n '#d3836e', '#d78d7a', '#db9785', '#dea291', '#e2ac9d', '#e6b6a8', \n '#e9c0b4', '#edcac0', '#f1d5cc', '#f4dfd7', '#f8e9e3', '#fcf3ef', '#fffdfa'])\n\ncm_grays = make_ramp(['#000000', '#000000', '#FFFFFF'] )\n\nsrc = rasterio.open(\"data/RGB-byte-masked-mola-dem.tif\")\nplt.imshow(src.read(1), cmap=cm_reds)\nplt.axis('off')\nplt.Axes(fig, [0,0,1,1]) # Remove whitespace\n\nsrc = rasterio.open(\"data/RGB-byte-masked-mola-dem.tif\")\nplt.imshow(src.read(1), cmap=cm_grays)\nplt.axis('off')\nplt.Axes(fig, [0,0,1,1])\n" ]
[ [ "matplotlib.pyplot.Axes", "matplotlib.pyplot.axis" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZSL98/ETBA
[ "618317698adb9e372fb11dc0c3a01f856e0759b0", "618317698adb9e372fb11dc0c3a01f856e0759b0", "618317698adb9e372fb11dc0c3a01f856e0759b0" ]
[ "Inference/src/recursive_placement/ocrnet_with_exit.py", "Inference/src/exit_placement/modules/waspVideo.py", "Inference/src/exit_placement/ocrnet_with_exit.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Shulai Zhang\n## Microsoft Research\n## [email protected]\n## Copyright (c) 2021\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nimport pdb\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nimport sys\nsys.path.append(\"/home/slzhang/projects/ETBA/Train/openseg\")\nfrom lib.models.backbones.backbone_selector import BackboneSelector\nfrom lib.models.tools.module_helper import ModuleHelper\nfrom lib.models.backbones.resnet.resnet_backbone_with_exit import backbone_s1, backbone_s2\nfrom lib.models.backbones.resnet.resnet_models import ResNetModels, ResNet, Bottleneck\nfrom lib.models.backbones.resnet.resnet_backbone import DilatedResnetBackbone\n\n\nclass SpatialOCRNet_s1(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n def __init__(self, split_point):\n self.inplanes = 128\n super(SpatialOCRNet_s1, self).__init__()\n self.num_classes = 19\n self.backbone_s1 = backbone_s1(start_point=split_point, end_point=split_point)\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3_s1 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import SpatialGather_Module, SpatialOCR_Module\n self.spatial_context_head_s1 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s1 = SpatialOCR_Module(in_channels=512, \n key_channels=256, \n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s1 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s1 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def forward(self, x_):\n x = self.backbone_s1(x_)\n x1_dsn = self.dsn_head_s1(x[-2])\n x1 = self.conv_3x3_s1(x[-1])\n # print(x1_dsn.shape)\n # print(x1.shape)\n context1 = self.spatial_context_head_s1(x1, x1_dsn)\n x1 = self.spatial_ocr_head_s1(x1, context1)\n x1 = self.head_s1(x1)\n x1_dsn = F.interpolate(x1_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n x1 = F.interpolate(x1, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n\n return x1_dsn, x1\n\n\nclass SpatialOCRNet_s2(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n def __init__(self, split_point):\n self.inplanes = 128\n super(SpatialOCRNet_s2, self).__init__()\n self.num_classes = 19\n self.backbone_s2 = backbone_s2(start_point=split_point, end_point=split_point)\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3_s1 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import SpatialGather_Module, SpatialOCR_Module\n self.spatial_context_head_s1 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s1 = SpatialOCR_Module(in_channels=512, \n key_channels=256, 
\n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s1 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s1 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def forward(self, x_):\n x = self.backbone_s2(x_)\n # print(x[-1].shape)\n # print(x[-2].shape)\n x1_dsn = self.dsn_head_s1(x[-2])\n x1 = self.conv_3x3_s1(x[-1])\n # print(x1_dsn.shape)\n # print(x1.shape)\n context1 = self.spatial_context_head_s1(x1, x1_dsn)\n x1 = self.spatial_ocr_head_s1(x1, context1)\n x1 = self.head_s1(x1)\n x1_dsn = F.interpolate(x1_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n x1 = F.interpolate(x1, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n\n return x1_dsn, x1\n\n\nclass SpatialOCRNet(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n\n def __init__(self):\n self.inplanes = 128\n super(SpatialOCRNet, self).__init__()\n self.num_classes = 19\n\n orig_resnet = ResNet(\n Bottleneck,\n [3, 4, 23, 3],\n deep_base=False,\n bn_type=\"torchbn\"\n )\n multi_grid = [1, 1, 1]\n arch_net = DilatedResnetBackbone(\n orig_resnet, dilate_scale=8, multi_grid=multi_grid\n )\n self.backbone = arch_net\n\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import (\n SpatialGather_Module,\n SpatialOCR_Module,\n )\n\n self.spatial_context_head = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head = SpatialOCR_Module(\n in_channels=512,\n key_channels=256,\n out_channels=512,\n scale=1,\n dropout=0.05,\n bn_type=\"torchbn\",\n )\n\n self.head = nn.Conv2d(\n 512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True\n )\n self.dsn_head = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(\n 512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True\n ),\n )\n\n def forward(self, x_):\n x = self.backbone(x_)\n x_dsn = self.dsn_head(x[-2])\n x = self.conv_3x3(x[-1])\n context = self.spatial_context_head(x, x_dsn)\n x = self.spatial_ocr_head(x, context)\n x = self.head(x)\n x_dsn = F.interpolate(\n x_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True\n )\n x = F.interpolate(\n x, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True\n )\n return x_dsn, x\n\n\nclass SpatialOCRNet_with_exit(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n def __init__(self):\n self.inplanes = 128\n super(SpatialOCRNet_with_exit, self).__init__()\n self.num_classes = 19\n self.backbone_s1 = backbone_s1(start_point=8, end_point=8)\n self.backbone_s2 = backbone_s2(start_point=8, end_point=8)\n self.init_weights()\n\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3_s1 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n \n self.conv_3x3_s2 = 
nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import SpatialGather_Module, SpatialOCR_Module\n self.spatial_context_head_s1 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s1 = SpatialOCR_Module(in_channels=512, \n key_channels=256, \n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s1 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s1 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n self.spatial_context_head_s2 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s2 = SpatialOCR_Module(in_channels=512, \n key_channels=256, \n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s2 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s2 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def init_weights(self):\n checkpoint = torch.load(\"/home/slzhang/projects/ETBA/Train/openseg.pytorch/pretrained_model/resnet101-5d3b4d8f.pth\")\n dict_s1 = self.backbone_s1.state_dict().copy()\n dict_s2 = self.backbone_s2.state_dict().copy()\n\n for k,v in checkpoint.items():\n for k_s1,v_s1 in self.backbone_s1.state_dict().items():\n if k == k_s1[4:]:\n dict_s1[k_s1] = checkpoint[k]\n\n for k_s2,v_v2 in self.backbone_s2.state_dict().items():\n if k.split(\".\")[0] == k_s2.split(\".\")[0] and k.split(\".\")[2:] == k_s2.split(\".\")[2:] and k.split(\".\")[0] == \"layer3\" and int(k.split(\".\")[1]) == int(k_s2.split(\".\")[1])+self.backbone_s2.end_point-7:\n dict_s2[k_s2] = checkpoint[k]\n elif k.split(\".\") == k_s2.split(\".\")[1:]:\n dict_s2[k_s2] = checkpoint[k]\n\n self.backbone_s1.load_state_dict(dict_s1)\n self.backbone_s2.load_state_dict(dict_s2)\n\n def forward(self, x_):\n x = self.backbone_s1(x_)\n # print(x[-1].shape)\n # print(x[-2].shape)\n x1_dsn = self.dsn_head_s1(x[-2])\n x1 = self.conv_3x3_s1(x[-1])\n # print(x1_dsn.shape)\n # print(x1.shape)\n context1 = self.spatial_context_head_s1(x1, x1_dsn)\n x1 = self.spatial_ocr_head_s1(x1, context1)\n x1 = self.head_s1(x1)\n x1_dsn = F.interpolate(x1_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n x1 = F.interpolate(x1, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n\n x2 = self.backbone_s2(x[-2])\n # print(x2[-1].shape)\n # print(x2[-2].shape)\n x2_dsn = self.dsn_head_s2(x2[-2])\n x2 = self.conv_3x3_s2(x2[-1])\n # print(x2_dsn.shape)\n # print(x2.shape)\n context2 = self.spatial_context_head_s2(x2, x2_dsn)\n x2 = self.spatial_ocr_head_s2(x2, context2)\n x2 = self.head_s2(x2)\n x2_dsn = F.interpolate(x2_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n x2 = F.interpolate(x2, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n return x1_dsn, x1, x2_dsn, x2\n", "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass _AtrousModule(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, padding, 
dilation, BatchNorm):\n super(_AtrousModule, self).__init__()\n self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,\n stride=1, padding=padding, dilation=dilation, bias=False)\n self.bn = BatchNorm(planes)\n self.relu = nn.ReLU()\n\n self._init_weight()\n\n def forward(self, x):\n x = self.atrous_conv(x)\n x = self.bn(x)\n\n return self.relu(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nclass wasp(nn.Module):\n def __init__(self, backbone, output_stride, BatchNorm):\n super(wasp, self).__init__()\n if backbone == 'drn':\n inplanes = 512\n elif backbone == 'mobilenet':\n inplanes = 320\n else:\n inplanes = 2048\n if output_stride == 16:\n #dilations = [ 6, 12, 18, 24]\n dilations = [24, 18, 12, 6]\n #dilations = [6, 6, 6, 6]\n elif output_stride == 8:\n dilations = [48, 36, 24, 12]\n else:\n raise NotImplementedError\n\n self.aspp1 = _AtrousModule(inplanes, 256, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)\n self.aspp2 = _AtrousModule(256, 256, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)\n self.aspp3 = _AtrousModule(256, 256, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)\n self.aspp4 = _AtrousModule(256, 256, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)\n\n self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(inplanes, 256, 1, stride=1, bias=False),\n #nn.BatchNorm2d(256),\n nn.ReLU())\n self.conv1 = nn.Conv2d(1280, 256, 1, bias=False)\n self.conv2 = nn.Conv2d(256,256,1,bias=False)\n self.bn1 = BatchNorm(256)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.5)\n self._init_weight()\n\n def forward(self, x):\n x1 = self.aspp1(x)\n x2 = self.aspp2(x1)\n x3 = self.aspp3(x2)\n x4 = self.aspp4(x3)\n\n x1 = self.conv2(x1)\n x2 = self.conv2(x2)\n x3 = self.conv2(x3)\n x4 = self.conv2(x4)\n\n x5 = self.global_avg_pool(x)\n x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n return self.dropout(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. / n))\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\ndef build_wasp(backbone, output_stride, BatchNorm):\n return wasp(backbone, output_stride, BatchNorm)\n", "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Shulai Zhang\n## Microsoft Research\n## [email protected]\n## Copyright (c) 2021\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\nimport pdb\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nimport sys\nsys.path.append(\"/home/slzhang/projects/ETBA/Train/openseg\")\nfrom lib.models.backbones.backbone_selector import BackboneSelector\nfrom lib.models.tools.module_helper import ModuleHelper\nfrom lib.models.backbones.resnet.resnet_backbone_with_exit import backbone_s1, backbone_s2\nfrom lib.models.backbones.resnet.resnet_models import ResNetModels, ResNet, Bottleneck\nfrom lib.models.backbones.resnet.resnet_backbone import DilatedResnetBackbone\n\n\nclass SpatialOCRNet_s1(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n def __init__(self, split_point):\n self.inplanes = 128\n super(SpatialOCRNet_s1, self).__init__()\n self.num_classes = 19\n self.backbone_s1 = backbone_s1(start_point=split_point, end_point=split_point)\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3_s1 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import SpatialGather_Module, SpatialOCR_Module\n self.spatial_context_head_s1 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s1 = SpatialOCR_Module(in_channels=512, \n key_channels=256, \n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s1 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s1 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def forward(self, x_):\n x = self.backbone_s1(x_)\n x1_dsn = self.dsn_head_s1(x[-2])\n x1 = self.conv_3x3_s1(x[-1])\n # print(x1_dsn.shape)\n # print(x1.shape)\n context1 = self.spatial_context_head_s1(x1, x1_dsn)\n x1 = self.spatial_ocr_head_s1(x1, context1)\n x1 = self.head_s1(x1)\n x1_dsn = F.interpolate(x1_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n x1 = F.interpolate(x1, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n\n return x1_dsn, x1\n\n\nclass SpatialOCRNet_s2(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n def __init__(self, split_point):\n self.inplanes = 128\n super(SpatialOCRNet_s2, self).__init__()\n self.num_classes = 19\n self.backbone_s2 = backbone_s2(start_point=split_point, end_point=split_point)\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3_s1 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, 
kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import SpatialGather_Module, SpatialOCR_Module\n self.spatial_context_head_s1 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s1 = SpatialOCR_Module(in_channels=512, \n key_channels=256, \n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s1 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s1 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def forward(self, x_):\n x = self.backbone_s2(x_)\n # print(x[-1].shape)\n # print(x[-2].shape)\n x1_dsn = self.dsn_head_s1(x[-2])\n x1 = self.conv_3x3_s1(x[-1])\n # print(x1_dsn.shape)\n # print(x1.shape)\n context1 = self.spatial_context_head_s1(x1, x1_dsn)\n x1 = self.spatial_ocr_head_s1(x1, context1)\n x1 = self.head_s1(x1)\n x1_dsn = F.interpolate(x1_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n x1 = F.interpolate(x1, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n\n return x1_dsn, x1\n\n\nclass SpatialOCRNet(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n\n def __init__(self):\n self.inplanes = 128\n super(SpatialOCRNet, self).__init__()\n self.num_classes = 19\n\n orig_resnet = ResNet(\n Bottleneck,\n [3, 4, 23, 3],\n deep_base=False,\n bn_type=\"torchbn\"\n )\n multi_grid = [1, 1, 1]\n arch_net = DilatedResnetBackbone(\n orig_resnet, dilate_scale=8, multi_grid=multi_grid\n )\n self.backbone = arch_net\n\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import (\n SpatialGather_Module,\n SpatialOCR_Module,\n )\n\n self.spatial_context_head = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head = SpatialOCR_Module(\n in_channels=512,\n key_channels=256,\n out_channels=512,\n scale=1,\n dropout=0.05,\n bn_type=\"torchbn\",\n )\n\n self.head = nn.Conv2d(\n 512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True\n )\n self.dsn_head = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(\n 512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True\n ),\n )\n\n def forward(self, x_):\n x = self.backbone(x_)\n x_dsn = self.dsn_head(x[-2])\n x = self.conv_3x3(x[-1])\n context = self.spatial_context_head(x, x_dsn)\n x = self.spatial_ocr_head(x, context)\n x = self.head(x)\n x_dsn = F.interpolate(\n x_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True\n )\n x = F.interpolate(\n x, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True\n )\n return x_dsn, x\n\n\nclass SpatialOCRNet_with_exit(nn.Module):\n \"\"\"\n Object-Contextual Representations for Semantic Segmentation,\n Yuan, Yuhui and Chen, Xilin and Wang, Jingdong\n \"\"\"\n def __init__(self):\n self.inplanes = 128\n super(SpatialOCRNet_with_exit, self).__init__()\n self.num_classes = 19\n self.backbone_s1 = backbone_s1(start_point=8, 
end_point=8)\n self.backbone_s2 = backbone_s2(start_point=8, end_point=8)\n self.init_weights()\n\n # extra added layers\n in_channels = [1024, 2048]\n self.conv_3x3_s1 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n \n self.conv_3x3_s2 = nn.Sequential(\n nn.Conv2d(in_channels[1], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n )\n\n from lib.models.modules.spatial_ocr_block import SpatialGather_Module, SpatialOCR_Module\n self.spatial_context_head_s1 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s1 = SpatialOCR_Module(in_channels=512, \n key_channels=256, \n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s1 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s1 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n self.spatial_context_head_s2 = SpatialGather_Module(self.num_classes)\n self.spatial_ocr_head_s2 = SpatialOCR_Module(in_channels=512, \n key_channels=256, \n out_channels=512,\n scale=1,\n dropout=0.05, \n bn_type=\"torchbn\")\n\n self.head_s2 = nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n self.dsn_head_s2 = nn.Sequential(\n nn.Conv2d(in_channels[0], 512, kernel_size=3, stride=1, padding=1),\n ModuleHelper.BNReLU(512, bn_type=\"torchbn\"),\n nn.Dropout2d(0.05),\n nn.Conv2d(512, self.num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n )\n\n def init_weights(self):\n checkpoint = torch.load(\"/home/slzhang/projects/ETBA/Train/openseg.pytorch/pretrained_model/resnet101-5d3b4d8f.pth\")\n dict_s1 = self.backbone_s1.state_dict().copy()\n dict_s2 = self.backbone_s2.state_dict().copy()\n\n for k,v in checkpoint.items():\n for k_s1,v_s1 in self.backbone_s1.state_dict().items():\n if k == k_s1[4:]:\n dict_s1[k_s1] = checkpoint[k]\n\n for k_s2,v_v2 in self.backbone_s2.state_dict().items():\n if k.split(\".\")[0] == k_s2.split(\".\")[0] and k.split(\".\")[2:] == k_s2.split(\".\")[2:] and k.split(\".\")[0] == \"layer3\" and int(k.split(\".\")[1]) == int(k_s2.split(\".\")[1])+self.backbone_s2.end_point-7:\n dict_s2[k_s2] = checkpoint[k]\n elif k.split(\".\") == k_s2.split(\".\")[1:]:\n dict_s2[k_s2] = checkpoint[k]\n\n self.backbone_s1.load_state_dict(dict_s1)\n self.backbone_s2.load_state_dict(dict_s2)\n\n def forward(self, x_):\n x = self.backbone_s1(x_)\n # print(x[-1].shape)\n # print(x[-2].shape)\n x1_dsn = self.dsn_head_s1(x[-2])\n x1 = self.conv_3x3_s1(x[-1])\n # print(x1_dsn.shape)\n # print(x1.shape)\n context1 = self.spatial_context_head_s1(x1, x1_dsn)\n x1 = self.spatial_ocr_head_s1(x1, context1)\n x1 = self.head_s1(x1)\n x1_dsn = F.interpolate(x1_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n x1 = F.interpolate(x1, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n\n x2 = self.backbone_s2(x[-2])\n # print(x2[-1].shape)\n # print(x2[-2].shape)\n x2_dsn = self.dsn_head_s2(x2[-2])\n x2 = self.conv_3x3_s2(x2[-1])\n # print(x2_dsn.shape)\n # print(x2.shape)\n context2 = self.spatial_context_head_s2(x2, x2_dsn)\n x2 = self.spatial_ocr_head_s2(x2, context2)\n x2 = self.head_s2(x2)\n x2_dsn = F.interpolate(x2_dsn, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", 
align_corners=True)\n x2 = F.interpolate(x2, size=(x_.size(2), x_.size(3)), mode=\"bilinear\", align_corners=True)\n return x1_dsn, x1, x2_dsn, x2\n" ]
[ [ "torch.nn.Conv2d", "torch.nn.Dropout2d", "torch.load" ], [ "torch.nn.Dropout", "torch.cat", "torch.nn.Conv2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ], [ "torch.nn.Conv2d", "torch.nn.Dropout2d", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aishwarya8615/MRNET-GAN
[ "a66b4c83ef5566e95e7587f7ddad43b9ecc263d8" ]
[ "load_data_tf.py" ]
[ "import numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n\nclass MRNetDataset():\n def __init__(self, root_dir, task, plane, train=True, transform=None, weights=None):\n self.task = task\n self.plane = plane\n self.root_dir = root_dir\n self.train = train\n if self.train:\n self.folder_path = self.root_dir + 'train/{0}/'.format(plane)\n self.records = pd.read_csv(\n self.root_dir + 'train-{0}.csv'.format(task), header=None, names=['id', 'label'])\n else:\n transform = None\n self.folder_path = self.root_dir + 'valid/{0}/'.format(plane)\n self.records = pd.read_csv(\n self.root_dir + 'valid-{0}.csv'.format(task), header=None, names=['id', 'label'])\n\n self.records['id'] = self.records['id'].map(\n lambda i: '0' * (4 - len(str(i))) + str(i))\n self.paths = [self.folder_path + filename +\n '.npy' for filename in self.records['id'].tolist()]\n self.labels = self.records['label'].tolist()\n\n self.transform = transform\n if weights is None:\n pos = np.sum(self.labels)\n neg = len(self.labels) - pos\n self.weights = [1, neg / pos]\n else:\n self.weights = weights\n\n def __len__(self):\n return len(self.paths)\n\n def __getitem__(self, index):\n array = np.load(self.paths[index])\n label = self.labels[index]\n # label = torch.FloatTensor([label])\n label = tf.constant(label, tf.float32)\n # print \"label torch is \", label.numpy()\n if self.transform:\n array = self.transform(array)\n\n else:\n array = np.stack((array,)*3, axis=1)\n # array1 = torch.FloatTensor(array)\n array = tf.constant(array, tf.float32)\n\n if label.numpy() == 1:\n weight = np.array([self.weights[1]])\n # weight = torch.FloatTensor(weight)\n weight = tf.constant(weight, tf.float32)\n\n else:\n weight = np.array([self.weights[0]])\n # weight = torch.FloatTensor(weight)\n weight = tf.constant(weight, tf.float32)\n\n return array, label, weight\n\n" ]
[ [ "tensorflow.constant", "numpy.stack", "numpy.load", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.4", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
NucciTheBoss/pytorch_geometric
[ "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967", "e220a2c08fa1b2f1672d616c22eac2a67b5c8967" ]
[ "benchmark/runtime/dgl/train.py", "examples/pytorch_lightning/graph_sage.py", "test/visualization/test_influence.py", "test/transforms/test_normalize_scale.py", "test/nn/conv/test_hypergraph_conv.py", "test/loader/test_hgt_loader.py", "torch_geometric/datasets/airports.py", "torch_geometric/datasets/modelnet.py", "test/nn/conv/test_x_conv.py", "benchmark/kernel/edge_pool.py", "torch_geometric/nn/conv/hgt_conv.py", "torch_geometric/nn/models/attentive_fp.py", "test/data/test_temporal.py", "torch_geometric/loader/link_neighbor_loader.py", "torch_geometric/graphgym/models/act.py", "examples/film.py", "torch_geometric/datasets/rel_link_pred_dataset.py", "torch_geometric/datasets/geometry.py" ]
[ "import time\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef train_runtime(model, data, epochs, device):\n if hasattr(data, 'features'):\n x = torch.tensor(data.features, dtype=torch.float, device=device)\n else:\n x = None\n mask = data.train_mask if hasattr(data, 'train_mask') else data.train_idx\n y = torch.tensor(data.labels, dtype=torch.long, device=device)[mask]\n\n model = model.to(device)\n model.train()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n t_start = time.perf_counter()\n\n for epoch in range(epochs):\n optimizer.zero_grad()\n out = model(x)\n loss = F.nll_loss(out[mask], y.view(-1))\n loss.backward()\n optimizer.step()\n\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n t_end = time.perf_counter()\n\n return t_end - t_start\n", "import os.path as osp\n\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import BatchNorm1d\nfrom torchmetrics import Accuracy\n\nfrom torch_geometric import seed_everything\nfrom torch_geometric.data import LightningNodeData\nfrom torch_geometric.datasets import Reddit\nfrom torch_geometric.nn import GraphSAGE\n\n\nclass Model(pl.LightningModule):\n def __init__(self, in_channels: int, out_channels: int,\n hidden_channels: int = 256, num_layers: int = 2,\n dropout: float = 0.5):\n super().__init__()\n self.gnn = GraphSAGE(in_channels, hidden_channels, num_layers,\n out_channels, dropout=dropout,\n norm=BatchNorm1d(hidden_channels))\n\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n def forward(self, x, edge_index):\n return self.gnn(x, edge_index)\n\n def training_step(self, data, batch_idx):\n y_hat = self(data.x, data.edge_index)[:data.batch_size]\n y = data.y[:data.batch_size]\n loss = F.cross_entropy(y_hat, y)\n self.train_acc(y_hat.softmax(dim=-1), y)\n self.log('train_acc', self.train_acc, prog_bar=True, on_step=False,\n on_epoch=True, batch_size=y_hat.size(0))\n return loss\n\n def validation_step(self, data, batch_idx):\n y_hat = self(data.x, data.edge_index)[:data.batch_size]\n y = data.y[:data.batch_size]\n self.val_acc(y_hat.softmax(dim=-1), y)\n self.log('val_acc', self.val_acc, prog_bar=True, on_step=False,\n on_epoch=True, batch_size=y_hat.size(0))\n\n def test_step(self, data, batch_idx):\n y_hat = self(data.x, data.edge_index)[:data.batch_size]\n y = data.y[:data.batch_size]\n self.test_acc(y_hat.softmax(dim=-1), y)\n self.log('test_acc', self.test_acc, prog_bar=True, on_step=False,\n on_epoch=True, batch_size=y_hat.size(0))\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=0.01)\n\n\ndef main():\n seed_everything(42)\n\n dataset = Reddit(osp.join('data', 'Reddit'))\n data = dataset[0]\n\n datamodule = LightningNodeData(data, data.train_mask, data.val_mask,\n data.test_mask, loader='neighbor',\n num_neighbors=[25, 10], batch_size=1024,\n num_workers=8)\n\n model = Model(dataset.num_node_features, dataset.num_classes)\n\n devices = torch.cuda.device_count()\n strategy = pl.strategies.DDPSpawnStrategy(find_unused_parameters=False)\n checkpoint = pl.callbacks.ModelCheckpoint(monitor='val_acc', save_top_k=1)\n trainer = pl.Trainer(strategy=strategy, accelerator='gpu', devices=devices,\n max_epochs=20, callbacks=[checkpoint])\n\n trainer.fit(model, datamodule)\n trainer.test(ckpt_path='best', datamodule=datamodule)\n\n\nif __name__ == '__main__':\n main()\n", "import torch\n\nfrom torch_geometric.datasets import 
KarateClub\nfrom torch_geometric.nn import GCNConv\nfrom torch_geometric.visualization import influence\n\n\nclass Net(torch.nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.conv1 = GCNConv(in_channels, out_channels)\n self.conv2 = GCNConv(out_channels, out_channels)\n\n def forward(self, x, edge_index):\n x = torch.nn.functional.relu(self.conv1(x, edge_index))\n x = self.conv2(x, edge_index)\n return x\n\n\ndef test_influence():\n data = KarateClub()[0]\n x = torch.randn(data.num_nodes, 8)\n\n out = influence(Net(x.size(1), 16), x, data.edge_index)\n assert out.size() == (data.num_nodes, data.num_nodes)\n assert torch.allclose(out.sum(dim=-1), torch.ones(data.num_nodes),\n atol=1e-04)\n", "import torch\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.transforms import NormalizeScale\n\n\ndef test_normalize_scale():\n assert NormalizeScale().__repr__() == 'NormalizeScale()'\n\n pos = torch.randn((10, 3))\n\n data = Data(pos=pos)\n data = NormalizeScale()(data)\n assert len(data) == 1\n assert data.pos.min().item() > -1\n assert data.pos.max().item() < 1\n", "import torch\n\nfrom torch_geometric.nn import HypergraphConv\n\n\ndef test_hypergraph_conv_with_more_nodes_than_edges():\n in_channels, out_channels = (16, 32)\n hyperedge_index = torch.tensor([[0, 0, 1, 1, 2, 3], [0, 1, 0, 1, 0, 1]])\n hyperedge_weight = torch.tensor([1.0, 0.5])\n num_nodes = hyperedge_index[0].max().item() + 1\n num_edges = hyperedge_index[1].max().item() + 1\n x = torch.randn((num_nodes, in_channels))\n hyperedge_attr = torch.randn((num_edges, in_channels))\n\n conv = HypergraphConv(in_channels, out_channels)\n assert conv.__repr__() == 'HypergraphConv(16, 32)'\n out = conv(x, hyperedge_index)\n assert out.size() == (num_nodes, out_channels)\n out = conv(x, hyperedge_index, hyperedge_weight)\n assert out.size() == (num_nodes, out_channels)\n\n conv = HypergraphConv(in_channels, out_channels, use_attention=True,\n heads=2)\n out = conv(x, hyperedge_index, hyperedge_attr=hyperedge_attr)\n assert out.size() == (num_nodes, 2 * out_channels)\n out = conv(x, hyperedge_index, hyperedge_weight, hyperedge_attr)\n assert out.size() == (num_nodes, 2 * out_channels)\n\n conv = HypergraphConv(in_channels, out_channels, use_attention=True,\n heads=2, concat=False, dropout=0.5)\n out = conv(x, hyperedge_index, hyperedge_weight, hyperedge_attr)\n assert out.size() == (num_nodes, out_channels)\n\n\ndef test_hypergraph_conv_with_more_edges_than_nodes():\n in_channels, out_channels = (16, 32)\n hyperedge_index = torch.tensor([[0, 0, 1, 1, 2, 3, 3, 3, 2, 1, 2],\n [0, 1, 2, 1, 2, 1, 0, 3, 3, 4, 4]])\n hyperedge_weight = torch.tensor([1.0, 0.5, 0.8, 0.2, 0.7])\n num_nodes = hyperedge_index[0].max().item() + 1\n x = torch.randn((num_nodes, in_channels))\n\n conv = HypergraphConv(in_channels, out_channels)\n assert conv.__repr__() == 'HypergraphConv(16, 32)'\n out = conv(x, hyperedge_index)\n assert out.size() == (num_nodes, out_channels)\n out = conv(x, hyperedge_index, hyperedge_weight)\n assert out.size() == (num_nodes, out_channels)\n", "import numpy as np\nimport torch\nfrom torch_sparse import SparseTensor\n\nfrom torch_geometric.data import HeteroData\nfrom torch_geometric.loader import HGTLoader\nfrom torch_geometric.nn import GraphConv, to_hetero\nfrom torch_geometric.utils import k_hop_subgraph\n\n\ndef get_edge_index(num_src_nodes, num_dst_nodes, num_edges):\n row = torch.randint(num_src_nodes, (num_edges, ), dtype=torch.long)\n col = torch.randint(num_dst_nodes, (num_edges, 
), dtype=torch.long)\n return torch.stack([row, col], dim=0)\n\n\ndef is_subset(subedge_index, edge_index, src_idx, dst_idx):\n num_nodes = int(edge_index.max()) + 1\n idx = num_nodes * edge_index[0] + edge_index[1]\n subidx = num_nodes * src_idx[subedge_index[0]] + dst_idx[subedge_index[1]]\n mask = torch.from_numpy(np.isin(subidx, idx))\n return int(mask.sum()) == mask.numel()\n\n\ndef test_hgt_loader():\n torch.manual_seed(12345)\n\n data = HeteroData()\n\n data['paper'].x = torch.arange(100)\n data['author'].x = torch.arange(100, 300)\n\n data['paper', 'paper'].edge_index = get_edge_index(100, 100, 500)\n data['paper', 'paper'].edge_attr = torch.arange(500)\n data['paper', 'author'].edge_index = get_edge_index(100, 200, 1000)\n data['paper', 'author'].edge_attr = torch.arange(500, 1500)\n data['author', 'paper'].edge_index = get_edge_index(200, 100, 1000)\n data['author', 'paper'].edge_attr = torch.arange(1500, 2500)\n\n r1, c1 = data['paper', 'paper'].edge_index\n r2, c2 = data['paper', 'author'].edge_index + torch.tensor([[0], [100]])\n r3, c3 = data['author', 'paper'].edge_index + torch.tensor([[100], [0]])\n full_adj = SparseTensor(\n row=torch.cat([r1, r2, r3]),\n col=torch.cat([c1, c2, c3]),\n value=torch.arange(2500),\n )\n\n batch_size = 20\n loader = HGTLoader(data, num_samples=[5] * 4, batch_size=batch_size,\n input_nodes='paper')\n assert str(loader) == 'HGTLoader()'\n assert len(loader) == (100 + batch_size - 1) // batch_size\n\n for batch in loader:\n assert isinstance(batch, HeteroData)\n\n # Test node type selection:\n assert set(batch.node_types) == {'paper', 'author'}\n\n assert len(batch['paper']) == 2\n assert batch['paper'].x.size() == (40, ) # 20 + 4 * 5\n assert batch['paper'].batch_size == batch_size\n assert batch['paper'].x.min() >= 0 and batch['paper'].x.max() < 100\n\n assert len(batch['author']) == 1\n assert batch['author'].x.size() == (20, ) # 4 * 5\n assert batch['author'].x.min() >= 100 and batch['author'].x.max() < 300\n\n # Test edge type selection:\n assert set(batch.edge_types) == {('paper', 'to', 'paper'),\n ('paper', 'to', 'author'),\n ('author', 'to', 'paper')}\n\n assert len(batch['paper', 'paper']) == 2\n row, col = batch['paper', 'paper'].edge_index\n value = batch['paper', 'paper'].edge_attr\n adj = full_adj[batch['paper'].x, batch['paper'].x]\n assert row.min() >= 0 and row.max() < 40\n assert col.min() >= 0 and col.max() < 40\n assert value.min() >= 0 and value.max() < 500\n assert adj.nnz() == row.size(0)\n assert torch.allclose(row.unique(), adj.storage.row().unique())\n assert torch.allclose(col.unique(), adj.storage.col().unique())\n assert torch.allclose(value.unique(), adj.storage.value().unique())\n\n assert is_subset(batch['paper', 'paper'].edge_index,\n data['paper', 'paper'].edge_index, batch['paper'].x,\n batch['paper'].x)\n\n assert len(batch['paper', 'author']) == 2\n row, col = batch['paper', 'author'].edge_index\n value = batch['paper', 'author'].edge_attr\n adj = full_adj[batch['paper'].x, batch['author'].x]\n assert row.min() >= 0 and row.max() < 40\n assert col.min() >= 0 and col.max() < 20\n assert value.min() >= 500 and value.max() < 1500\n assert adj.nnz() == row.size(0)\n assert torch.allclose(row.unique(), adj.storage.row().unique())\n assert torch.allclose(col.unique(), adj.storage.col().unique())\n assert torch.allclose(value.unique(), adj.storage.value().unique())\n\n assert is_subset(batch['paper', 'author'].edge_index,\n data['paper', 'author'].edge_index, batch['paper'].x,\n batch['author'].x - 100)\n\n assert 
len(batch['author', 'paper']) == 2\n row, col = batch['author', 'paper'].edge_index\n value = batch['author', 'paper'].edge_attr\n adj = full_adj[batch['author'].x, batch['paper'].x]\n assert row.min() >= 0 and row.max() < 20\n assert col.min() >= 0 and col.max() < 40\n assert value.min() >= 1500 and value.max() < 2500\n assert adj.nnz() == row.size(0)\n assert torch.allclose(row.unique(), adj.storage.row().unique())\n assert torch.allclose(col.unique(), adj.storage.col().unique())\n assert torch.allclose(value.unique(), adj.storage.value().unique())\n\n assert is_subset(batch['author', 'paper'].edge_index,\n data['author', 'paper'].edge_index,\n batch['author'].x - 100, batch['paper'].x)\n\n # Test for isolated nodes (there shouldn't exist any):\n n_id = torch.cat([batch['paper'].x, batch['author'].x])\n row, col, _ = full_adj[n_id, n_id].coo()\n assert torch.cat([row, col]).unique().numel() >= 59\n\n\ndef test_hgt_loader_on_cora(get_dataset):\n dataset = get_dataset(name='Cora')\n data = dataset[0]\n data.edge_weight = torch.rand(data.num_edges)\n\n hetero_data = HeteroData()\n hetero_data['paper'].x = data.x\n hetero_data['paper'].n_id = torch.arange(data.num_nodes)\n hetero_data['paper', 'paper'].edge_index = data.edge_index\n hetero_data['paper', 'paper'].edge_weight = data.edge_weight\n\n split_idx = torch.arange(5, 8)\n\n # Sample the complete two-hop neighborhood:\n loader = HGTLoader(hetero_data, num_samples=[data.num_nodes] * 2,\n batch_size=split_idx.numel(),\n input_nodes=('paper', split_idx))\n assert len(loader) == 1\n\n hetero_batch = next(iter(loader))\n batch_size = hetero_batch['paper'].batch_size\n\n n_id, _, _, e_mask = k_hop_subgraph(split_idx, num_hops=2,\n edge_index=data.edge_index,\n num_nodes=data.num_nodes)\n\n n_id = n_id.sort()[0]\n assert n_id.tolist() == hetero_batch['paper'].n_id.sort()[0].tolist()\n assert hetero_batch['paper', 'paper'].num_edges == int(e_mask.sum())\n\n class GNN(torch.nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels):\n super().__init__()\n self.conv1 = GraphConv(in_channels, hidden_channels)\n self.conv2 = GraphConv(hidden_channels, out_channels)\n\n def forward(self, x, edge_index, edge_weight):\n x = self.conv1(x, edge_index, edge_weight).relu()\n x = self.conv2(x, edge_index, edge_weight).relu()\n return x\n\n model = GNN(dataset.num_features, 16, dataset.num_classes)\n hetero_model = to_hetero(model, hetero_data.metadata())\n\n out1 = model(data.x, data.edge_index, data.edge_weight)[split_idx]\n out2 = hetero_model(hetero_batch.x_dict, hetero_batch.edge_index_dict,\n hetero_batch.edge_weight_dict)['paper'][:batch_size]\n assert torch.allclose(out1, out2, atol=1e-6)\n", "import os.path as osp\nfrom typing import Callable, List, Optional\n\nimport torch\nfrom torch_sparse import coalesce\n\nfrom torch_geometric.data import Data, InMemoryDataset, download_url\n\n\nclass Airports(InMemoryDataset):\n r\"\"\"The Airports dataset from the `\"struc2vec: Learning Node\n Representations from Structural Identity\"\n <https://arxiv.org/abs/1704.03165>`_ paper, where nodes denote airports\n and labels correspond to activity levels.\n Features are given by one-hot encoded node identifiers, as described in the\n `\"GraLSP: Graph Neural Networks with Local Structural Patterns\"\n <https://arxiv.org/abs/1911.07675>`_ paper.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset (:obj:`\"USA\"`, :obj:`\"Brazil\"`,\n :obj:`\"Europe\"`).\n transform (callable, 
optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n \"\"\"\n edge_url = ('https://github.com/leoribeiro/struc2vec/'\n 'raw/master/graph/{}-airports.edgelist')\n label_url = ('https://github.com/leoribeiro/struc2vec/'\n 'raw/master/graph/labels-{}-airports.txt')\n\n def __init__(self, root: str, name: str,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None):\n self.name = name.lower()\n assert self.name in ['usa', 'brazil', 'europe']\n super().__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_dir(self) -> str:\n return osp.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self) -> str:\n return osp.join(self.root, self.name, 'processed')\n\n @property\n def raw_file_names(self) -> List[str]:\n return [\n f'{self.name}-airports.edgelist',\n f'labels-{self.name}-airports.txt',\n ]\n\n @property\n def processed_file_names(self) -> str:\n return 'data.pt'\n\n def download(self):\n download_url(self.edge_url.format(self.name), self.raw_dir)\n download_url(self.label_url.format(self.name), self.raw_dir)\n\n def process(self):\n index_map, ys = {}, []\n with open(self.raw_paths[1], 'r') as f:\n data = f.read().split('\\n')[1:-1]\n for i, row in enumerate(data):\n idx, y = row.split()\n index_map[int(idx)] = i\n ys.append(int(y))\n y = torch.tensor(ys, dtype=torch.long)\n x = torch.eye(y.size(0))\n\n edge_indices = []\n with open(self.raw_paths[0], 'r') as f:\n data = f.read().split('\\n')[:-1]\n for row in data:\n src, dst = row.split()\n edge_indices.append([index_map[int(src)], index_map[int(dst)]])\n edge_index = torch.tensor(edge_indices).t().contiguous()\n edge_index, _ = coalesce(edge_index, None, y.size(0), y.size(0))\n\n data = Data(x=x, edge_index=edge_index, y=y)\n data = data if self.pre_transform is None else self.pre_transform(data)\n torch.save(self.collate([data]), self.processed_paths[0])\n\n def __repr__(self) -> str:\n return f'{self.name.capitalize()}Airports()'\n", "import glob\nimport os\nimport os.path as osp\nimport shutil\n\nimport torch\n\nfrom torch_geometric.data import InMemoryDataset, download_url, extract_zip\nfrom torch_geometric.io import read_off\n\n\nclass ModelNet(InMemoryDataset):\n r\"\"\"The ModelNet10/40 datasets from the `\"3D ShapeNets: A Deep\n Representation for Volumetric Shapes\"\n <https://people.csail.mit.edu/khosla/papers/cvpr2015_wu.pdf>`_ paper,\n containing CAD models of 10 and 40 categories, respectively.\n\n .. note::\n\n Data objects hold mesh faces instead of edge indices.\n To convert the mesh to a graph, use the\n :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.\n To convert the mesh to a point cloud, use the\n :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to\n sample a fixed number of points on the mesh faces according to their\n face area.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string, optional): The name of the dataset (:obj:`\"10\"` for\n ModelNet10, :obj:`\"40\"` for ModelNet40). 
(default: :obj:`\"10\"`)\n train (bool, optional): If :obj:`True`, loads the training dataset,\n otherwise the test dataset. (default: :obj:`True`)\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. (default: :obj:`None`)\n \"\"\"\n\n urls = {\n '10':\n 'http://vision.princeton.edu/projects/2014/3DShapeNets/ModelNet10.zip',\n '40': 'http://modelnet.cs.princeton.edu/ModelNet40.zip'\n }\n\n def __init__(self, root, name='10', train=True, transform=None,\n pre_transform=None, pre_filter=None):\n assert name in ['10', '40']\n self.name = name\n super().__init__(root, transform, pre_transform, pre_filter)\n path = self.processed_paths[0] if train else self.processed_paths[1]\n self.data, self.slices = torch.load(path)\n\n @property\n def raw_file_names(self):\n return [\n 'bathtub', 'bed', 'chair', 'desk', 'dresser', 'monitor',\n 'night_stand', 'sofa', 'table', 'toilet'\n ]\n\n @property\n def processed_file_names(self):\n return ['training.pt', 'test.pt']\n\n def download(self):\n path = download_url(self.urls[self.name], self.root)\n extract_zip(path, self.root)\n os.unlink(path)\n folder = osp.join(self.root, f'ModelNet{self.name}')\n shutil.rmtree(self.raw_dir)\n os.rename(folder, self.raw_dir)\n\n # Delete osx metadata generated during compression of ModelNet10\n metadata_folder = osp.join(self.root, '__MACOSX')\n if osp.exists(metadata_folder):\n shutil.rmtree(metadata_folder)\n\n def process(self):\n torch.save(self.process_set('train'), self.processed_paths[0])\n torch.save(self.process_set('test'), self.processed_paths[1])\n\n def process_set(self, dataset):\n categories = glob.glob(osp.join(self.raw_dir, '*', ''))\n categories = sorted([x.split(os.sep)[-2] for x in categories])\n\n data_list = []\n for target, category in enumerate(categories):\n folder = osp.join(self.raw_dir, category, dataset)\n paths = glob.glob(f'{folder}/{category}_*.off')\n for path in paths:\n data = read_off(path)\n data.y = torch.tensor([target])\n data_list.append(data)\n\n if self.pre_filter is not None:\n data_list = [d for d in data_list if self.pre_filter(d)]\n\n if self.pre_transform is not None:\n data_list = [self.pre_transform(d) for d in data_list]\n\n return self.collate(data_list)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}{self.name}({len(self)})'\n", "import torch\n\nfrom torch_geometric.nn import XConv\nfrom torch_geometric.testing import is_full_test, withPackage\n\n\n@withPackage('torch_cluster')\ndef test_x_conv():\n x = torch.randn(8, 16)\n pos = torch.rand(8, 3)\n batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])\n\n conv = XConv(16, 32, dim=3, kernel_size=2, dilation=2)\n assert conv.__repr__() == 'XConv(16, 32)'\n\n torch.manual_seed(12345)\n out1 = conv(x, pos)\n assert out1.size() == (8, 32)\n\n torch.manual_seed(12345)\n out2 = conv(x, pos, batch)\n assert out2.size() == (8, 32)\n\n if is_full_test():\n jit = torch.jit.script(conv)\n\n 
torch.manual_seed(12345)\n assert jit(x, pos).tolist() == out1.tolist()\n\n torch.manual_seed(12345)\n assert jit(x, pos, batch).tolist() == out2.tolist()\n", "import torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear\n\nfrom torch_geometric.nn import (\n EdgePooling,\n GraphConv,\n JumpingKnowledge,\n global_mean_pool,\n)\n\n\nclass EdgePool(torch.nn.Module):\n def __init__(self, dataset, num_layers, hidden):\n super().__init__()\n self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')\n self.convs = torch.nn.ModuleList()\n self.pools = torch.nn.ModuleList()\n self.convs.extend([\n GraphConv(hidden, hidden, aggr='mean')\n for i in range(num_layers - 1)\n ])\n self.pools.extend(\n [EdgePooling(hidden) for i in range((num_layers) // 2)])\n self.jump = JumpingKnowledge(mode='cat')\n self.lin1 = Linear(num_layers * hidden, hidden)\n self.lin2 = Linear(hidden, dataset.num_classes)\n\n def reset_parameters(self):\n self.conv1.reset_parameters()\n for conv in self.convs:\n conv.reset_parameters()\n for pool in self.pools:\n pool.reset_parameters()\n self.lin1.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, data):\n x, edge_index, batch = data.x, data.edge_index, data.batch\n x = F.relu(self.conv1(x, edge_index))\n xs = [global_mean_pool(x, batch)]\n for i, conv in enumerate(self.convs):\n x = F.relu(conv(x, edge_index))\n xs += [global_mean_pool(x, batch)]\n if i % 2 == 0 and i < len(self.convs) - 1:\n pool = self.pools[i // 2]\n x, edge_index, batch, _ = pool(x, edge_index, batch=batch)\n x = self.jump(xs)\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin2(x)\n return F.log_softmax(x, dim=-1)\n\n def __repr__(self):\n return self.__class__.__name__\n", "import math\nfrom typing import Dict, List, Optional, Union\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn import Parameter\nfrom torch_sparse import SparseTensor\n\nfrom torch_geometric.nn.conv import MessagePassing\nfrom torch_geometric.nn.dense import Linear\nfrom torch_geometric.nn.inits import glorot, ones, reset\nfrom torch_geometric.typing import EdgeType, Metadata, NodeType\nfrom torch_geometric.utils import softmax\n\n\ndef group(xs: List[Tensor], aggr: Optional[str]) -> Optional[Tensor]:\n if len(xs) == 0:\n return None\n elif aggr is None:\n return torch.stack(xs, dim=1)\n elif len(xs) == 1:\n return xs[0]\n else:\n out = torch.stack(xs, dim=0)\n out = getattr(torch, aggr)(out, dim=0)\n out = out[0] if isinstance(out, tuple) else out\n return out\n\n\nclass HGTConv(MessagePassing):\n r\"\"\"The Heterogeneous Graph Transformer (HGT) operator from the\n `\"Heterogeneous Graph Transformer\" <https://arxiv.org/abs/2003.01332>`_\n paper.\n\n .. 
note::\n\n For an example of using HGT, see `examples/hetero/hgt_dblp.py\n <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/\n hetero/hgt_dblp.py>`_.\n\n Args:\n in_channels (int or Dict[str, int]): Size of each input sample of every\n node type, or :obj:`-1` to derive the size from the first input(s)\n to the forward method.\n out_channels (int): Size of each output sample.\n metadata (Tuple[List[str], List[Tuple[str, str, str]]]): The metadata\n of the heterogeneous graph, *i.e.* its node and edge types given\n by a list of strings and a list of string triplets, respectively.\n See :meth:`torch_geometric.data.HeteroData.metadata` for more\n information.\n heads (int, optional): Number of multi-head-attentions.\n (default: :obj:`1`)\n group (string, optional): The aggregation scheme to use for grouping\n node embeddings generated by different relations.\n (:obj:`\"sum\"`, :obj:`\"mean\"`, :obj:`\"min\"`, :obj:`\"max\"`).\n (default: :obj:`\"sum\"`)\n **kwargs (optional): Additional arguments of\n :class:`torch_geometric.nn.conv.MessagePassing`.\n \"\"\"\n def __init__(\n self,\n in_channels: Union[int, Dict[str, int]],\n out_channels: int,\n metadata: Metadata,\n heads: int = 1,\n group: str = \"sum\",\n **kwargs,\n ):\n super().__init__(aggr='add', node_dim=0, **kwargs)\n\n if not isinstance(in_channels, dict):\n in_channels = {node_type: in_channels for node_type in metadata[0]}\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.heads = heads\n self.group = group\n\n self.k_lin = torch.nn.ModuleDict()\n self.q_lin = torch.nn.ModuleDict()\n self.v_lin = torch.nn.ModuleDict()\n self.a_lin = torch.nn.ModuleDict()\n self.skip = torch.nn.ParameterDict()\n for node_type, in_channels in self.in_channels.items():\n self.k_lin[node_type] = Linear(in_channels, out_channels)\n self.q_lin[node_type] = Linear(in_channels, out_channels)\n self.v_lin[node_type] = Linear(in_channels, out_channels)\n self.a_lin[node_type] = Linear(out_channels, out_channels)\n self.skip[node_type] = Parameter(torch.Tensor(1))\n\n self.a_rel = torch.nn.ParameterDict()\n self.m_rel = torch.nn.ParameterDict()\n self.p_rel = torch.nn.ParameterDict()\n dim = out_channels // heads\n for edge_type in metadata[1]:\n edge_type = '__'.join(edge_type)\n self.a_rel[edge_type] = Parameter(torch.Tensor(heads, dim, dim))\n self.m_rel[edge_type] = Parameter(torch.Tensor(heads, dim, dim))\n self.p_rel[edge_type] = Parameter(torch.Tensor(heads))\n\n self.reset_parameters()\n\n def reset_parameters(self):\n reset(self.k_lin)\n reset(self.q_lin)\n reset(self.v_lin)\n reset(self.a_lin)\n ones(self.skip)\n ones(self.p_rel)\n glorot(self.a_rel)\n glorot(self.m_rel)\n\n def forward(\n self,\n x_dict: Dict[NodeType, Tensor],\n edge_index_dict: Union[Dict[EdgeType, Tensor],\n Dict[EdgeType, SparseTensor]] # Support both.\n ) -> Dict[NodeType, Optional[Tensor]]:\n r\"\"\"\n Args:\n x_dict (Dict[str, Tensor]): A dictionary holding input node\n features for each individual node type.\n edge_index_dict (Dict[str, Union[Tensor, SparseTensor]]): A\n dictionary holding graph connectivity information for each\n individual edge type, either as a :obj:`torch.LongTensor` of\n shape :obj:`[2, num_edges]` or a\n :obj:`torch_sparse.SparseTensor`.\n\n :rtype: :obj:`Dict[str, Optional[Tensor]]` - The output node embeddings\n for each node type.\n In case a node type does not receive any message, its output will\n be set to :obj:`None`.\n \"\"\"\n\n H, D = self.heads, self.out_channels // self.heads\n\n k_dict, 
q_dict, v_dict, out_dict = {}, {}, {}, {}\n\n # Iterate over node-types:\n for node_type, x in x_dict.items():\n k_dict[node_type] = self.k_lin[node_type](x).view(-1, H, D)\n q_dict[node_type] = self.q_lin[node_type](x).view(-1, H, D)\n v_dict[node_type] = self.v_lin[node_type](x).view(-1, H, D)\n out_dict[node_type] = []\n\n # Iterate over edge-types:\n for edge_type, edge_index in edge_index_dict.items():\n src_type, _, dst_type = edge_type\n edge_type = '__'.join(edge_type)\n\n a_rel = self.a_rel[edge_type]\n k = (k_dict[src_type].transpose(0, 1) @ a_rel).transpose(1, 0)\n\n m_rel = self.m_rel[edge_type]\n v = (v_dict[src_type].transpose(0, 1) @ m_rel).transpose(1, 0)\n\n # propagate_type: (k: Tensor, q: Tensor, v: Tensor, rel: Tensor)\n out = self.propagate(edge_index, k=k, q=q_dict[dst_type], v=v,\n rel=self.p_rel[edge_type], size=None)\n out_dict[dst_type].append(out)\n\n # Iterate over node-types:\n for node_type, outs in out_dict.items():\n out = group(outs, self.group)\n\n if out is None:\n out_dict[node_type] = None\n continue\n\n out = self.a_lin[node_type](F.gelu(out))\n if out.size(-1) == x_dict[node_type].size(-1):\n alpha = self.skip[node_type].sigmoid()\n out = alpha * out + (1 - alpha) * x_dict[node_type]\n out_dict[node_type] = out\n\n return out_dict\n\n def message(self, k_j: Tensor, q_i: Tensor, v_j: Tensor, rel: Tensor,\n index: Tensor, ptr: Optional[Tensor],\n size_i: Optional[int]) -> Tensor:\n\n alpha = (q_i * k_j).sum(dim=-1) * rel\n alpha = alpha / math.sqrt(q_i.size(-1))\n alpha = softmax(alpha, index, ptr, size_i)\n out = v_j * alpha.view(-1, self.heads, 1)\n return out.view(-1, self.out_channels)\n\n def __repr__(self) -> str:\n return (f'{self.__class__.__name__}(-1, {self.out_channels}, '\n f'heads={self.heads})')\n", "from typing import Optional\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.nn import GRUCell, Linear, Parameter\n\nfrom torch_geometric.nn import GATConv, MessagePassing, global_add_pool\nfrom torch_geometric.typing import Adj, OptTensor\nfrom torch_geometric.utils import softmax\n\nfrom ..inits import glorot, zeros\n\n\nclass GATEConv(MessagePassing):\n def __init__(self, in_channels: int, out_channels: int, edge_dim: int,\n dropout: float = 0.0):\n super().__init__(aggr='add', node_dim=0)\n\n self.dropout = dropout\n\n self.att_l = Parameter(torch.Tensor(1, out_channels))\n self.att_r = Parameter(torch.Tensor(1, in_channels))\n\n self.lin1 = Linear(in_channels + edge_dim, out_channels, False)\n self.lin2 = Linear(out_channels, out_channels, False)\n\n self.bias = Parameter(torch.Tensor(out_channels))\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.att_l)\n glorot(self.att_r)\n glorot(self.lin1.weight)\n glorot(self.lin2.weight)\n zeros(self.bias)\n\n def forward(self, x: Tensor, edge_index: Adj, edge_attr: Tensor) -> Tensor:\n out = self.propagate(edge_index, x=x, edge_attr=edge_attr)\n out += self.bias\n return out\n\n def message(self, x_j: Tensor, x_i: Tensor, edge_attr: Tensor,\n index: Tensor, ptr: OptTensor,\n size_i: Optional[int]) -> Tensor:\n\n x_j = F.leaky_relu_(self.lin1(torch.cat([x_j, edge_attr], dim=-1)))\n alpha_j = (x_j * self.att_l).sum(dim=-1)\n alpha_i = (x_i * self.att_r).sum(dim=-1)\n alpha = alpha_j + alpha_i\n alpha = F.leaky_relu_(alpha)\n alpha = softmax(alpha, index, ptr, size_i)\n alpha = F.dropout(alpha, p=self.dropout, training=self.training)\n return self.lin2(x_j) * alpha.unsqueeze(-1)\n\n\nclass AttentiveFP(torch.nn.Module):\n r\"\"\"The 
Attentive FP model for molecular representation learning from the\n `\"Pushing the Boundaries of Molecular Representation for Drug Discovery\n with the Graph Attention Mechanism\"\n <https://pubs.acs.org/doi/10.1021/acs.jmedchem.9b00959>`_ paper, based on\n graph attention mechanisms.\n\n Args:\n in_channels (int): Size of each input sample.\n hidden_channels (int): Hidden node feature dimensionality.\n out_channels (int): Size of each output sample.\n edge_dim (int): Edge feature dimensionality.\n num_layers (int): Number of GNN layers.\n num_timesteps (int): Number of iterative refinement steps for global\n readout.\n dropout (float, optional): Dropout probability. (default: :obj:`0.0`)\n\n \"\"\"\n def __init__(self, in_channels: int, hidden_channels: int,\n out_channels: int, edge_dim: int, num_layers: int,\n num_timesteps: int, dropout: float = 0.0):\n super().__init__()\n\n self.num_layers = num_layers\n self.num_timesteps = num_timesteps\n self.dropout = dropout\n\n self.lin1 = Linear(in_channels, hidden_channels)\n\n conv = GATEConv(hidden_channels, hidden_channels, edge_dim, dropout)\n gru = GRUCell(hidden_channels, hidden_channels)\n self.atom_convs = torch.nn.ModuleList([conv])\n self.atom_grus = torch.nn.ModuleList([gru])\n for _ in range(num_layers - 1):\n conv = GATConv(hidden_channels, hidden_channels, dropout=dropout,\n add_self_loops=False, negative_slope=0.01)\n self.atom_convs.append(conv)\n self.atom_grus.append(GRUCell(hidden_channels, hidden_channels))\n\n self.mol_conv = GATConv(hidden_channels, hidden_channels,\n dropout=dropout, add_self_loops=False,\n negative_slope=0.01)\n self.mol_gru = GRUCell(hidden_channels, hidden_channels)\n\n self.lin2 = Linear(hidden_channels, out_channels)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n self.lin1.reset_parameters()\n for conv, gru in zip(self.atom_convs, self.atom_grus):\n conv.reset_parameters()\n gru.reset_parameters()\n self.mol_conv.reset_parameters()\n self.mol_gru.reset_parameters()\n self.lin2.reset_parameters()\n\n def forward(self, x, edge_index, edge_attr, batch):\n \"\"\"\"\"\"\n # Atom Embedding:\n x = F.leaky_relu_(self.lin1(x))\n\n h = F.elu_(self.atom_convs[0](x, edge_index, edge_attr))\n h = F.dropout(h, p=self.dropout, training=self.training)\n x = self.atom_grus[0](h, x).relu_()\n\n for conv, gru in zip(self.atom_convs[1:], self.atom_grus[1:]):\n h = F.elu_(conv(x, edge_index))\n h = F.dropout(h, p=self.dropout, training=self.training)\n x = gru(h, x).relu_()\n\n # Molecule Embedding:\n row = torch.arange(batch.size(0), device=batch.device)\n edge_index = torch.stack([row, batch], dim=0)\n\n out = global_add_pool(x, batch).relu_()\n for t in range(self.num_timesteps):\n h = F.elu_(self.mol_conv((x, out), edge_index))\n h = F.dropout(h, p=self.dropout, training=self.training)\n out = self.mol_gru(h, out).relu_()\n\n # Predictor:\n out = F.dropout(out, p=self.dropout, training=self.training)\n return self.lin2(out)\n", "import torch\n\nfrom torch_geometric.data import TemporalData\n\n\ndef get_temporal_data(num_events, msg_channels):\n return TemporalData(\n src=torch.arange(num_events),\n dst=torch.arange(num_events, num_events * 2),\n t=torch.arange(0, num_events * 1000, step=1000),\n msg=torch.randn(num_events, msg_channels),\n y=torch.randint(0, 2, (num_events, )),\n )\n\n\ndef test_temporal_data():\n data = get_temporal_data(num_events=3, msg_channels=16)\n assert str(data) == (\"TemporalData(src=[3], dst=[3], t=[3], \"\n \"msg=[3, 16], y=[3])\")\n\n assert data.num_nodes == 6\n assert 
data.num_events == len(data) == 3\n\n assert data.src.tolist() == [0, 1, 2]\n assert data['src'].tolist() == [0, 1, 2]\n\n assert sorted(data.keys) == ['dst', 'msg', 'src', 't', 'y']\n assert sorted(data.to_dict().keys()) == sorted(data.keys)\n\n data_tuple = data.to_namedtuple()\n assert len(data_tuple) == 5\n assert data_tuple.src is not None\n assert data_tuple.dst is not None\n assert data_tuple.t is not None\n assert data_tuple.msg is not None\n assert data_tuple.y is not None\n\n assert data.__cat_dim__('src', data.src) == 0\n assert data.__inc__('src', data.src) == 6\n\n clone = data.clone()\n assert clone != data\n assert len(clone) == len(data)\n assert clone.src.data_ptr() != data.src.data_ptr()\n assert clone.src.tolist() == data.src.tolist()\n assert clone.dst.data_ptr() != data.dst.data_ptr()\n assert clone.dst.tolist() == data.dst.tolist()\n\n key = value = 'test_value'\n data[key] = value\n assert data[key] == value\n assert data.test_value == value\n del data[key]\n del data[key] # Deleting unset attributes should work as well.\n\n assert data.get(key, 10) == 10\n\n\ndef test_train_val_test_split():\n data = get_temporal_data(num_events=100, msg_channels=16)\n\n train_data, val_data, test_data = data.train_val_test_split(\n val_ratio=0.2, test_ratio=0.15)\n\n assert len(train_data) == 65\n assert len(val_data) == 20\n assert len(test_data) == 15\n\n assert train_data.t.max() < val_data.t.min()\n assert val_data.t.max() < test_data.t.min()\n\n\ndef test_temporal_indexing():\n data = get_temporal_data(num_events=10, msg_channels=16)\n\n elem = data[0]\n assert isinstance(elem, TemporalData)\n assert len(elem) == 1\n assert elem.src.tolist() == data.src[0:1].tolist()\n assert elem.dst.tolist() == data.dst[0:1].tolist()\n assert elem.t.tolist() == data.t[0:1].tolist()\n assert elem.msg.tolist() == data.msg[0:1].tolist()\n assert elem.y.tolist() == data.y[0:1].tolist()\n\n subset = data[0:5]\n assert isinstance(subset, TemporalData)\n assert len(subset) == 5\n assert subset.src.tolist() == data.src[0:5].tolist()\n assert subset.dst.tolist() == data.dst[0:5].tolist()\n assert subset.t.tolist() == data.t[0:5].tolist()\n assert subset.msg.tolist() == data.msg[0:5].tolist()\n assert subset.y.tolist() == data.y[0:5].tolist()\n\n index = [0, 4, 8]\n subset = data[torch.tensor(index)]\n assert isinstance(subset, TemporalData)\n assert len(subset) == 3\n assert subset.src.tolist() == data.src[0::4].tolist()\n assert subset.dst.tolist() == data.dst[0::4].tolist()\n assert subset.t.tolist() == data.t[0::4].tolist()\n assert subset.msg.tolist() == data.msg[0::4].tolist()\n assert subset.y.tolist() == data.y[0::4].tolist()\n\n mask = [True, False, True, False, True, False, True, False, True, False]\n subset = data[torch.tensor(mask)]\n assert isinstance(subset, TemporalData)\n assert len(subset) == 5\n assert subset.src.tolist() == data.src[0::2].tolist()\n assert subset.dst.tolist() == data.dst[0::2].tolist()\n assert subset.t.tolist() == data.t[0::2].tolist()\n assert subset.msg.tolist() == data.msg[0::2].tolist()\n assert subset.y.tolist() == data.y[0::2].tolist()\n", "from typing import Any, Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.data import Data, HeteroData\nfrom torch_geometric.loader.base import DataLoaderIterator\nfrom torch_geometric.loader.neighbor_loader import NeighborSampler\nfrom torch_geometric.loader.utils import filter_data, filter_hetero_data\nfrom torch_geometric.typing import InputEdges, 
NumNeighbors, OptTensor\n\n\nclass LinkNeighborSampler(NeighborSampler):\n def __init__(self, data, *args, neg_sampling_ratio: float = 0.0, **kwargs):\n super().__init__(data, *args, **kwargs)\n self.neg_sampling_ratio = neg_sampling_ratio\n\n if issubclass(self.data_cls, Data):\n self.num_src_nodes = self.num_dst_nodes = data.num_nodes\n else:\n self.num_src_nodes = data[self.input_type[0]].num_nodes\n self.num_dst_nodes = data[self.input_type[-1]].num_nodes\n\n def _create_label(self, edge_label_index, edge_label):\n device = edge_label_index.device\n\n num_pos_edges = edge_label_index.size(1)\n num_neg_edges = int(num_pos_edges * self.neg_sampling_ratio)\n\n if num_neg_edges == 0:\n return edge_label_index, edge_label\n\n if edge_label is None:\n edge_label = torch.ones(num_pos_edges, device=device)\n else:\n assert edge_label.dtype == torch.long\n edge_label = edge_label + 1\n\n neg_row = torch.randint(self.num_src_nodes, (num_neg_edges, ))\n neg_col = torch.randint(self.num_dst_nodes, (num_neg_edges, ))\n neg_edge_label_index = torch.stack([neg_row, neg_col], dim=0)\n\n neg_edge_label = edge_label.new_zeros((num_neg_edges, ) +\n edge_label.size()[1:])\n\n edge_label_index = torch.cat([\n edge_label_index,\n neg_edge_label_index,\n ], dim=1)\n\n edge_label = torch.cat([edge_label, neg_edge_label], dim=0)\n\n return edge_label_index, edge_label\n\n def __call__(self, query: List[Tuple[Tensor]]):\n query = [torch.tensor(s) for s in zip(*query)]\n if len(query) == 2:\n edge_label_index = torch.stack(query, dim=0)\n edge_label = None\n else:\n edge_label_index = torch.stack(query[:2], dim=0)\n edge_label = query[2]\n\n edge_label_index, edge_label = self._create_label(\n edge_label_index, edge_label)\n\n if issubclass(self.data_cls, Data):\n sample_fn = torch.ops.torch_sparse.neighbor_sample\n\n query_nodes = edge_label_index.view(-1)\n query_nodes, reverse = query_nodes.unique(return_inverse=True)\n edge_label_index = reverse.view(2, -1)\n\n node, row, col, edge = sample_fn(\n self.colptr,\n self.row,\n query_nodes,\n self.num_neighbors,\n self.replace,\n self.directed,\n )\n\n return node, row, col, edge, edge_label_index, edge_label\n\n elif issubclass(self.data_cls, HeteroData):\n sample_fn = torch.ops.torch_sparse.hetero_neighbor_sample\n\n if self.input_type[0] != self.input_type[-1]:\n query_src = edge_label_index[0]\n query_src, reverse_src = query_src.unique(return_inverse=True)\n query_dst = edge_label_index[1]\n query_dst, reverse_dst = query_dst.unique(return_inverse=True)\n edge_label_index = torch.stack([reverse_src, reverse_dst], 0)\n query_node_dict = {\n self.input_type[0]: query_src,\n self.input_type[-1]: query_dst,\n }\n else: # Merge both source and destination node indices:\n query_nodes = edge_label_index.view(-1)\n query_nodes, reverse = query_nodes.unique(return_inverse=True)\n edge_label_index = reverse.view(2, -1)\n query_node_dict = {self.input_type[0]: query_nodes}\n\n node_dict, row_dict, col_dict, edge_dict = sample_fn(\n self.node_types,\n self.edge_types,\n self.colptr_dict,\n self.row_dict,\n query_node_dict,\n self.num_neighbors,\n self.num_hops,\n self.replace,\n self.directed,\n )\n\n return (node_dict, row_dict, col_dict, edge_dict, edge_label_index,\n edge_label)\n\n\nclass LinkNeighborLoader(torch.utils.data.DataLoader):\n r\"\"\"A link-based data loader derived as an extension of the node-based\n :class:`torch_geometric.loader.NeighborLoader`.\n This loader allows for mini-batch training of GNNs on large-scale graphs\n where full-batch training is 
not feasible.\n\n More specifically, this loader first selects a sample of edges from the\n set of input edges :obj:`edge_label_index` (which may or may not be edges in\n the original graph) and then constructs a subgraph from all the nodes\n present in this list by sampling :obj:`num_neighbors` neighbors in each\n iteration.\n\n .. code-block:: python\n\n from torch_geometric.datasets import Planetoid\n from torch_geometric.loader import LinkNeighborLoader\n\n data = Planetoid(path, name='Cora')[0]\n\n loader = LinkNeighborLoader(\n data,\n # Sample 30 neighbors for each node for 2 iterations\n num_neighbors=[30] * 2,\n # Use a batch size of 128 for sampling training edges\n batch_size=128,\n edge_label_index=data.edge_index,\n )\n\n sampled_data = next(iter(loader))\n print(sampled_data)\n >>> Data(x=[1368, 1433], edge_index=[2, 3103], y=[1368],\n train_mask=[1368], val_mask=[1368], test_mask=[1368],\n edge_label_index=[2, 128])\n\n It is additionally possible to provide edge labels for sampled edges, which\n are then added to the batch:\n\n .. code-block:: python\n\n loader = LinkNeighborLoader(\n data,\n num_neighbors=[30] * 2,\n batch_size=128,\n edge_label_index=data.edge_index,\n edge_label=torch.ones(data.edge_index.size(1))\n )\n\n sampled_data = next(iter(loader))\n print(sampled_data)\n >>> Data(x=[1368, 1433], edge_index=[2, 3103], y=[1368],\n train_mask=[1368], val_mask=[1368], test_mask=[1368],\n edge_label_index=[2, 128], edge_label=[128])\n\n The rest of the functionality mirrors that of\n :class:`~torch_geometric.loader.NeighborLoader`, including support for\n heterogeneous graphs.\n\n .. note::\n :obj:`neg_sampling_ratio` is currently implemented in an approximate\n way, *i.e.* negative edges may contain false negatives.\n\n Args:\n data (torch_geometric.data.Data or torch_geometric.data.HeteroData):\n The :class:`~torch_geometric.data.Data` or\n :class:`~torch_geometric.data.HeteroData` graph object.\n num_neighbors (List[int] or Dict[Tuple[str, str, str], List[int]]): The\n number of neighbors to sample for each node in each iteration.\n In heterogeneous graphs, may also take in a dictionary denoting\n the number of neighbors to sample for each individual edge type.\n If an entry is set to :obj:`-1`, all neighbors will be included.\n edge_label_index (Tensor or EdgeType or Tuple[EdgeType, Tensor]):\n The edge indices for which neighbors are sampled to create\n mini-batches.\n If set to :obj:`None`, all edges will be considered.\n In heterogeneous graphs, needs to be passed as a tuple that holds\n the edge type and corresponding edge indices.\n (default: :obj:`None`)\n edge_label (Tensor): The labels of edge indices for which neighbors are\n sampled. Must be the same length as :obj:`edge_label_index`.\n If set to :obj:`None`, no labels are returned in the batch.\n replace (bool, optional): If set to :obj:`True`, will sample with\n replacement. (default: :obj:`False`)\n directed (bool, optional): If set to :obj:`False`, will include all\n edges between all sampled nodes. 
(default: :obj:`True`)\n transform (Callable, optional): A function/transform that takes in\n a sampled mini-batch and returns a transformed version.\n (default: :obj:`None`)\n neg_sampling_ratio (float, optional): The ratio of sampled negative\n edges to the number of positive edges.\n If :obj:`edge_label` does not exist, it will be automatically\n created and represents a binary classification task\n (:obj:`1` = edge, :obj:`0` = no edge).\n If :obj:`edge_label` exists, it has to be a categorical label from\n :obj:`0` to :obj:`num_classes - 1`.\n After negative sampling, label :obj:`0` represents negative edges,\n and labels :obj:`1` to :obj:`num_classes` represent the labels of\n positive edges.\n Note that returned labels are of type :obj:`torch.float` for binary\n classification (to facilitate the ease-of-use of\n :meth:`F.binary_cross_entropy`) and of type\n :obj:`torch.long` for multi-class classification (to facilitate the\n ease-of-use of :meth:`F.cross_entropy`). (default: :obj:`0.0`).\n **kwargs (optional): Additional arguments of\n :class:`torch.utils.data.DataLoader`, such as :obj:`batch_size`,\n :obj:`shuffle`, :obj:`drop_last` or :obj:`num_workers`.\n \"\"\"\n def __init__(\n self,\n data: Union[Data, HeteroData],\n num_neighbors: NumNeighbors,\n edge_label_index: InputEdges = None,\n edge_label: OptTensor = None,\n replace: bool = False,\n directed: bool = True,\n transform: Callable = None,\n neighbor_sampler: Optional[LinkNeighborSampler] = None,\n neg_sampling_ratio: float = 0.0,\n **kwargs,\n ):\n # Remove for PyTorch Lightning:\n if 'dataset' in kwargs:\n del kwargs['dataset']\n if 'collate_fn' in kwargs:\n del kwargs['collate_fn']\n\n self.data = data\n\n # Save for PyTorch Lightning < 1.6:\n self.num_neighbors = num_neighbors\n self.edge_label_index = edge_label_index\n self.edge_label = edge_label\n self.replace = replace\n self.directed = directed\n self.transform = transform\n self.neighbor_sampler = neighbor_sampler\n self.neg_sampling_ratio = neg_sampling_ratio\n\n edge_type, edge_label_index = get_edge_label_index(\n data, edge_label_index)\n\n if neighbor_sampler is None:\n self.neighbor_sampler = LinkNeighborSampler(\n data, num_neighbors, replace, directed, edge_type,\n share_memory=kwargs.get('num_workers', 0) > 0,\n neg_sampling_ratio=self.neg_sampling_ratio)\n\n super().__init__(Dataset(edge_label_index, edge_label),\n collate_fn=self.neighbor_sampler, **kwargs)\n\n def transform_fn(self, out: Any) -> Union[Data, HeteroData]:\n if isinstance(self.data, Data):\n node, row, col, edge, edge_label_index, edge_label = out\n data = filter_data(self.data, node, row, col, edge,\n self.neighbor_sampler.perm)\n data.edge_label_index = edge_label_index\n if edge_label is not None:\n data.edge_label = edge_label\n\n elif isinstance(self.data, HeteroData):\n (node_dict, row_dict, col_dict, edge_dict, edge_label_index,\n edge_label) = out\n data = filter_hetero_data(self.data, node_dict, row_dict, col_dict,\n edge_dict,\n self.neighbor_sampler.perm_dict)\n edge_type = self.neighbor_sampler.input_type\n data[edge_type].edge_label_index = edge_label_index\n if edge_label is not None:\n data[edge_type].edge_label = edge_label\n\n return data if self.transform is None else self.transform(data)\n\n def _get_iterator(self) -> Iterator:\n return DataLoaderIterator(super()._get_iterator(), self.transform_fn)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}()'\n\n\n###############################################################################\n\n\nclass 
Dataset(torch.utils.data.Dataset):\n def __init__(self, edge_label_index: Tensor, edge_label: OptTensor = None):\n self.edge_label_index = edge_label_index\n self.edge_label = edge_label\n\n def __getitem__(self, idx: int) -> Tuple[int]:\n if self.edge_label is None:\n return self.edge_label_index[0, idx], self.edge_label_index[1, idx]\n else:\n return (self.edge_label_index[0, idx],\n self.edge_label_index[1, idx], self.edge_label[idx])\n\n def __len__(self) -> int:\n return self.edge_label_index.size(1)\n\n\ndef get_edge_label_index(\n data: Union[Data, HeteroData],\n edge_label_index: InputEdges,\n) -> Tuple[Optional[str], Tensor]:\n edge_type = None\n if isinstance(data, Data):\n if edge_label_index is None:\n return None, data.edge_index\n return None, edge_label_index\n\n assert edge_label_index is not None\n assert isinstance(edge_label_index, (list, tuple))\n\n if isinstance(edge_label_index[0], str):\n edge_type = edge_label_index\n edge_type = data._to_canonical(*edge_type)\n assert edge_type in data.edge_types\n return edge_type, data[edge_type].edge_index\n\n assert len(edge_label_index) == 2\n\n edge_type, edge_label_index = edge_label_index\n edge_type = data._to_canonical(*edge_type)\n assert edge_type in data.edge_types\n\n if edge_label_index is None:\n return edge_type, data[edge_type].edge_index\n\n return edge_type, edge_label_index\n", "import torch.nn as nn\n\nfrom torch_geometric.graphgym.config import cfg\nfrom torch_geometric.graphgym.register import register_act\n\nif cfg is not None:\n register_act('relu', nn.ReLU(inplace=cfg.mem.inplace))\n register_act('selu', nn.SELU(inplace=cfg.mem.inplace))\n register_act('prelu', nn.PReLU())\n register_act('elu', nn.ELU(inplace=cfg.mem.inplace))\n register_act('lrelu_01', nn.LeakyReLU(0.1, inplace=cfg.mem.inplace))\n register_act('lrelu_025', nn.LeakyReLU(0.25, inplace=cfg.mem.inplace))\n register_act('lrelu_05', nn.LeakyReLU(0.5, inplace=cfg.mem.inplace))\n", "import os.path as osp\n\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.metrics import f1_score\nfrom torch.nn import BatchNorm1d\n\nfrom torch_geometric.datasets import PPI\nfrom torch_geometric.loader import DataLoader\nfrom torch_geometric.nn import FiLMConv\n\npath = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'PPI')\ntrain_dataset = PPI(path, split='train')\nval_dataset = PPI(path, split='val')\ntest_dataset = PPI(path, split='test')\ntrain_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)\nval_loader = DataLoader(val_dataset, batch_size=2, shuffle=False)\ntest_loader = DataLoader(test_dataset, batch_size=2, shuffle=False)\n\n\nclass Net(torch.nn.Module):\n def __init__(self, in_channels, hidden_channels, out_channels, num_layers,\n dropout=0.0):\n super().__init__()\n self.dropout = dropout\n\n self.convs = torch.nn.ModuleList()\n self.convs.append(FiLMConv(in_channels, hidden_channels))\n for _ in range(num_layers - 2):\n self.convs.append(FiLMConv(hidden_channels, hidden_channels))\n self.convs.append(FiLMConv(hidden_channels, out_channels, act=None))\n\n self.norms = torch.nn.ModuleList()\n for _ in range(num_layers - 1):\n self.norms.append(BatchNorm1d(hidden_channels))\n\n def forward(self, x, edge_index):\n for conv, norm in zip(self.convs[:-1], self.norms):\n x = norm(conv(x, edge_index))\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = self.convs[-1](x, edge_index)\n return x\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = 
Net(in_channels=train_dataset.num_features, hidden_channels=320,\n out_channels=train_dataset.num_classes, num_layers=4,\n dropout=0.1).to(device)\ncriterion = torch.nn.BCEWithLogitsLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\n\ndef train():\n model.train()\n\n total_loss = 0\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad()\n loss = criterion(model(data.x, data.edge_index), data.y)\n total_loss += loss.item() * data.num_graphs\n loss.backward()\n optimizer.step()\n return total_loss / len(train_loader.dataset)\n\n\[email protected]_grad()\ndef test(loader):\n model.eval()\n\n ys, preds = [], []\n for data in loader:\n ys.append(data.y)\n out = model(data.x.to(device), data.edge_index.to(device))\n preds.append((out > 0).float().cpu())\n\n y, pred = torch.cat(ys, dim=0).numpy(), torch.cat(preds, dim=0).numpy()\n return f1_score(y, pred, average='micro') if pred.sum() > 0 else 0\n\n\nfor epoch in range(1, 501):\n loss = train()\n val_f1 = test(val_loader)\n test_f1 = test(test_loader)\n print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_f1:.4f}, '\n f'Test: {test_f1:.4f}')\n", "import os\nimport os.path as osp\nfrom typing import Callable, List, Optional\n\nimport torch\n\nfrom torch_geometric.data import Data, InMemoryDataset, download_url\n\n\nclass RelLinkPredDataset(InMemoryDataset):\n r\"\"\"The relational link prediction datasets from the\n `\"Modeling Relational Data with Graph Convolutional Networks\"\n <https://arxiv.org/abs/1703.06103>`_ paper.\n Training and test splits are given by sets of triplets.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset (:obj:`\"FB15k-237\"`).\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. 
(default: :obj:`None`)\n \"\"\"\n\n urls = {\n 'FB15k-237': ('https://raw.githubusercontent.com/MichSchli/'\n 'RelationPrediction/master/data/FB-Toutanova')\n }\n\n def __init__(self, root: str, name: str,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None):\n self.name = name\n assert name in ['FB15k-237']\n super().__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def num_relations(self) -> int:\n return int(self.data.edge_type.max()) + 1\n\n @property\n def raw_dir(self) -> str:\n return os.path.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self) -> str:\n return os.path.join(self.root, self.name, 'processed')\n\n @property\n def processed_file_names(self) -> str:\n return 'data.pt'\n\n @property\n def raw_file_names(self) -> List[str]:\n return [\n 'entities.dict', 'relations.dict', 'test.txt', 'train.txt',\n 'valid.txt'\n ]\n\n def download(self):\n for file_name in self.raw_file_names:\n download_url(f'{self.urls[self.name]}/{file_name}', self.raw_dir)\n\n def process(self):\n with open(osp.join(self.raw_dir, 'entities.dict'), 'r') as f:\n lines = [row.split('\\t') for row in f.read().split('\\n')[:-1]]\n entities_dict = {key: int(value) for value, key in lines}\n\n with open(osp.join(self.raw_dir, 'relations.dict'), 'r') as f:\n lines = [row.split('\\t') for row in f.read().split('\\n')[:-1]]\n relations_dict = {key: int(value) for value, key in lines}\n\n kwargs = {}\n for split in ['train', 'valid', 'test']:\n with open(osp.join(self.raw_dir, f'{split}.txt'), 'r') as f:\n lines = [row.split('\\t') for row in f.read().split('\\n')[:-1]]\n src = [entities_dict[row[0]] for row in lines]\n rel = [relations_dict[row[1]] for row in lines]\n dst = [entities_dict[row[2]] for row in lines]\n kwargs[f'{split}_edge_index'] = torch.tensor([src, dst])\n kwargs[f'{split}_edge_type'] = torch.tensor(rel)\n\n # For message passing, we add reverse edges and types to the graph:\n row, col = kwargs['train_edge_index']\n edge_type = kwargs['train_edge_type']\n row, col = torch.cat([row, col], dim=0), torch.cat([col, row], dim=0)\n edge_index = torch.stack([row, col], dim=0)\n edge_type = torch.cat([edge_type, edge_type + len(relations_dict)])\n\n data = Data(num_nodes=len(entities_dict), edge_index=edge_index,\n edge_type=edge_type, **kwargs)\n\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n\n torch.save((self.collate([data])), self.processed_paths[0])\n\n def __repr__(self) -> str:\n return f'{self.name}()'\n", "import glob\nimport os\nimport os.path as osp\nfrom typing import Callable, List, Optional\n\nimport torch\n\nfrom torch_geometric.data import InMemoryDataset, download_url, extract_zip\nfrom torch_geometric.io import read_off\n\n\nclass GeometricShapes(InMemoryDataset):\n r\"\"\"Synthetic dataset of various geometric shapes like cubes, spheres or\n pyramids.\n\n .. note::\n\n Data objects hold mesh faces instead of edge indices.\n To convert the mesh to a graph, use the\n :obj:`torch_geometric.transforms.FaceToEdge` as :obj:`pre_transform`.\n To convert the mesh to a point cloud, use the\n :obj:`torch_geometric.transforms.SamplePoints` as :obj:`transform` to\n sample a fixed number of points on the mesh faces according to their\n face area.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n train (bool, optional): If :obj:`True`, loads the training dataset,\n otherwise the test dataset. 
(default: :obj:`True`)\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. (default: :obj:`None`)\n \"\"\"\n\n url = 'https://github.com/Yannick-S/geometric_shapes/raw/master/raw.zip'\n\n def __init__(self, root: str, train: bool = True,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n pre_filter: Optional[Callable] = None):\n super().__init__(root, transform, pre_transform, pre_filter)\n path = self.processed_paths[0] if train else self.processed_paths[1]\n self.data, self.slices = torch.load(path)\n\n @property\n def raw_file_names(self) -> str:\n return '2d_circle'\n\n @property\n def processed_file_names(self) -> List[str]:\n return ['training.pt', 'test.pt']\n\n def download(self):\n path = download_url(self.url, self.root)\n extract_zip(path, self.root)\n os.unlink(path)\n\n def process(self):\n torch.save(self.process_set('train'), self.processed_paths[0])\n torch.save(self.process_set('test'), self.processed_paths[1])\n\n def process_set(self, dataset: str):\n categories = glob.glob(osp.join(self.raw_dir, '*', ''))\n categories = sorted([x.split(os.sep)[-2] for x in categories])\n\n data_list = []\n for target, category in enumerate(categories):\n folder = osp.join(self.raw_dir, category, dataset)\n paths = glob.glob(f'{folder}/*.off')\n for path in paths:\n data = read_off(path)\n data.pos = data.pos - data.pos.mean(dim=0, keepdim=True)\n data.y = torch.tensor([target])\n data_list.append(data)\n\n if self.pre_filter is not None:\n data_list = [d for d in data_list if self.pre_filter(d)]\n\n if self.pre_transform is not None:\n data_list = [self.pre_transform(d) for d in data_list]\n\n return self.collate(data_list)\n" ]
[ [ "torch.cuda.synchronize", "torch.cuda.is_available", "torch.tensor" ], [ "torch.nn.BatchNorm1d", "torch.cuda.device_count", "torch.nn.functional.cross_entropy" ], [ "torch.randn", "torch.ones" ], [ "torch.randn" ], [ "torch.randn", "torch.tensor" ], [ "torch.randint", "torch.cat", "torch.manual_seed", "torch.tensor", "torch.rand", "torch.arange", "torch.stack", "torch.allclose", "numpy.isin" ], [ "torch.tensor", "torch.load" ], [ "torch.tensor", "torch.load" ], [ "torch.jit.script", "torch.randn", "torch.manual_seed", "torch.tensor", "torch.rand" ], [ "torch.nn.Linear", "torch.nn.ModuleList", "torch.nn.functional.log_softmax", "torch.nn.functional.dropout" ], [ "torch.Tensor", "torch.nn.ParameterDict", "torch.nn.ModuleDict", "torch.nn.functional.gelu", "torch.stack" ], [ "torch.Tensor", "torch.nn.functional.dropout", "torch.cat", "torch.nn.ModuleList", "torch.nn.Linear", "torch.nn.functional.leaky_relu_", "torch.nn.GRUCell", "torch.stack" ], [ "torch.tensor", "torch.randn", "torch.randint", "torch.arange" ], [ "torch.randint", "torch.ones", "torch.cat", "torch.tensor", "torch.stack" ], [ "torch.nn.PReLU", "torch.nn.ELU", "torch.nn.SELU", "torch.nn.LeakyReLU", "torch.nn.ReLU" ], [ "torch.nn.BatchNorm1d", "torch.nn.functional.dropout", "torch.cat", "torch.nn.ModuleList", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "torch.cuda.is_available", "sklearn.metrics.f1_score" ], [ "torch.stack", "torch.tensor", "torch.cat", "torch.load" ], [ "torch.tensor", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sandhi-artha/solaris
[ "230a58f94f300062ee880d43920d218edf3321c4", "230a58f94f300062ee880d43920d218edf3321c4" ]
[ "solaris/nets/infer.py", "solaris/utils/geo.py" ]
[ "import os\nimport torch\nfrom osgeo import gdal\nimport numpy as np\nfrom warnings import warn\nfrom .model_io import get_model\nfrom .transform import process_aug_dict\nfrom .datagen import InferenceTiler\nfrom ..raster.image import stitch_images, create_multiband_geotiff\nfrom ..utils.core import get_data_paths\n\n\nclass Inferer(object):\n \"\"\"Object for training `solaris` models using PyTorch or Keras.\"\"\"\n\n def __init__(self, config, custom_model_dict=None):\n self.config = config\n self.batch_size = self.config['batch_size']\n self.framework = self.config['nn_framework']\n self.model_name = self.config['model_name']\n # check if the model was trained as part of the same pipeline; if so,\n # use the output from that. If not, use the pre-trained model directly.\n if self.config['train']:\n warn('Because the configuration specifies both training and '\n 'inference, solaris is switching the model weights path '\n 'to the training output path.')\n self.model_path = self.config['training']['model_dest_path']\n if custom_model_dict is not None:\n custom_model_dict['weight_path'] = self.config[\n 'training']['model_dest_path']\n else:\n self.model_path = self.config.get('model_path', None)\n self.model = get_model(self.model_name, self.framework,\n self.model_path, pretrained=True,\n custom_model_dict=custom_model_dict)\n self.window_step_x = self.config['inference'].get('window_step_size_x',\n None)\n self.window_step_y = self.config['inference'].get('window_step_size_y',\n None)\n if self.window_step_x is None:\n self.window_step_x = self.config['data_specs']['width']\n if self.window_step_y is None:\n self.window_step_y = self.config['data_specs']['height']\n self.stitching_method = self.config['inference'].get(\n 'stitching_method', 'average')\n self.output_dir = self.config['inference']['output_dir']\n if not os.path.isdir(self.output_dir):\n os.makedirs(self.output_dir)\n\n def __call__(self, infer_df=None):\n \"\"\"Run inference.\n Arguments\n ---------\n infer_df : :class:`pandas.DataFrame` or `str`\n A :class:`pandas.DataFrame` with a column, ``'image'``, specifying\n paths to images for inference. Alternatively, `infer_df` can be a\n path to a CSV file containing the same information. 
Defaults to\n ``None``, in which case the file path specified in the Inferer's\n configuration dict is used.\n \"\"\"\n\n if infer_df is None:\n infer_df = get_infer_df(self.config)\n\n inf_tiler = InferenceTiler(\n self.framework,\n width=self.config['data_specs']['width'],\n height=self.config['data_specs']['height'],\n x_step=self.window_step_x,\n y_step=self.window_step_y,\n augmentations=process_aug_dict(\n self.config['inference_augmentation']))\n for idx, im_path in enumerate(infer_df['image']):\n temp_im = gdal.Open(im_path)\n proj = temp_im.GetProjection()\n gt = temp_im.GetGeoTransform()\n inf_input, idx_refs, (\n src_im_height, src_im_width) = inf_tiler(im_path)\n\n if self.framework == 'keras':\n subarr_preds = self.model.predict(inf_input,\n batch_size=self.batch_size)\n\n elif self.framework in ['torch', 'pytorch']:\n with torch.no_grad():\n self.model.eval()\n if torch.cuda.is_available():\n device = torch.device('cuda')\n self.model = self.model.cuda()\n else:\n device = torch.device('cpu')\n inf_input = torch.from_numpy(inf_input).float().to(device)\n # add additional input data, if applicable\n if self.config['data_specs'].get('additional_inputs',\n None) is not None:\n inf_input = [inf_input]\n for i in self.config['data_specs']['additional_inputs']:\n inf_input.append(\n infer_df[i].iloc[idx].to(device))\n\n subarr_preds = self.model(inf_input)\n subarr_preds = subarr_preds.cpu().data.numpy()\n stitched_result = stitch_images(subarr_preds,\n idx_refs=idx_refs,\n out_width=src_im_width,\n out_height=src_im_height,\n method=self.stitching_method)\n stitched_result = np.swapaxes(stitched_result, 1, 0)\n stitched_result = np.swapaxes(stitched_result, 2, 0)\n create_multiband_geotiff(stitched_result,\n os.path.join(self.output_dir,\n os.path.split(im_path)[1]),\n proj=proj, geo=gt, nodata=np.nan,\n out_format=gdal.GDT_Float32)\n\n\ndef get_infer_df(config):\n \"\"\"Get the inference df based on the contents of ``config`` .\n This function uses the logic described in the documentation for the config\n file to determine where to find images to be used for inference.\n See the docs and the comments in solaris/data/config_skeleton.yml for\n details.\n Arguments\n ---------\n config : dict\n The loaded configuration dict for model training and/or inference.\n Returns\n -------\n infer_df : :class:`dict`\n :class:`dict` containing at least one column: ``'image'`` . 
The values\n in this column correspond to the path to filenames to perform inference\n on.\n \"\"\"\n\n infer_df = get_data_paths(config['inference_data_csv'], infer=True)\n return infer_df\n", "import os\nfrom .core import _check_df_load, _check_gdf_load, _check_rasterio_im_load\nfrom .core import _check_geom, _check_crs\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nfrom affine import Affine\nimport rasterio\nfrom rasterio.warp import calculate_default_transform, Resampling\nfrom rasterio.warp import transform_bounds\nfrom shapely.affinity import affine_transform\nfrom shapely.wkt import loads\nfrom shapely.geometry import Point, Polygon, LineString\nfrom shapely.geometry import MultiLineString, MultiPolygon, mapping, box, shape\nfrom shapely.geometry.collection import GeometryCollection\nfrom shapely.ops import cascaded_union\nfrom osgeo import osr\nfrom osgeo import gdal\nimport json\nfrom warnings import warn\nimport sys\n\n\ndef reproject(input_object, input_crs=None,\n target_crs=None, target_object=None, dest_path=None,\n resampling_method='cubic'):\n \"\"\"Reproject a dataset (df, gdf, or image) to a new coordinate system.\n\n This function takes a georegistered image or a dataset of vector geometries\n and converts them to a new coordinate reference system. If no target CRS\n is provided, the data will be converted to the appropriate UTM zone by\n default. To convert a pixel-coordinate dataset to geographic coordinates or\n vice versa, use :func:`solaris.vector.polygon.georegister_px_df` or\n :func:`solaris.vector.polygon.geojson_to_px_gdf` instead.\n\n Arguments\n ---------\n input_object : `str` or :class:`rasterio.DatasetReader` or :class:`gdal.Dataset` or :class:`geopandas.GeoDataFrame`\n An object to transform to a new CRS. If a string, it must be a path\n to a georegistered image or vector dataset (e.g. a .GeoJSON). If the\n object itself does not contain georeferencing information, the\n coordinate reference system can be provided with `input_crs`.\n input_crs : int, optional\n The EPSG code integer for the input data's CRS. If provided and a CRS\n is also associated with `input_object`, this argument's value has\n precedence.\n target_crs : int, optional\n The EPSG code for the output projection. If values are not provided\n for this argument or `target_object`, the input data will be\n re-projected into the appropriate UTM zone. If both `target_crs` and\n `target_object` are provided, `target_crs` takes precedence (and a\n warning is raised).\n target_object : str, optional\n An object in the desired destination CRS. If neither this argument nor\n `target_crs` is provided, the input will be projected into the\n appropriate UTM zone. `target_crs` takes precedence if both it and\n `target_object` are provided.\n dest_path : str, optional\n The path to save the output to (if desired). This argument is only\n required if the input is a :class:`gdal.Dataset`; otherwise, it is\n optional.\n resampling_method : str, optional\n The resampling method to use during reprojection of raster data. 
**Only\n has an effect if the input is a :class:`rasterio.DatasetReader` !**\n Possible values are\n ``['cubic' (default), 'bilinear', 'nearest', 'average']``.\n\n Returns\n -------\n output : :class:`rasterio.DatasetReader` or :class:`gdal.Dataset` or :class:`geopandas.GeoDataFrame`\n An output in the same format as `input_object`, but reprojected\n into the destination CRS.\n \"\"\"\n input_data, input_type = _parse_geo_data(input_object)\n if input_crs is None:\n input_crs = _check_crs(get_crs(input_data))\n else:\n input_crs = _check_crs(input_crs)\n if target_object is not None:\n target_data, _ = _parse_geo_data(target_object)\n else:\n target_data = None\n # get CRS from target_object if it's not provided\n if target_crs is None and target_data is not None:\n target_crs = get_crs(target_data)\n\n if target_crs is not None:\n target_crs = _check_crs(target_crs)\n output = _reproject(input_data, input_type, input_crs, target_crs,\n dest_path, resampling_method)\n else:\n output = reproject_to_utm(input_data, input_type, input_crs,\n dest_path, resampling_method)\n return output\n\n\ndef _reproject(input_data, input_type, input_crs, target_crs, dest_path,\n resampling_method='cubic'):\n \"\"\"Reproject `input_data` into `target_crs`; helper for :func:`reproject`.\"\"\"\n input_crs = _check_crs(input_crs)\n target_crs = _check_crs(target_crs)\n if input_type == 'vector':\n output = input_data.to_crs(target_crs)\n if dest_path is not None:\n output.to_file(dest_path, driver='GeoJSON')\n\n elif input_type == 'raster':\n\n if isinstance(input_data, rasterio.DatasetReader):\n transform, width, height = calculate_default_transform(\n input_crs.to_wkt(\"WKT1_GDAL\"), target_crs.to_wkt(\"WKT1_GDAL\"),\n input_data.width, input_data.height, *input_data.bounds\n )\n kwargs = input_data.meta.copy()\n kwargs.update({'crs': target_crs.to_wkt(\"WKT1_GDAL\"),\n 'transform': transform,\n 'width': width,\n 'height': height})\n\n if dest_path is not None:\n with rasterio.open(dest_path, 'w', **kwargs) as dst:\n for band_idx in range(1, input_data.count + 1):\n rasterio.warp.reproject(\n source=rasterio.band(input_data, band_idx),\n destination=rasterio.band(dst, band_idx),\n src_transform=input_data.transform,\n src_crs=input_data.crs,\n dst_transform=transform,\n dst_crs=target_crs.to_wkt(\"WKT1_GDAL\"),\n resampling=getattr(Resampling, resampling_method)\n )\n output = rasterio.open(dest_path)\n input_data.close()\n\n else:\n output = np.zeros(shape=(height, width, input_data.count))\n for band_idx in range(1, input_data.count + 1):\n rasterio.warp.reproject(\n source=rasterio.band(input_data, band_idx),\n destination=output[:, :, band_idx-1],\n src_transform=input_data.transform,\n src_crs=input_data.crs,\n dst_transform=transform,\n dst_crs=target_crs,\n resampling=getattr(Resampling, resampling_method)\n )\n\n elif isinstance(input_data, gdal.Dataset):\n if dest_path is not None:\n gdal.Warp(dest_path, input_data,\n dstSRS='EPSG:' + str(target_crs.to_epsg()))\n output = gdal.Open(dest_path)\n else:\n raise ValueError('An output path must be provided for '\n 'reprojecting GDAL datasets.')\n return output\n\n\ndef reproject_to_utm(input_data, input_type, input_crs=None, dest_path=None,\n resampling_method='cubic'):\n \"\"\"Convert an input to a UTM CRS (after determining the correct UTM zone).\n\n \"\"\"\n if input_crs is None:\n input_crs = get_crs(input_data)\n if input_crs is None:\n raise ValueError('An input CRS must be provided by input_data or'\n ' input_crs.')\n input_crs = _check_crs(input_crs)\n\n bounds = get_bounds(input_data, crs=_check_crs(4326)) # need in WGS84 for UTM zone\n midpoint = [(bounds[1] + bounds[3])/2., (bounds[0] + bounds[2])/2.]\n utm_epsg = latlon_to_utm_epsg(*midpoint)\n\n output = _reproject(input_data, input_type=input_type, input_crs=input_crs,\n target_crs=utm_epsg, dest_path=dest_path,\n resampling_method=resampling_method)\n # cleanup\n if os.path.isfile('tmp'):\n os.remove('tmp')\n\n return output\n\n\ndef get_bounds(geo_obj, crs=None):\n \"\"\"Get the ``[left, bottom, right, top]`` bounds in any CRS.\n\n Arguments\n ---------\n geo_obj : a georeferenced raster or vector dataset.\n crs : int, optional\n The EPSG code (or other CRS format supported by rasterio.warp)\n for the CRS the bounds should be returned in. If not provided,\n the bounds will be returned in the same crs as `geo_obj`.\n\n Returns\n -------\n bounds : list\n ``[left, bottom, right, top]`` bounds in the input crs (if `crs` is\n ``None``) or in `crs` if it was provided.\n \"\"\"\n input_data, input_type = _parse_geo_data(geo_obj)\n if input_type == 'vector':\n bounds = list(input_data.geometry.total_bounds)\n elif input_type == 'raster':\n if isinstance(input_data, rasterio.DatasetReader):\n bounds = list(input_data.bounds)\n elif isinstance(input_data, gdal.Dataset):\n input_gt = input_data.GetGeoTransform()\n min_x = input_gt[0]\n max_x = min_x + input_gt[1]*input_data.RasterXSize\n max_y = input_gt[3]\n min_y = max_y + input_gt[5]*input_data.RasterYSize\n\n bounds = [min_x, min_y, max_x, max_y]\n\n if crs is not None:\n crs = _check_crs(crs)\n src_crs = get_crs(input_data)\n # transform bounds to desired CRS\n bounds = transform_bounds(src_crs.to_wkt(\"WKT1_GDAL\"),\n crs.to_wkt(\"WKT1_GDAL\"), *bounds)\n\n return bounds\n\n\ndef get_crs(obj):\n \"\"\"Get a coordinate reference system from any georegistered object.\"\"\"\n if isinstance(obj, gpd.GeoDataFrame):\n return _check_crs(obj.crs)\n elif isinstance(obj, rasterio.DatasetReader):\n return _check_crs(obj.crs)\n elif isinstance(obj, gdal.Dataset):\n # extract the EPSG code from the dataset's projection WKT\n return _check_crs(int(osr.SpatialReference(wkt=obj.GetProjection()).GetAttrValue(\n 'AUTHORITY', 1)))\n else:\n raise TypeError(\"solaris doesn't know how to extract a crs from an \"\n \"object of type {}\".format(type(obj)))\n\n\ndef _parse_geo_data(input):\n if isinstance(input, str):\n if input.lower().endswith('json') or input.lower().endswith('csv'):\n input_type = 'vector'\n input_data = _check_df_load(input)\n elif input.lower().endswith('tif') or input.lower().endswith('tiff'):\n input_type = 'raster'\n input_data = _check_rasterio_im_load(input)\n else:\n input_data = input\n if isinstance(input_data, pd.DataFrame):\n input_type = 'vector'\n elif isinstance(\n input_data, rasterio.DatasetReader\n ) or isinstance(\n input_data, gdal.Dataset\n ):\n input_type = 'raster'\n else:\n raise ValueError('The input format {} is not compatible with '\n 'solaris.'.format(type(input)))\n return input_data, input_type\n\n\ndef reproject_geometry(input_geom, input_crs=None, target_crs=None,\n affine_obj=None):\n \"\"\"Reproject a geometry or coordinate into a new CRS.\n\n Arguments\n ---------\n input_geom : `str`, `list`, or `Shapely <https://shapely.readthedocs.io>`_ geometry\n A geometry object to re-project. This can be a 2-member ``list``, in\n which case `input_geom` is assumed to correspond to ``[x, y]``\n coordinates in `input_crs`. It can also be a Shapely geometry object or\n a WKT string.\n input_crs : int, optional\n The coordinate reference system for `input_geom`'s coordinates, as an\n EPSG :class:`int`. Required unless `affine_obj` is provided.\n target_crs : int, optional\n The target coordinate reference system to re-project the geometry into.\n If not provided, the appropriate UTM zone will be selected by default,\n unless `affine_obj` is provided (in which case CRSs are ignored).\n affine_obj : :class:`affine.Affine`, optional\n An :class:`affine.Affine` object (or a ``[a, b, c, d, e, f]`` list to\n convert to that format) to use for transformation. Only used if\n neither `input_crs` nor `target_crs` is provided.\n\n Returns\n -------\n output_geom : Shapely geometry\n A shapely geometry object:\n - in `target_crs`, if one was provided;\n - in the appropriate UTM zone, if `input_crs` was provided and\n `target_crs` was not;\n - with `affine_obj` applied to it if neither `input_crs` nor\n `target_crs` was provided.\n \"\"\"\n input_geom = _check_geom(input_geom)\n\n if input_crs is not None:\n input_crs = _check_crs(input_crs)\n if target_crs is None:\n geom = reproject_geometry(input_geom, input_crs,\n target_crs=_check_crs(4326))\n target_crs = latlon_to_utm_epsg(geom.centroid.y, geom.centroid.x)\n target_crs = _check_crs(target_crs)\n gdf = gpd.GeoDataFrame(geometry=[input_geom], crs=input_crs.to_wkt())\n # create a new instance of the same geometry class as above with the\n # new coordinates\n output_geom = gdf.to_crs(target_crs.to_wkt()).iloc[0]['geometry']\n\n else:\n if affine_obj is None:\n raise ValueError('If an input CRS is not provided, '\n 'affine_obj is required to complete the '\n 'transformation.')\n elif isinstance(affine_obj, Affine):\n affine_obj = affine_to_list(affine_obj)\n\n output_geom = affine_transform(input_geom, affine_obj)\n\n return output_geom\n\n\ndef gdf_get_projection_unit(vector_file):\n \"\"\"Get the projection unit for a vector_file or gdf.\n\n Arguments\n ---------\n vector_file : :py:class:`geopandas.GeoDataFrame` or geojson/shapefile\n A vector file or gdf with georeferencing\n\n Notes\n -----\n If vector file is already in UTM coords, the projection WKT is complex:\n https://www.spatialreference.org/ref/epsg/wgs-84-utm-zone-11n/html/\n In this case, return the second instance of 'UNIT'.\n\n Returns\n -------\n unit : String\n The unit, i.e. meter, metre, or degree, of the projection\n \"\"\"\n c = _check_gdf_load(vector_file).crs\n return get_projection_unit(c)\n\n\ndef raster_get_projection_unit(image):\n \"\"\"Get the projection unit for an image.\n\n Arguments\n ---------\n image : raster image, GeoTIFF or other format\n A raster file with georeferencing\n\n Notes\n -----\n If raster is already in UTM coords, the projection WKT is complex:\n https://www.spatialreference.org/ref/epsg/wgs-84-utm-zone-11n/html/\n In this case, return the second instance of 'UNIT'.\n\n Returns\n -------\n unit : String\n The unit, i.e. 
meters or degrees, of the projection\n \"\"\"\n c = _check_rasterio_im_load(image).crs\n return get_projection_unit(c)\n\n\ndef get_projection_unit(crs):\n \"\"\"Get the units of a specific SRS.\n\n Arguments\n ---------\n crs : :class:`pyproj.crs.CRS`, :class:`rasterio.crs.CRS`, `str`, or `int`\n The coordinate reference system to retrieve a unit for.\n\n Returns\n -------\n unit : str\n The string-formatted unit.\n \"\"\"\n crs = _check_crs(crs)\n unit = crs.axis_info[0].unit_name\n\n return unit\n\n\n\ndef list_to_affine(xform_mat):\n \"\"\"Create an Affine from a list or array-formatted [a, b, d, e, xoff, yoff]\n\n Arguments\n ---------\n xform_mat : `list` or :class:`numpy.array`\n A `list` of values to convert to an affine object.\n\n Returns\n -------\n aff : :class:`affine.Affine`\n An affine transformation object.\n \"\"\"\n # first make sure it's not in gdal order\n if len(xform_mat) > 6:\n xform_mat = xform_mat[0:6]\n if rasterio.transform.tastes_like_gdal(xform_mat):\n return Affine.from_gdal(*xform_mat)\n else:\n return Affine(*xform_mat)\n\n\ndef affine_to_list(affine_obj):\n \"\"\"Convert a :class:`affine.Affine` instance to a list for Shapely.\"\"\"\n return [affine_obj.a, affine_obj.b,\n affine_obj.d, affine_obj.e,\n affine_obj.xoff, affine_obj.yoff]\n\n\ndef geometries_internal_intersection(polygons):\n \"\"\"Get the intersection geometries between all geometries in a set.\n\n Arguments\n ---------\n polygons : `list`-like\n A `list`-like containing geometries. These will be placed in a\n :class:`geopandas.GeoSeries` object to take advantage of `rtree`\n spatial indexing.\n\n Returns\n -------\n intersect_list\n A `list` of geometric intersections between polygons in `polygons`, in\n the same CRS as the input.\n \"\"\"\n # convert `polygons` to geoseries and get spatialindex\n # TODO: Implement test to see if `polygon` items are actual polygons or\n # WKT strings\n if isinstance(polygons, gpd.GeoSeries):\n gs = polygons\n else:\n gs = gpd.GeoSeries(polygons).reset_index(drop=True)\n sindex = gs.sindex\n gs_bboxes = gs.apply(lambda x: x.bounds)\n\n # find indices of polygons that overlap in gs\n intersect_lists = gs_bboxes.apply(lambda x: list(sindex.intersection(x)))\n intersect_lists = intersect_lists.dropna()\n # drop all objects that only have self-intersects\n # first, filter down to the ones that have _some_ intersection with others\n intersect_lists = intersect_lists[\n intersect_lists.apply(lambda x: len(x) > 1)]\n if len(intersect_lists) == 0: # if there are no real intersections\n return GeometryCollection() # same result as failed union below\n # the below is a royal pain to follow. what it does is create a dataframe\n # with two columns: 'gs_idx' and 'intersectors'. 'gs_idx' corresponds to\n # a polygon's original index in gs, and 'intersectors' gives a list of\n # gs indices for polygons that intersect with its bbox.\n intersect_lists.name = 'intersectors'\n intersect_lists.index.name = 'gs_idx'\n intersect_lists = intersect_lists.reset_index()\n # first, we get rid of self-intersection indices in 'intersectors':\n intersect_lists['intersectors'] = intersect_lists.apply(\n lambda x: [i for i in x['intersectors'] if i != x['gs_idx']],\n axis=1)\n # for each row, we next create a union of the polygons in 'intersectors',\n # and find the intersection of that with the polygon at gs[gs_idx]. this\n # (Multi)Polygon output corresponds to all of the intersections for the\n # polygon at gs[gs_idx]. 
we add that to a list of intersections stored in\n # output_polys.\n output_polys = []\n _ = intersect_lists.apply(lambda x: output_polys.append(\n gs[x['gs_idx']].intersection(cascaded_union(gs[x['intersectors']]))\n ), axis=1)\n # we then generate the union of all of these intersections and return it.\n return cascaded_union(output_polys)\n\n\ndef split_multi_geometries(gdf, obj_id_col=None, group_col=None,\n geom_col='geometry'):\n \"\"\"Split apart MultiPolygon or MultiLineString geometries.\n\n Arguments\n ---------\n gdf : :class:`geopandas.GeoDataFrame` or `str`\n A :class:`geopandas.GeoDataFrame` or path to a geojson containing\n geometries.\n obj_id_col : str, optional\n If one exists, the name of the column that uniquely identifies each\n geometry (e.g. the ``\"BuildingId\"`` column in many SpaceNet datasets).\n This will be tracked so multiple objects don't get produced with\n the same ID. Note that object ID column will be renumbered on output.\n If passed, `group_col` must also be provided.\n group_col : str, optional\n A column to identify groups for sequential numbering (for example,\n ``'ImageId'`` for sequential number of ``'BuildingId'``). Must be\n provided if `obj_id_col` is passed.\n geom_col : str, optional\n The name of the column in `gdf` that corresponds to geometry. Defaults\n to ``'geometry'``.\n\n Returns\n -------\n :class:`geopandas.GeoDataFrame`\n A `geopandas.GeoDataFrame` that's identical to the input, except with\n the multipolygons split into separate rows, and the object ID column\n renumbered (if one exists).\n\n \"\"\"\n if obj_id_col and not group_col:\n raise ValueError('group_col must be provided if obj_id_col is used.')\n gdf2 = _check_gdf_load(gdf)\n # drop duplicate columns (happens if loading a csv with geopandas)\n gdf2 = gdf2.loc[:, ~gdf2.columns.duplicated()]\n if len(gdf2) == 0:\n return gdf2\n # check if the values in gdf2[geometry] are polygons; if strings, do loads\n if isinstance(gdf2[geom_col].iloc[0], str):\n gdf2[geom_col] = gdf2[geom_col].apply(loads)\n split_geoms_gdf = pd.concat(\n gdf2.apply(_split_multigeom_row, axis=1, geom_col=geom_col).tolist())\n gdf2 = gdf2.drop(index=split_geoms_gdf.index.unique()) # remove multipolygons\n gdf2 = gpd.GeoDataFrame(pd.concat([gdf2, split_geoms_gdf],\n ignore_index=True), crs=gdf2.crs)\n\n if obj_id_col:\n gdf2[obj_id_col] = gdf2.groupby(group_col).cumcount()+1\n\n return gdf2\n\n\ndef get_subgraph(G, node_subset):\n \"\"\"\n Create a subgraph from G. 
Code almost directly copied from osmnx.\n\n Arguments\n ---------\n G : :class:`networkx.MultiDiGraph`\n A graph to be subsetted\n node_subset : `list`-like\n The subset of nodes to induce a subgraph of `G`\n\n Returns\n -------\n G2 : :class:`networkx.MultiDiGraph`\n The subgraph of G that includes node_subset\n \"\"\"\n\n node_subset = set(node_subset)\n\n # copy nodes into new graph\n G2 = G.fresh_copy()\n G2.add_nodes_from((n, G.nodes[n]) for n in node_subset)\n\n # copy edges to new graph, including parallel edges\n if G2.is_multigraph():\n G2.add_edges_from(\n (n, nbr, key, d)\n for n, nbrs in G.adj.items() if n in node_subset\n for nbr, keydict in nbrs.items() if nbr in node_subset\n for key, d in keydict.items())\n else:\n G2.add_edges_from(\n (n, nbr, d)\n for n, nbrs in G.adj.items() if n in node_subset\n for nbr, d in nbrs.items() if nbr in node_subset)\n\n # update graph attribute dict, and return graph\n G2.graph.update(G.graph)\n return G2\n\n\ndef _split_multigeom_row(gdf_row, geom_col):\n new_rows = []\n if isinstance(gdf_row[geom_col], MultiPolygon) \\\n or isinstance(gdf_row[geom_col], MultiLineString):\n new_polys = _split_multigeom(gdf_row[geom_col])\n for poly in new_polys:\n row_w_poly = gdf_row.copy()\n row_w_poly[geom_col] = poly\n new_rows.append(row_w_poly)\n return pd.DataFrame(new_rows)\n\n\ndef _split_multigeom(multigeom):\n return list(multigeom)\n\n\ndef _reduce_geom_precision(geom, precision=2):\n geojson = mapping(geom)\n geojson['coordinates'] = np.round(np.array(geojson['coordinates']),\n precision)\n return shape(geojson)\n\n\ndef latlon_to_utm_epsg(latitude, longitude, return_proj4=False):\n \"\"\"Get the WGS84 UTM EPSG code based on a latitude and longitude value.\n\n Arguments\n ---------\n latitude : numeric\n The latitude value for the coordinate.\n longitude : numeric\n The longitude value for the coordinate.\n return_proj4 : bool, optional\n Should the proj4 string be returned as well as the EPSG code? Defaults\n to no (``False``).\n\n Returns\n -------\n epsg : int\n The integer corresponding to the EPSG code for the relevant UTM zone\n in WGS 84.\n proj4 : str\n The proj4 string for the CRS. Only returned if ``return_proj4=True``.\n \"\"\"\n zone_number, zone_letter = _latlon_to_utm_zone(latitude, longitude)\n\n if return_proj4:\n if zone_letter == 'N':\n direction_indicator = '+north'\n elif zone_letter == 'S':\n direction_indicator = '+south'\n proj = \"+proj=utm +zone={} {}\".format(zone_number,\n direction_indicator)\n proj += \" +ellps=WGS84 +datum=WGS84 +units=m +no_defs\"\n\n if zone_letter == 'N':\n epsg = 32600 + zone_number\n elif zone_letter == 'S':\n epsg = 32700 + zone_number\n\n return (epsg, proj) if return_proj4 else epsg\n\n\ndef _latlon_to_utm_zone(latitude, longitude, ns_only=True):\n \"\"\"Convert latitude and longitude to a UTM zone ID.\n\n This function is modified from\n `the python utm library <https://github.com/Turbo87/utm>`_.\n\n Arguments\n ---------\n latitude : numeric or :class:`numpy.ndarray`\n The latitude value of a coordinate.\n longitude : numeric or :class:`numpy.ndarray`\n The longitude value of a coordinate.\n ns_only : bool, optional\n Should the full list of possible zone numbers be used or just the N/S\n options? Defaults to N/S only (``True``).\n\n Returns\n -------\n zone_number : int\n The numeric portion of the UTM zone ID.\n zone_letter : str\n The string portion of the UTM zone ID. Note that by default this\n function uses only the N/S designation rather than the full range of\n possible letters.\n \"\"\"\n\n # If the input is a numpy array, just use the first element\n # User responsibility to make sure that all points are in one zone\n if isinstance(latitude, np.ndarray):\n latitude = latitude.flat[0]\n if isinstance(longitude, np.ndarray):\n longitude = longitude.flat[0]\n\n utm_val = None\n\n if 56 <= latitude < 64 and 3 <= longitude < 12:\n utm_val = 32\n\n elif 72 <= latitude <= 84 and longitude >= 0:\n if longitude < 9:\n utm_val = 31\n elif longitude < 21:\n utm_val = 33\n elif longitude < 33:\n utm_val = 35\n elif longitude < 42:\n utm_val = 37\n\n if latitude < 0:\n zone_letter = \"S\"\n else:\n zone_letter = \"N\"\n\n if not -80 <= latitude <= 84:\n warn('Warning: UTM projections not recommended for '\n 'latitude {}'.format(latitude))\n if utm_val is None:\n utm_val = int((longitude + 180) / 6) + 1\n\n return utm_val, zone_letter\n\n\ndef _get_coords(geom):\n \"\"\"Get coordinates from various shapely geometry types.\"\"\"\n if isinstance(geom, Point) or isinstance(geom, LineString):\n return geom.coords.xy\n elif isinstance(geom, Polygon):\n return geom.exterior.coords.xy\n\n\ndef bbox_corners_to_coco(bbox):\n \"\"\"Convert bbox from ``[minx, miny, maxx, maxy]`` to coco format.\n\n COCO formats bounding boxes as ``[minx, miny, width, height]``.\n\n Arguments\n ---------\n bbox : :class:`list`-like of numerics\n A 4-element list of the form ``[minx, miny, maxx, maxy]``.\n\n Returns\n -------\n coco_bbox : list\n ``[minx, miny, width, height]`` shape.\n \"\"\"\n\n return [bbox[0], bbox[1], bbox[2]-bbox[0], bbox[3]-bbox[1]]\n\n\ndef polygon_to_coco(polygon):\n \"\"\"Convert a geometry to COCO polygon format.\"\"\"\n if isinstance(polygon, Polygon):\n coords = polygon.exterior.coords.xy\n elif isinstance(polygon, str): # assume it's WKT\n coords = loads(polygon).exterior.coords.xy\n elif isinstance(polygon, MultiPolygon):\n raise ValueError(\"You have MultiPolygon types in your label df. Remove, explode, or fix these to be Polygon geometry types.\")\n else:\n raise ValueError('polygon must be a shapely geometry or WKT.')\n # zip together x,y pairs\n coords = list(zip(coords[0], coords[1]))\n coords = [item for coordinate in coords for item in coordinate]\n\n return coords\n\n\ndef split_geom(geometry, tile_size, resolution=None,\n use_projection_units=False, src_img=None):\n \"\"\"Splits a vector into approximately equal sized tiles.\n\n Adapted from @lossyrob's Gist__\n\n .. __: https://gist.github.com/lossyrob/7b620e6d2193cb55fbd0bffacf27f7f2\n\n The more complex the geometry, the slower this will run, but geometries with\n around 10000 coordinates run in a few seconds. You can simplify\n geometries with shapely.geometry.Polygon.simplify if necessary.\n\n Arguments\n ---------\n geometry : shapely.geometry.Polygon, str, or list-like\n A shapely.geometry.Polygon, path to a single feature geojson,\n or list-like bounding box shaped like [left, bottom, right, top].\n The geometry must be in the projection coordinates corresponding to\n the resolution units.\n tile_size : `tuple` of `int`s\n The size of the input tiles in ``(y, x)`` coordinates. By default,\n this is in pixel units; this can be changed to projection units using\n the `use_projection_units` argument.\n use_projection_units : bool, optional\n Is `tile_size` in pixel units (default) or distance units? To set to distance units\n use ``use_projection_units=True``. If False, resolution must be supplied.\n resolution: `tuple` of `float`s, optional\n (x resolution, y resolution). Used by default if use_projection_units\n is False. Can be acquired from rasterio dataset object's metadata.\n src_img: `str` or `raster`, optional\n A rasterio raster object or path to a geotiff. The bounds of this raster and the geometry will be\n intersected and the result of the intersection will be tiled. Useful in cases where the extent of\n collected labels and source imagery partially overlap. The src_img must have the same projection units\n as the geometry.\n\n Returns\n -------\n tile_bounds : list (containing sublists like [left, bottom, right, top])\n\n \"\"\"\n if isinstance(geometry, str):\n gj = json.loads(open(geometry).read())\n\n features = gj['features']\n if not len(features) == 1:\n print('Feature collection must only contain one feature')\n sys.exit(1)\n\n geometry = shape(features[0]['geometry'])\n\n elif isinstance(geometry, list) or isinstance(geometry, np.ndarray):\n assert len(geometry) == 4\n geometry = box(*geometry)\n\n if use_projection_units is False:\n if resolution is None:\n print(\"Resolution must be specified if use_projection_units is\"\n \" False. Access it from src raster meta.\")\n return\n # convert pixel units to CRS units to use during image tiling.\n # NOTE: This will be imperfect for large AOIs where there isn't\n # a constant relationship between the src CRS units and src pixel\n # units.\n if isinstance(resolution, (float, int)):\n resolution = (resolution, resolution)\n tmp_tile_size = [tile_size[0]*resolution[0],\n tile_size[1]*resolution[1]]\n else:\n tmp_tile_size = tile_size\n\n if src_img is not None:\n src_img = _check_rasterio_im_load(src_img)\n geometry = geometry.intersection(box(*src_img.bounds))\n bounds = geometry.bounds\n else:\n bounds = geometry.bounds\n\n xmin = bounds[0]\n xmax = bounds[2]\n ymin = bounds[1]\n ymax = bounds[3]\n x_extent = xmax - xmin\n y_extent = ymax - ymin\n x_steps = np.ceil(x_extent/tmp_tile_size[1])\n y_steps = np.ceil(y_extent/tmp_tile_size[0])\n x_mins = np.arange(xmin, xmin + tmp_tile_size[1]*x_steps,\n tmp_tile_size[1])\n y_mins = np.arange(ymin, ymin + tmp_tile_size[0]*y_steps,\n tmp_tile_size[0])\n tile_bounds = [\n (i, j, i+tmp_tile_size[1], j+tmp_tile_size[0])\n for i in x_mins for j in y_mins if not geometry.intersection(\n box(*(i, j, i+tmp_tile_size[1], j+tmp_tile_size[0]))).is_empty\n ]\n return tile_bounds\n" ]
[ [ "numpy.swapaxes", "torch.from_numpy", "torch.no_grad", "torch.cuda.is_available", "torch.device" ], [ "pandas.concat", "numpy.arange", "pandas.DataFrame", "numpy.ceil", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Akhilesh271988/numpy
[ "1ab7e8fbf90ac4a81d2ffdde7d78ec464dccb02e", "1ab7e8fbf90ac4a81d2ffdde7d78ec464dccb02e" ]
[ "numpy/distutils/__init__.py", "numpy/lib/function_base.py" ]
[ "\"\"\"\nAn enhanced distutils, providing support for Fortran compilers, for BLAS,\nLAPACK and other common libraries for numerical computing, and more.\n\nPublic submodules are::\n\n misc_util\n system_info\n cpu_info\n log\n exec_command\n\nFor details, please see the *Packaging* and *NumPy Distutils User Guide*\nsections of the NumPy Reference Guide.\n\nFor configuring the preference for and location of libraries like BLAS and\nLAPACK, and for setting include paths and similar build options, please see\n``site.cfg.example`` in the root of the NumPy repository or sdist.\n\n\"\"\"\n\nimport warnings\n\n# Must import local ccompiler ASAP in order to get\n# customized CCompiler.spawn effective.\nfrom . import ccompiler\nfrom . import unixccompiler\n\nfrom .npy_pkg_config import *\n\nwarnings.warn(\"\\n\\n\"\n \" `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\\n\"\n \" of the deprecation of `distutils` itself. It will be removed for\\n\"\n \" Python >= 3.12. For older Python versions it will remain present.\\n\"\n \" It is recommended to use `setuptools < 60.0` for those Python versions.\\n\"\n \" For more details, see:\\n\"\n \" https://numpy.org/devdocs/reference/distutils_status_migration.html \\n\\n\",\n DeprecationWarning, stacklevel=2\n)\ndel warnings\n\n# If numpy is installed, add distutils.test()\ntry:\n from . import __config__\n # Normally numpy is installed if the above import works, but an interrupted\n # in-place build could also have left a __config__.py. In that case the\n # next import may still fail, so keep it inside the try block.\n from numpy._pytesttester import PytestTester\n test = PytestTester(__name__)\n del PytestTester\nexcept ImportError:\n pass\n\n\ndef customized_fcompiler(plat=None, compiler=None):\n from numpy.distutils.fcompiler import new_fcompiler\n c = new_fcompiler(plat=plat, compiler=compiler)\n c.customize()\n return c\n\ndef customized_ccompiler(plat=None, compiler=None, verbose=1):\n c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)\n c.customize('')\n return c\n", "import collections.abc\nimport functools\nimport re\nimport sys\nimport warnings\n\nimport numpy as np\nimport numpy.core.numeric as _nx\nfrom numpy.core import transpose\nfrom numpy.core.numeric import (\n ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty,\n ndarray, take, dot, where, intp, integer, isscalar, absolute\n )\nfrom numpy.core.umath import (\n pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,\n mod, exp, not_equal, subtract\n )\nfrom numpy.core.fromnumeric import (\n ravel, nonzero, partition, mean, any, sum\n )\nfrom numpy.core.numerictypes import typecodes\nfrom numpy.core.overrides import set_module\nfrom numpy.core import overrides\nfrom numpy.core.function_base import add_newdoc\nfrom numpy.lib.twodim_base import diag\nfrom numpy.core.multiarray import (\n _insert, add_docstring, bincount, normalize_axis_index, _monotonicity,\n interp as compiled_interp, interp_complex as compiled_interp_complex\n )\nfrom numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc\n\nimport builtins\n\n# needed in this module for compatibility\nfrom numpy.lib.histograms import histogram, histogramdd # noqa: F401\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\n__all__ = [\n 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',\n 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip',\n 'rot90', 'extract', 'place', 
'vectorize', 'asarray_chkfinite', 'average',\n 'bincount', 'digitize', 'cov', 'corrcoef',\n 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',\n 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',\n 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc',\n 'quantile'\n ]\n\n# _QuantileMethods is a dictionary listing all the supported methods to\n# compute quantile/percentile.\n#\n# Below virtual_index refer to the index of the element where the percentile\n# would be found in the sorted sample.\n# When the sample contains exactly the percentile wanted, the virtual_index is\n# an integer to the index of this element.\n# When the percentile wanted is in between two elements, the virtual_index\n# is made of a integer part (a.k.a 'i' or 'left') and a fractional part\n# (a.k.a 'g' or 'gamma')\n#\n# Each method in _QuantileMethods has two properties\n# get_virtual_index : Callable\n# The function used to compute the virtual_index.\n# fix_gamma : Callable\n# A function used for discret methods to force the index to a specific value.\n_QuantileMethods = dict(\n # --- HYNDMAN and FAN METHODS\n # Discrete methods\n inverted_cdf=dict(\n get_virtual_index=lambda n, quantiles: _inverted_cdf(n, quantiles),\n fix_gamma=lambda gamma, _: gamma, # should never be called\n ),\n averaged_inverted_cdf=dict(\n get_virtual_index=lambda n, quantiles: (n * quantiles) - 1,\n fix_gamma=lambda gamma, _: _get_gamma_mask(\n shape=gamma.shape,\n default_value=1.,\n conditioned_value=0.5,\n where=gamma == 0),\n ),\n closest_observation=dict(\n get_virtual_index=lambda n, quantiles: _closest_observation(n,\n quantiles),\n fix_gamma=lambda gamma, _: gamma, # should never be called\n ),\n # Continuous methods\n interpolated_inverted_cdf=dict(\n get_virtual_index=lambda n, quantiles:\n _compute_virtual_index(n, quantiles, 0, 1),\n fix_gamma=lambda gamma, _: gamma,\n ),\n hazen=dict(\n get_virtual_index=lambda n, quantiles:\n _compute_virtual_index(n, quantiles, 0.5, 0.5),\n fix_gamma=lambda gamma, _: gamma,\n ),\n weibull=dict(\n get_virtual_index=lambda n, quantiles:\n _compute_virtual_index(n, quantiles, 0, 0),\n fix_gamma=lambda gamma, _: gamma,\n ),\n # Default method.\n # To avoid some rounding issues, `(n-1) * quantiles` is preferred to\n # `_compute_virtual_index(n, quantiles, 1, 1)`.\n # They are mathematically equivalent.\n linear=dict(\n get_virtual_index=lambda n, quantiles: (n - 1) * quantiles,\n fix_gamma=lambda gamma, _: gamma,\n ),\n median_unbiased=dict(\n get_virtual_index=lambda n, quantiles:\n _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0),\n fix_gamma=lambda gamma, _: gamma,\n ),\n normal_unbiased=dict(\n get_virtual_index=lambda n, quantiles:\n _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0),\n fix_gamma=lambda gamma, _: gamma,\n ),\n # --- OTHER METHODS\n lower=dict(\n get_virtual_index=lambda n, quantiles: np.floor(\n (n - 1) * quantiles).astype(np.intp),\n fix_gamma=lambda gamma, _: gamma,\n # should never be called, index dtype is int\n ),\n higher=dict(\n get_virtual_index=lambda n, quantiles: np.ceil(\n (n - 1) * quantiles).astype(np.intp),\n fix_gamma=lambda gamma, _: gamma,\n # should never be called, index dtype is int\n ),\n midpoint=dict(\n get_virtual_index=lambda n, quantiles: 0.5 * (\n np.floor((n - 1) * quantiles)\n + np.ceil((n - 1) * quantiles)),\n fix_gamma=lambda gamma, index: _get_gamma_mask(\n shape=gamma.shape,\n default_value=0.5,\n conditioned_value=0.,\n where=index % 1 == 0),\n ),\n nearest=dict(\n get_virtual_index=lambda 
n, quantiles: np.around(\n (n - 1) * quantiles).astype(np.intp),\n fix_gamma=lambda gamma, _: gamma,\n # should never be called, index dtype is int\n ))\n\n\ndef _rot90_dispatcher(m, k=None, axes=None):\n return (m,)\n\n\n@array_function_dispatch(_rot90_dispatcher)\ndef rot90(m, k=1, axes=(0, 1)):\n \"\"\"\n Rotate an array by 90 degrees in the plane specified by axes.\n\n Rotation direction is from the first towards the second axis.\n\n Parameters\n ----------\n m : array_like\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes: (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n .. versionadded:: 1.12.0\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n See Also\n --------\n flip : Reverse the order of elements in an array along the given axis.\n fliplr : Flip an array horizontally.\n flipud : Flip an array vertically.\n\n Notes\n -----\n ``rot90(m, k=1, axes=(1,0))`` is the reverse of\n ``rot90(m, k=1, axes=(0,1))``\n\n ``rot90(m, k=1, axes=(1,0))`` is equivalent to\n ``rot90(m, k=-1, axes=(0,1))``\n\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], int)\n >>> m\n array([[1, 2],\n [3, 4]])\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]])\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]])\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1, 3],\n [0, 2]],\n [[5, 7],\n [4, 6]]])\n\n \"\"\"\n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError(\"len(axes) must be 2.\")\n\n m = asanyarray(m)\n\n if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:\n raise ValueError(\"Axes must be different.\")\n\n if (axes[0] >= m.ndim or axes[0] < -m.ndim\n or axes[1] >= m.ndim or axes[1] < -m.ndim):\n raise ValueError(\"Axes={} out of range for array of ndim={}.\"\n .format(axes, m.ndim))\n\n k %= 4\n\n if k == 0:\n return m[:]\n if k == 2:\n return flip(flip(m, axes[0]), axes[1])\n\n axes_list = arange(0, m.ndim)\n (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],\n axes_list[axes[0]])\n\n if k == 1:\n return transpose(flip(m, axes[1]), axes_list)\n else:\n # k == 3\n return flip(transpose(m, axes_list), axes[1])\n\n\ndef _flip_dispatcher(m, axis=None):\n return (m,)\n\n\n@array_function_dispatch(_flip_dispatcher)\ndef flip(m, axis=None):\n \"\"\"\n Reverse the order of elements in an array along the given axis.\n\n The shape of the array is preserved, but the elements are reordered.\n\n .. versionadded:: 1.12.0\n\n Parameters\n ----------\n m : array_like\n Input array.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to flip over. The default,\n axis=None, will flip over all of the axes of the input array.\n If axis is negative it counts from the last to the first axis.\n\n If axis is a tuple of ints, flipping is performed on all of the axes\n specified in the tuple.\n\n .. versionchanged:: 1.15.0\n None and tuples of axes are supported\n\n Returns\n -------\n out : array_like\n A view of `m` with the entries of axis reversed. 
Since a view is\n returned, this operation is done in constant time.\n\n See Also\n --------\n flipud : Flip an array vertically (axis=0).\n fliplr : Flip an array horizontally (axis=1).\n\n Notes\n -----\n flip(m, 0) is equivalent to flipud(m).\n\n flip(m, 1) is equivalent to fliplr(m).\n\n flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n.\n\n flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all\n positions.\n\n flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at\n position 0 and position 1.\n\n Examples\n --------\n >>> A = np.arange(8).reshape((2,2,2))\n >>> A\n array([[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]])\n >>> np.flip(A, 0)\n array([[[4, 5],\n [6, 7]],\n [[0, 1],\n [2, 3]]])\n >>> np.flip(A, 1)\n array([[[2, 3],\n [0, 1]],\n [[6, 7],\n [4, 5]]])\n >>> np.flip(A)\n array([[[7, 6],\n [5, 4]],\n [[3, 2],\n [1, 0]]])\n >>> np.flip(A, (0, 2))\n array([[[5, 4],\n [7, 6]],\n [[1, 0],\n [3, 2]]])\n >>> A = np.random.randn(3,4,5)\n >>> np.all(np.flip(A,2) == A[:,:,::-1,...])\n True\n \"\"\"\n if not hasattr(m, 'ndim'):\n m = asarray(m)\n if axis is None:\n indexer = (np.s_[::-1],) * m.ndim\n else:\n axis = _nx.normalize_axis_tuple(axis, m.ndim)\n indexer = [np.s_[:]] * m.ndim\n for ax in axis:\n indexer[ax] = np.s_[::-1]\n indexer = tuple(indexer)\n return m[indexer]\n\n\n@set_module('numpy')\ndef iterable(y):\n \"\"\"\n Check whether or not an object can be iterated over.\n\n Parameters\n ----------\n y : object\n Input object.\n\n Returns\n -------\n b : bool\n Return ``True`` if the object has an iterator method or is a\n sequence and ``False`` otherwise.\n\n\n Examples\n --------\n >>> np.iterable([1, 2, 3])\n True\n >>> np.iterable(2)\n False\n\n Notes\n -----\n In most cases, the results of ``np.iterable(obj)`` are consistent with\n ``isinstance(obj, collections.abc.Iterable)``. One notable exception is\n the treatment of 0-dimensional arrays::\n\n >>> from collections.abc import Iterable\n >>> a = np.array(1.0) # 0-dimensional numpy array\n >>> isinstance(a, Iterable)\n True\n >>> np.iterable(a)\n False\n\n \"\"\"\n try:\n iter(y)\n except TypeError:\n return False\n return True\n\n\ndef _average_dispatcher(a, axis=None, weights=None, returned=None):\n return (a, weights)\n\n\n@array_function_dispatch(_average_dispatcher)\ndef average(a, axis=None, weights=None, returned=False):\n \"\"\"\n Compute the weighted average along the specified axis.\n\n Parameters\n ----------\n a : array_like\n Array containing data to be averaged. If `a` is not an array, a\n conversion is attempted.\n axis : None or int or tuple of ints, optional\n Axis or axes along which to average `a`. The default,\n axis=None, will average over all of the elements of the input array.\n If axis is negative it counts from the last to the first axis.\n\n .. versionadded:: 1.7.0\n\n If axis is a tuple of ints, averaging is performed on all of the axes\n specified in the tuple instead of a single axis or all the axes as\n before.\n weights : array_like, optional\n An array of weights associated with the values in `a`. Each value in\n `a` contributes to the average according to its associated weight.\n The weights array can either be 1-D (in which case its length must be\n the size of `a` along the given axis) or of the same shape as `a`.\n If `weights=None`, then all data in `a` are assumed to have a\n weight equal to one. 
The 1-D calculation is::\n\n avg = sum(a * weights) / sum(weights)\n\n The only constraint on `weights` is that `sum(weights)` must not be 0.\n returned : bool, optional\n Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)\n is returned, otherwise only the average is returned.\n If `weights=None`, `sum_of_weights` is equivalent to the number of\n elements over which the average is taken.\n\n Returns\n -------\n retval, [sum_of_weights] : array_type or double\n Return the average along the specified axis. When `returned` is `True`,\n return a tuple with the average as the first element and the sum\n of the weights as the second element. `sum_of_weights` is of the\n same type as `retval`. The result dtype follows a general pattern.\n If `weights` is None, the result dtype will be that of `a`, or ``float64``\n if `a` is integral. Otherwise, if `weights` is not None and `a` is non-\n integral, the result type will be the type of lowest precision capable of\n representing values of both `a` and `weights`. If `a` happens to be\n integral, the previous rules still apply but the result dtype will\n at least be ``float64``.\n\n Raises\n ------\n ZeroDivisionError\n When all weights along axis are zero. See `numpy.ma.average` for a\n version robust to this type of error.\n TypeError\n When the length of 1D `weights` is not the same as the shape of `a`\n along axis.\n\n See Also\n --------\n mean\n\n ma.average : average for masked arrays -- useful if your data contains\n \"missing\" values\n numpy.result_type : Returns the type that results from applying the\n numpy type promotion rules to the arguments.\n\n Examples\n --------\n >>> data = np.arange(1, 5)\n >>> data\n array([1, 2, 3, 4])\n >>> np.average(data)\n 2.5\n >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))\n 4.0\n\n >>> data = np.arange(6).reshape((3,2))\n >>> data\n array([[0, 1],\n [2, 3],\n [4, 5]])\n >>> np.average(data, axis=1, weights=[1./4, 3./4])\n array([0.75, 2.75, 4.75])\n >>> np.average(data, weights=[1./4, 3./4])\n Traceback (most recent call last):\n ...\n TypeError: Axis must be specified when shapes of a and weights differ.\n\n >>> a = np.ones(5, dtype=np.float128)\n >>> w = np.ones(5, dtype=np.complex64)\n >>> avg = np.average(a, weights=w)\n >>> print(avg.dtype)\n complex256\n \"\"\"\n a = np.asanyarray(a)\n\n if weights is None:\n avg = a.mean(axis)\n scl = avg.dtype.type(a.size/avg.size)\n else:\n wgt = np.asanyarray(weights)\n\n if issubclass(a.dtype.type, (np.integer, np.bool_)):\n result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')\n else:\n result_dtype = np.result_type(a.dtype, wgt.dtype)\n\n # Sanity checks\n if a.shape != wgt.shape:\n if axis is None:\n raise TypeError(\n \"Axis must be specified when shapes of a and weights \"\n \"differ.\")\n if wgt.ndim != 1:\n raise TypeError(\n \"1D weights expected when shapes of a and weights differ.\")\n if wgt.shape[0] != a.shape[axis]:\n raise ValueError(\n \"Length of weights not compatible with specified axis.\")\n\n # setup wgt to broadcast along axis\n wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)\n wgt = wgt.swapaxes(-1, axis)\n\n scl = wgt.sum(axis=axis, dtype=result_dtype)\n if np.any(scl == 0.0):\n raise ZeroDivisionError(\n \"Weights sum to zero, can't be normalized\")\n\n avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl\n\n if returned:\n if scl.shape != avg.shape:\n scl = np.broadcast_to(scl, avg.shape).copy()\n return avg, scl\n else:\n return avg\n\n\n@set_module('numpy')\ndef asarray_chkfinite(a, dtype=None, order=None):\n \"\"\"Convert the input to an array, checking for NaNs or Infs.\n\n Parameters\n ----------\n a : array_like\n Input data, in any form that can be converted to an array. This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples\n of lists and ndarrays. Success requires no NaNs or Infs.\n dtype : data-type, optional\n By default, the data-type is inferred from the input data.\n order : {'C', 'F', 'A', 'K'}, optional\n Memory layout. 'A' and 'K' depend on the order of input array a.\n 'C' row-major (C-style),\n 'F' column-major (Fortran-style) memory representation.\n 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise\n 'K' (keep) preserve input order\n Defaults to 'C'.\n\n Returns\n -------\n out : ndarray\n Array interpretation of `a`. No copy is performed if the input\n is already an ndarray. If `a` is a subclass of ndarray, a base\n class ndarray is returned.\n\n Raises\n ------\n ValueError\n Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).\n\n See Also\n --------\n asarray : Create an array.\n asanyarray : Similar function which passes through subclasses.\n ascontiguousarray : Convert input to a contiguous array.\n asfarray : Convert input to a floating point ndarray.\n asfortranarray : Convert input to an ndarray with column-major\n memory order.\n fromiter : Create an array from an iterator.\n fromfunction : Construct an array by executing a function on grid\n positions.\n\n Examples\n --------\n Convert a list into an array. If all elements are finite\n ``asarray_chkfinite`` is identical to ``asarray``.\n\n >>> a = [1, 2]\n >>> np.asarray_chkfinite(a, dtype=float)\n array([1., 2.])\n\n Raises ValueError if array_like contains NaNs or Infs.\n\n >>> a = [1, 2, np.inf]\n >>> try:\n ... np.asarray_chkfinite(a)\n ... except ValueError:\n ... print('ValueError')\n ...\n ValueError\n\n \"\"\"\n a = asarray(a, dtype=dtype, order=order)\n if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():\n raise ValueError(\n \"array must not contain infs or NaNs\")\n return a\n\n\ndef _piecewise_dispatcher(x, condlist, funclist, *args, **kw):\n yield x\n # support the undocumented behavior of allowing scalars\n if np.iterable(condlist):\n yield from condlist\n\n\n@array_function_dispatch(_piecewise_dispatcher)\ndef piecewise(x, condlist, funclist, *args, **kw):\n \"\"\"\n Evaluate a piecewise-defined function.\n\n Given a set of conditions and corresponding functions, evaluate each\n function on the input data wherever its condition is true.\n\n Parameters\n ----------\n x : ndarray or scalar\n The input domain.\n condlist : list of bool arrays or bool scalars\n Each boolean array corresponds to a function in `funclist`. Wherever\n `condlist[i]` is True, `funclist[i](x)` is used as the output value.\n\n Each boolean array in `condlist` selects a piece of `x`,\n and should therefore be of the same shape as `x`.\n\n The length of `condlist` must correspond to that of `funclist`.\n If one extra function is given, i.e. if\n ``len(funclist) == len(condlist) + 1``, then that extra function\n is the default value, used wherever all conditions are false.\n funclist : list of callables, f(x,*args,**kw), or scalars\n Each function is evaluated over `x` wherever its corresponding\n condition is True. It should take a 1d array as input and give a 1d\n array or a scalar value as output. 
If, instead of a callable,\n a scalar is provided then a constant function (``lambda x: scalar``) is\n assumed.\n args : tuple, optional\n Any further arguments given to `piecewise` are passed to the functions\n upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then\n each function is called as ``f(x, 1, 'a')``.\n kw : dict, optional\n Keyword arguments used in calling `piecewise` are passed to the\n functions upon execution, i.e., if called\n ``piecewise(..., ..., alpha=1)``, then each function is called as\n ``f(x, alpha=1)``.\n\n Returns\n -------\n out : ndarray\n The output is the same shape and type as x and is found by\n calling the functions in `funclist` on the appropriate portions of `x`,\n as defined by the boolean arrays in `condlist`. Portions not covered\n by any condition have a default value of 0.\n\n\n See Also\n --------\n choose, select, where\n\n Notes\n -----\n This is similar to choose or select, except that functions are\n evaluated on elements of `x` that satisfy the corresponding condition from\n `condlist`.\n\n The result is::\n\n |--\n |funclist[0](x[condlist[0]])\n out = |funclist[1](x[condlist[1]])\n |...\n |funclist[n2](x[condlist[n2]])\n |--\n\n Examples\n --------\n Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.\n\n >>> x = np.linspace(-2.5, 2.5, 6)\n >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])\n array([-1., -1., -1., 1., 1., 1.])\n\n Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for\n ``x >= 0``.\n\n >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])\n array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5])\n\n Apply the same function to a scalar value.\n\n >>> y = -2\n >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x])\n array(2)\n\n \"\"\"\n x = asanyarray(x)\n n2 = len(funclist)\n\n # undocumented: single condition is promoted to a list of one condition\n if isscalar(condlist) or (\n not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):\n condlist = [condlist]\n\n condlist = asarray(condlist, dtype=bool)\n n = len(condlist)\n\n if n == n2 - 1: # compute the \"otherwise\" condition.\n condelse = ~np.any(condlist, axis=0, keepdims=True)\n condlist = np.concatenate([condlist, condelse], axis=0)\n n += 1\n elif n != n2:\n raise ValueError(\n \"with {} condition(s), either {} or {} functions are expected\"\n .format(n, n, n+1)\n )\n\n y = zeros_like(x)\n for cond, func in zip(condlist, funclist):\n if not isinstance(func, collections.abc.Callable):\n y[cond] = func\n else:\n vals = x[cond]\n if vals.size > 0:\n y[cond] = func(vals, *args, **kw)\n\n return y\n\n\ndef _select_dispatcher(condlist, choicelist, default=None):\n yield from condlist\n yield from choicelist\n\n\n@array_function_dispatch(_select_dispatcher)\ndef select(condlist, choicelist, default=0):\n \"\"\"\n Return an array drawn from elements in choicelist, depending on conditions.\n\n Parameters\n ----------\n condlist : list of bool ndarrays\n The list of conditions which determine from which array in `choicelist`\n the output elements are taken. When multiple conditions are satisfied,\n the first one encountered in `condlist` is used.\n choicelist : list of ndarrays\n The list of arrays from which the output elements are taken. 
It has\n to be of the same length as `condlist`.\n default : scalar, optional\n The element inserted in `output` when all conditions evaluate to False.\n\n Returns\n -------\n output : ndarray\n The output at position m is the m-th element of the array in\n `choicelist` where the m-th element of the corresponding array in\n `condlist` is True.\n\n See Also\n --------\n where : Return elements from one of two arrays depending on condition.\n take, choose, compress, diag, diagonal\n\n Examples\n --------\n >>> x = np.arange(6)\n >>> condlist = [x<3, x>3]\n >>> choicelist = [x, x**2]\n >>> np.select(condlist, choicelist, 42)\n array([ 0, 1, 2, 42, 16, 25])\n\n >>> condlist = [x<=4, x>3]\n >>> choicelist = [x, x**2]\n >>> np.select(condlist, choicelist, 55)\n array([ 0, 1, 2, 3, 4, 25])\n\n \"\"\"\n # Check the size of condlist and choicelist are the same, or abort.\n if len(condlist) != len(choicelist):\n raise ValueError(\n 'list of cases must be same length as list of conditions')\n\n # Now that the dtype is known, handle the deprecated select([], []) case\n if len(condlist) == 0:\n raise ValueError(\"select with an empty condition list is not possible\")\n\n choicelist = [np.asarray(choice) for choice in choicelist]\n\n try:\n intermediate_dtype = np.result_type(*choicelist)\n except TypeError as e:\n msg = f'Choicelist elements do not have a common dtype: {e}'\n raise TypeError(msg) from None\n default_array = np.asarray(default)\n choicelist.append(default_array)\n\n # need to get the result type before broadcasting for correct scalar\n # behaviour\n try:\n dtype = np.result_type(intermediate_dtype, default_array)\n except TypeError as e:\n msg = f'Choicelists and default value do not have a common dtype: {e}'\n raise TypeError(msg) from None\n\n # Convert conditions to arrays and broadcast conditions and choices\n # as the shape is needed for the result. Doing it separately optimizes\n # for example when all choices are scalars.\n condlist = np.broadcast_arrays(*condlist)\n choicelist = np.broadcast_arrays(*choicelist)\n\n # If cond array is not an ndarray in boolean format or scalar bool, abort.\n for i, cond in enumerate(condlist):\n if cond.dtype.type is not np.bool_:\n raise TypeError(\n 'invalid entry {} in condlist: should be boolean ndarray'.format(i))\n\n if choicelist[0].ndim == 0:\n # This may be common, so avoid the call.\n result_shape = condlist[0].shape\n else:\n result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape\n\n result = np.full(result_shape, choicelist[-1], dtype)\n\n # Use np.copyto to burn each choicelist array onto result, using the\n # corresponding condlist as a boolean mask. This is done in reverse\n # order since the first choice should take precedence.\n choicelist = choicelist[-2::-1]\n condlist = condlist[::-1]\n for choice, cond in zip(choicelist, condlist):\n np.copyto(result, choice, where=cond)\n\n return result\n\n\ndef _copy_dispatcher(a, order=None, subok=None):\n return (a,)\n\n\n@array_function_dispatch(_copy_dispatcher)\ndef copy(a, order='K', subok=False):\n \"\"\"\n Return an array copy of the given object.\n\n Parameters\n ----------\n a : array_like\n Input data.\n order : {'C', 'F', 'A', 'K'}, optional\n Controls the memory layout of the copy. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,\n 'C' otherwise. 'K' means match the layout of `a` as closely\n as possible. 
(Note that this function and :meth:`ndarray.copy` are very\n similar, but have different default values for their order=\n arguments.)\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise the\n returned array will be forced to be a base-class array (defaults to False).\n\n .. versionadded:: 1.19.0\n\n Returns\n -------\n arr : ndarray\n Array interpretation of `a`.\n\n See Also\n --------\n ndarray.copy : Preferred method for creating an array copy\n\n Notes\n -----\n This is equivalent to:\n\n >>> np.array(a, copy=True) #doctest: +SKIP\n\n Examples\n --------\n Create an array x, with a reference y and a copy z:\n\n >>> x = np.array([1, 2, 3])\n >>> y = x\n >>> z = np.copy(x)\n\n Note that, when we modify x, y changes, but not z:\n\n >>> x[0] = 10\n >>> x[0] == y[0]\n True\n >>> x[0] == z[0]\n False\n\n Note that, np.copy clears previously set WRITEABLE=False flag.\n\n >>> a = np.array([1, 2, 3])\n >>> a.flags[\"WRITEABLE\"] = False\n >>> b = np.copy(a)\n >>> b.flags[\"WRITEABLE\"]\n True\n >>> b[0] = 3\n >>> b\n array([3, 2, 3])\n \n Note that np.copy is a shallow copy and will not copy object\n elements within arrays. This is mainly important for arrays\n containing Python objects. The new array will contain the\n same object which may lead to surprises if that object can\n be modified (is mutable):\n\n >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)\n >>> b = np.copy(a)\n >>> b[2][0] = 10\n >>> a\n array([1, 'm', list([10, 3, 4])], dtype=object)\n\n To ensure all elements within an ``object`` array are copied,\n use `copy.deepcopy`:\n\n >>> import copy\n >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object)\n >>> c = copy.deepcopy(a)\n >>> c[2][0] = 10\n >>> c\n array([1, 'm', list([10, 3, 4])], dtype=object)\n >>> a\n array([1, 'm', list([2, 3, 4])], dtype=object)\n\n \"\"\"\n return array(a, order=order, subok=subok, copy=True)\n\n# Basic operations\n\n\ndef _gradient_dispatcher(f, *varargs, axis=None, edge_order=None):\n yield f\n yield from varargs\n\n\n@array_function_dispatch(_gradient_dispatcher)\ndef gradient(f, *varargs, axis=None, edge_order=1):\n \"\"\"\n Return the gradient of an N-dimensional array.\n\n The gradient is computed using second order accurate central differences\n in the interior points and either first or second order accurate one-sided\n (forward or backwards) differences at the boundaries.\n The returned gradient hence has the same shape as the input array.\n\n Parameters\n ----------\n f : array_like\n An N-dimensional array containing samples of a scalar function.\n varargs : list of scalar or array, optional\n Spacing between f values. Default unitary spacing for all dimensions.\n Spacing can be specified using:\n\n 1. single scalar to specify a sample distance for all dimensions.\n 2. N scalars to specify a constant sample distance for each dimension.\n i.e. `dx`, `dy`, `dz`, ...\n 3. N arrays to specify the coordinates of the values along each\n dimension of F. The length of the array must match the size of\n the corresponding dimension\n 4. Any combination of N scalars/arrays with the meaning of 2. and 3.\n\n If `axis` is given, the number of varargs must equal the number of axes.\n Default: 1.\n\n edge_order : {1, 2}, optional\n Gradient is calculated using N-th order accurate differences\n at the boundaries. Default: 1.\n\n .. 
versionadded:: 1.9.1\n\n axis : None or int or tuple of ints, optional\n Gradient is calculated only along the given axis or axes\n The default (axis = None) is to calculate the gradient for all the axes\n of the input array. axis may be negative, in which case it counts from\n the last to the first axis.\n\n .. versionadded:: 1.11.0\n\n Returns\n -------\n gradient : ndarray or list of ndarray\n A list of ndarrays (or a single ndarray if there is only one dimension)\n corresponding to the derivatives of f with respect to each dimension.\n Each derivative has the same shape as f.\n\n Examples\n --------\n >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)\n >>> np.gradient(f)\n array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])\n >>> np.gradient(f, 2)\n array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])\n\n Spacing can be also specified with an array that represents the coordinates\n of the values F along the dimensions.\n For instance a uniform spacing:\n\n >>> x = np.arange(f.size)\n >>> np.gradient(f, x)\n array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])\n\n Or a non uniform one:\n\n >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)\n >>> np.gradient(f, x)\n array([1. , 3. , 3.5, 6.7, 6.9, 2.5])\n\n For two dimensional arrays, the return will be two arrays ordered by\n axis. In this example the first array stands for the gradient in\n rows and the second one in columns direction:\n\n >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))\n [array([[ 2., 2., -1.],\n [ 2., 2., -1.]]), array([[1. , 2.5, 4. ],\n [1. , 1. , 1. ]])]\n\n In this example the spacing is also specified:\n uniform for axis=0 and non uniform for axis=1\n\n >>> dx = 2.\n >>> y = [1., 1.5, 3.5]\n >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)\n [array([[ 1. , 1. , -0.5],\n [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ],\n [2. , 1.7, 0.5]])]\n\n It is possible to specify how boundaries are treated using `edge_order`\n\n >>> x = np.array([0, 1, 2, 3, 4])\n >>> f = x**2\n >>> np.gradient(f, edge_order=1)\n array([1., 2., 4., 6., 7.])\n >>> np.gradient(f, edge_order=2)\n array([0., 2., 4., 6., 8.])\n\n The `axis` keyword can be used to specify a subset of axes of which the\n gradient is calculated\n\n >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0)\n array([[ 2., 2., -1.],\n [ 2., 2., -1.]])\n\n Notes\n -----\n Assuming that :math:`f\\\\in C^{3}` (i.e., :math:`f` has at least 3 continuous\n derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we\n minimize the \"consistency error\" :math:`\\\\eta_{i}` between the true gradient\n and its estimate from a linear combination of the neighboring grid-points:\n\n .. math::\n\n \\\\eta_{i} = f_{i}^{\\\\left(1\\\\right)} -\n \\\\left[ \\\\alpha f\\\\left(x_{i}\\\\right) +\n \\\\beta f\\\\left(x_{i} + h_{d}\\\\right) +\n \\\\gamma f\\\\left(x_{i}-h_{s}\\\\right)\n \\\\right]\n\n By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`\n with their Taylor series expansion, this translates into solving\n the following the linear system:\n\n .. math::\n\n \\\\left\\\\{\n \\\\begin{array}{r}\n \\\\alpha+\\\\beta+\\\\gamma=0 \\\\\\\\\n \\\\beta h_{d}-\\\\gamma h_{s}=1 \\\\\\\\\n \\\\beta h_{d}^{2}+\\\\gamma h_{s}^{2}=0\n \\\\end{array}\n \\\\right.\n\n The resulting approximation of :math:`f_{i}^{(1)}` is the following:\n\n .. 
math::\n\n \\\\hat f_{i}^{(1)} =\n \\\\frac{\n h_{s}^{2}f\\\\left(x_{i} + h_{d}\\\\right)\n + \\\\left(h_{d}^{2} - h_{s}^{2}\\\\right)f\\\\left(x_{i}\\\\right)\n - h_{d}^{2}f\\\\left(x_{i}-h_{s}\\\\right)}\n { h_{s}h_{d}\\\\left(h_{d} + h_{s}\\\\right)}\n + \\\\mathcal{O}\\\\left(\\\\frac{h_{d}h_{s}^{2}\n + h_{s}h_{d}^{2}}{h_{d}\n + h_{s}}\\\\right)\n\n It is worth noting that if :math:`h_{s}=h_{d}`\n (i.e., data are evenly spaced)\n we find the standard second order approximation:\n\n .. math::\n\n \\\\hat f_{i}^{(1)}=\n \\\\frac{f\\\\left(x_{i+1}\\\\right) - f\\\\left(x_{i-1}\\\\right)}{2h}\n + \\\\mathcal{O}\\\\left(h^{2}\\\\right)\n\n With a similar procedure the forward/backward approximations used for\n boundaries can be derived.\n\n References\n ----------\n .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics\n (Texts in Applied Mathematics). New York: Springer.\n .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations\n in Geophysical Fluid Dynamics. New York: Springer.\n .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on\n Arbitrarily Spaced Grids,\n Mathematics of Computation 51, no. 184 : 699-706.\n `PDF <http://www.ams.org/journals/mcom/1988-51-184/\n S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.\n \"\"\"\n f = np.asanyarray(f)\n N = f.ndim # number of dimensions\n\n if axis is None:\n axes = tuple(range(N))\n else:\n axes = _nx.normalize_axis_tuple(axis, N)\n\n len_axes = len(axes)\n n = len(varargs)\n if n == 0:\n # no spacing argument - use 1 in all axes\n dx = [1.0] * len_axes\n elif n == 1 and np.ndim(varargs[0]) == 0:\n # single scalar for all axes\n dx = varargs * len_axes\n elif n == len_axes:\n # scalar or 1d array for each axis\n dx = list(varargs)\n for i, distances in enumerate(dx):\n distances = np.asanyarray(distances)\n if distances.ndim == 0:\n continue\n elif distances.ndim != 1:\n raise ValueError(\"distances must be either scalars or 1d\")\n if len(distances) != f.shape[axes[i]]:\n raise ValueError(\"when 1d, distances must match \"\n \"the length of the corresponding dimension\")\n if np.issubdtype(distances.dtype, np.integer):\n # Convert numpy integer types to float64 to avoid modular\n # arithmetic in np.diff(distances).\n distances = distances.astype(np.float64)\n diffx = np.diff(distances)\n # if distances are constant reduce to the scalar case\n # since it brings a consistent speedup\n if (diffx == diffx[0]).all():\n diffx = diffx[0]\n dx[i] = diffx\n else:\n raise TypeError(\"invalid number of arguments\")\n\n if edge_order > 2:\n raise ValueError(\"'edge_order' greater than 2 not supported\")\n\n # use central differences on interior and one-sided differences on the\n # endpoints. 
This preserves second order-accuracy over the full domain.\n\n outvals = []\n\n # create slice objects --- initially all are [:, :, ..., :]\n slice1 = [slice(None)]*N\n slice2 = [slice(None)]*N\n slice3 = [slice(None)]*N\n slice4 = [slice(None)]*N\n\n otype = f.dtype\n if otype.type is np.datetime64:\n # the timedelta dtype with the same unit information\n otype = np.dtype(otype.name.replace('datetime', 'timedelta'))\n # view as timedelta to allow addition\n f = f.view(otype)\n elif otype.type is np.timedelta64:\n pass\n elif np.issubdtype(otype, np.inexact):\n pass\n else:\n # All other types convert to floating point.\n # First check if f is a numpy integer type; if so, convert f to float64\n # to avoid modular arithmetic when computing the changes in f.\n if np.issubdtype(otype, np.integer):\n f = f.astype(np.float64)\n otype = np.float64\n\n for axis, ax_dx in zip(axes, dx):\n if f.shape[axis] < edge_order + 1:\n raise ValueError(\n \"Shape of array too small to calculate a numerical gradient, \"\n \"at least (edge_order + 1) elements are required.\")\n # result allocation\n out = np.empty_like(f, dtype=otype)\n\n # spacing for the current axis\n uniform_spacing = np.ndim(ax_dx) == 0\n\n # Numerical differentiation: 2nd order interior\n slice1[axis] = slice(1, -1)\n slice2[axis] = slice(None, -2)\n slice3[axis] = slice(1, -1)\n slice4[axis] = slice(2, None)\n\n if uniform_spacing:\n out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx)\n else:\n dx1 = ax_dx[0:-1]\n dx2 = ax_dx[1:]\n a = -(dx2)/(dx1 * (dx1 + dx2))\n b = (dx2 - dx1) / (dx1 * dx2)\n c = dx1 / (dx2 * (dx1 + dx2))\n # fix the shape for broadcasting\n shape = np.ones(N, dtype=int)\n shape[axis] = -1\n a.shape = b.shape = c.shape = shape\n # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]\n out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]\n\n # Numerical differentiation: 1st order edges\n if edge_order == 1:\n slice1[axis] = 0\n slice2[axis] = 1\n slice3[axis] = 0\n dx_0 = ax_dx if uniform_spacing else ax_dx[0]\n # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])\n out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0\n\n slice1[axis] = -1\n slice2[axis] = -1\n slice3[axis] = -2\n dx_n = ax_dx if uniform_spacing else ax_dx[-1]\n # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])\n out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n\n\n # Numerical differentiation: 2nd order edges\n else:\n slice1[axis] = 0\n slice2[axis] = 0\n slice3[axis] = 1\n slice4[axis] = 2\n if uniform_spacing:\n a = -1.5 / ax_dx\n b = 2. / ax_dx\n c = -0.5 / ax_dx\n else:\n dx1 = ax_dx[0]\n dx2 = ax_dx[1]\n a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))\n b = (dx1 + dx2) / (dx1 * dx2)\n c = - dx1 / (dx2 * (dx1 + dx2))\n # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]\n out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]\n\n slice1[axis] = -1\n slice2[axis] = -3\n slice3[axis] = -2\n slice4[axis] = -1\n if uniform_spacing:\n a = 0.5 / ax_dx\n b = -2. / ax_dx\n c = 1.5 / ax_dx\n else:\n dx1 = ax_dx[-2]\n dx2 = ax_dx[-1]\n a = (dx2) / (dx1 * (dx1 + dx2))\n b = - (dx2 + dx1) / (dx1 * dx2)\n c = (2. 
* dx2 + dx1) / (dx2 * (dx1 + dx2))\n # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]\n out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]\n\n outvals.append(out)\n\n # reset the slice object in this dimension to \":\"\n slice1[axis] = slice(None)\n slice2[axis] = slice(None)\n slice3[axis] = slice(None)\n slice4[axis] = slice(None)\n\n if len_axes == 1:\n return outvals[0]\n else:\n return outvals\n\n\ndef _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):\n return (a, prepend, append)\n\n\n@array_function_dispatch(_diff_dispatcher)\ndef diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):\n \"\"\"\n Calculate the n-th discrete difference along the given axis.\n\n The first difference is given by ``out[i] = a[i+1] - a[i]`` along\n the given axis, higher differences are calculated by using `diff`\n recursively.\n\n Parameters\n ----------\n a : array_like\n Input array\n n : int, optional\n The number of times values are differenced. If zero, the input\n is returned as-is.\n axis : int, optional\n The axis along which the difference is taken, default is the\n last axis.\n prepend, append : array_like, optional\n Values to prepend or append to `a` along axis prior to\n performing the difference. Scalar values are expanded to\n arrays with length 1 in the direction of axis and the shape\n of the input array along all other axes. Otherwise the\n dimension and shape must match `a` except along axis.\n\n .. versionadded:: 1.16.0\n\n Returns\n -------\n diff : ndarray\n The n-th differences. The shape of the output is the same as `a`\n except along `axis` where the dimension is smaller by `n`. The\n type of the output is the same as the type of the difference\n between any two elements of `a`. This is the same as the type of\n `a` in most cases. A notable exception is `datetime64`, which\n results in a `timedelta64` output array.\n\n See Also\n --------\n gradient, ediff1d, cumsum\n\n Notes\n -----\n Type is preserved for boolean arrays, so the result will contain\n `False` when consecutive elements are the same and `True` when they\n differ.\n\n For unsigned integer arrays, the results will also be unsigned. This\n should not be surprising, as the result is consistent with\n calculating the difference directly:\n\n >>> u8_arr = np.array([1, 0], dtype=np.uint8)\n >>> np.diff(u8_arr)\n array([255], dtype=uint8)\n >>> u8_arr[1,...] 
- u8_arr[0,...]\n 255\n\n If this is not desirable, then the array should be cast to a larger\n integer type first:\n\n >>> i16_arr = u8_arr.astype(np.int16)\n >>> np.diff(i16_arr)\n array([-1], dtype=int16)\n\n Examples\n --------\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.diff(x)\n array([ 1, 2, 3, -7])\n >>> np.diff(x, n=2)\n array([ 1, 1, -10])\n\n >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])\n >>> np.diff(x)\n array([[2, 3, 4],\n [5, 1, 2]])\n >>> np.diff(x, axis=0)\n array([[-1, 2, 0, -2]])\n\n >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)\n >>> np.diff(x)\n array([1, 1], dtype='timedelta64[D]')\n\n \"\"\"\n if n == 0:\n return a\n if n < 0:\n raise ValueError(\n \"order must be non-negative but got \" + repr(n))\n\n a = asanyarray(a)\n nd = a.ndim\n if nd == 0:\n raise ValueError(\"diff requires input that is at least one dimensional\")\n axis = normalize_axis_index(axis, nd)\n\n combined = []\n if prepend is not np._NoValue:\n prepend = np.asanyarray(prepend)\n if prepend.ndim == 0:\n shape = list(a.shape)\n shape[axis] = 1\n prepend = np.broadcast_to(prepend, tuple(shape))\n combined.append(prepend)\n\n combined.append(a)\n\n if append is not np._NoValue:\n append = np.asanyarray(append)\n if append.ndim == 0:\n shape = list(a.shape)\n shape[axis] = 1\n append = np.broadcast_to(append, tuple(shape))\n combined.append(append)\n\n if len(combined) > 1:\n a = np.concatenate(combined, axis)\n\n slice1 = [slice(None)] * nd\n slice2 = [slice(None)] * nd\n slice1[axis] = slice(1, None)\n slice2[axis] = slice(None, -1)\n slice1 = tuple(slice1)\n slice2 = tuple(slice2)\n\n op = not_equal if a.dtype == np.bool_ else subtract\n for _ in range(n):\n a = op(a[slice1], a[slice2])\n\n return a\n\n\ndef _interp_dispatcher(x, xp, fp, left=None, right=None, period=None):\n return (x, xp, fp)\n\n\n@array_function_dispatch(_interp_dispatcher)\ndef interp(x, xp, fp, left=None, right=None, period=None):\n \"\"\"\n One-dimensional linear interpolation for monotonically increasing sample points.\n\n Returns the one-dimensional piecewise linear interpolant to a function\n with given discrete data points (`xp`, `fp`), evaluated at `x`.\n\n Parameters\n ----------\n x : array_like\n The x-coordinates at which to evaluate the interpolated values.\n\n xp : 1-D sequence of floats\n The x-coordinates of the data points, must be increasing if argument\n `period` is not specified. Otherwise, `xp` is internally sorted after\n normalizing the periodic boundaries with ``xp = xp % period``.\n\n fp : 1-D sequence of float or complex\n The y-coordinates of the data points, same length as `xp`.\n\n left : optional float or complex corresponding to fp\n Value to return for `x < xp[0]`, default is `fp[0]`.\n\n right : optional float or complex corresponding to fp\n Value to return for `x > xp[-1]`, default is `fp[-1]`.\n\n period : None or float, optional\n A period for the x-coordinates. This parameter allows the proper\n interpolation of angular x-coordinates. Parameters `left` and `right`\n are ignored if `period` is specified.\n\n .. versionadded:: 1.10.0\n\n Returns\n -------\n y : float or complex (corresponding to fp) or ndarray\n The interpolated values, same shape as `x`.\n\n Raises\n ------\n ValueError\n If `xp` and `fp` have different length\n If `xp` or `fp` are not 1-D sequences\n If `period == 0`\n\n See Also\n --------\n scipy.interpolate\n\n Warnings\n --------\n The x-coordinate sequence is expected to be increasing, but this is not\n explicitly enforced. 
However, if the sequence `xp` is non-increasing,\n interpolation results are meaningless.\n\n Note that, since NaN is unsortable, `xp` also cannot contain NaNs.\n\n A simple check for `xp` being strictly increasing is::\n\n np.all(np.diff(xp) > 0)\n\n Examples\n --------\n >>> xp = [1, 2, 3]\n >>> fp = [3, 2, 0]\n >>> np.interp(2.5, xp, fp)\n 1.0\n >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)\n array([3. , 3. , 2.5 , 0.56, 0. ])\n >>> UNDEF = -99.0\n >>> np.interp(3.14, xp, fp, right=UNDEF)\n -99.0\n\n Plot an interpolant to the sine function:\n\n >>> x = np.linspace(0, 2*np.pi, 10)\n >>> y = np.sin(x)\n >>> xvals = np.linspace(0, 2*np.pi, 50)\n >>> yinterp = np.interp(xvals, x, y)\n >>> import matplotlib.pyplot as plt\n >>> plt.plot(x, y, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(xvals, yinterp, '-x')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.show()\n\n Interpolation with periodic x-coordinates:\n\n >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]\n >>> xp = [190, -190, 350, -350]\n >>> fp = [5, 10, 3, 4]\n >>> np.interp(x, xp, fp, period=360)\n array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75])\n\n Complex interpolation:\n\n >>> x = [1.5, 4.0]\n >>> xp = [2,3,5]\n >>> fp = [1.0j, 0, 2+3j]\n >>> np.interp(x, xp, fp)\n array([0.+1.j , 1.+1.5j])\n\n \"\"\"\n\n fp = np.asarray(fp)\n\n if np.iscomplexobj(fp):\n interp_func = compiled_interp_complex\n input_dtype = np.complex128\n else:\n interp_func = compiled_interp\n input_dtype = np.float64\n\n if period is not None:\n if period == 0:\n raise ValueError(\"period must be a non-zero value\")\n period = abs(period)\n left = None\n right = None\n\n x = np.asarray(x, dtype=np.float64)\n xp = np.asarray(xp, dtype=np.float64)\n fp = np.asarray(fp, dtype=input_dtype)\n\n if xp.ndim != 1 or fp.ndim != 1:\n raise ValueError(\"Data points must be 1-D sequences\")\n if xp.shape[0] != fp.shape[0]:\n raise ValueError(\"fp and xp are not of the same length\")\n # normalizing periodic boundaries\n x = x % period\n xp = xp % period\n asort_xp = np.argsort(xp)\n xp = xp[asort_xp]\n fp = fp[asort_xp]\n xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))\n fp = np.concatenate((fp[-1:], fp, fp[0:1]))\n\n return interp_func(x, xp, fp, left, right)\n\n\ndef _angle_dispatcher(z, deg=None):\n return (z,)\n\n\n@array_function_dispatch(_angle_dispatcher)\ndef angle(z, deg=False):\n \"\"\"\n Return the angle of the complex argument.\n\n Parameters\n ----------\n z : array_like\n A complex number or sequence of complex numbers.\n deg : bool, optional\n Return angle in degrees if True, radians if False (default).\n\n Returns\n -------\n angle : ndarray or scalar\n The counterclockwise angle from the positive real axis on the complex\n plane in the range ``(-pi, pi]``, with dtype as numpy.float64.\n\n .. versionchanged:: 1.16.0\n This function works on subclasses of ndarray like `ma.array`.\n\n See Also\n --------\n arctan2\n absolute\n\n Notes\n -----\n Although the angle of the complex number 0 is undefined, ``numpy.angle(0)``\n returns the value 0.\n\n Examples\n --------\n >>> np.angle([1.0, 1.0j, 1+1j]) # in radians\n array([ 0. 
, 1.57079633, 0.78539816]) # may vary\n >>> np.angle(1+1j, deg=True) # in degrees\n 45.0\n\n \"\"\"\n z = asanyarray(z)\n if issubclass(z.dtype.type, _nx.complexfloating):\n zimag = z.imag\n zreal = z.real\n else:\n zimag = 0\n zreal = z\n\n a = arctan2(zimag, zreal)\n if deg:\n a *= 180/pi\n return a\n\n\ndef _unwrap_dispatcher(p, discont=None, axis=None, *, period=None):\n return (p,)\n\n\n@array_function_dispatch(_unwrap_dispatcher)\ndef unwrap(p, discont=None, axis=-1, *, period=2*pi):\n r\"\"\"\n Unwrap by taking the complement of large deltas with respect to the period.\n\n This unwraps a signal `p` by changing elements which have an absolute\n difference from their predecessor of more than ``max(discont, period/2)``\n to their `period`-complementary values.\n\n For the default case where `period` is :math:`2\\pi` and `discont` is\n :math:`\\pi`, this unwraps a radian phase `p` such that adjacent differences\n are never greater than :math:`\\pi` by adding :math:`2k\\pi` for some\n integer :math:`k`.\n\n Parameters\n ----------\n p : array_like\n Input array.\n discont : float, optional\n Maximum discontinuity between values, default is ``period/2``.\n Values below ``period/2`` are treated as if they were ``period/2``.\n To have an effect different from the default, `discont` should be\n larger than ``period/2``.\n axis : int, optional\n Axis along which unwrap will operate, default is the last axis.\n period: float, optional\n Size of the range over which the input wraps. By default, it is\n ``2 pi``.\n\n .. versionadded:: 1.21.0\n\n Returns\n -------\n out : ndarray\n Output array.\n\n See Also\n --------\n rad2deg, deg2rad\n\n Notes\n -----\n If the discontinuity in `p` is smaller than ``period/2``,\n but larger than `discont`, no unwrapping is done because taking\n the complement would only make the discontinuity larger.\n\n Examples\n --------\n >>> phase = np.linspace(0, np.pi, num=5)\n >>> phase[3:] += np.pi\n >>> phase\n array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary\n >>> np.unwrap(phase)\n array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary\n >>> np.unwrap([0, 1, 2, -1, 0], period=4)\n array([0, 1, 2, 3, 4])\n >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6)\n array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4)\n array([2, 3, 4, 5, 6, 7, 8, 9])\n >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180\n >>> np.unwrap(phase_deg, period=360)\n array([-180., -140., -100., -60., -20., 20., 60., 100., 140.,\n 180., 220., 260., 300., 340., 380., 420., 460., 500.,\n 540.])\n \"\"\"\n p = asarray(p)\n nd = p.ndim\n dd = diff(p, axis=axis)\n if discont is None:\n discont = period/2\n slice1 = [slice(None, None)]*nd # full slices\n slice1[axis] = slice(1, None)\n slice1 = tuple(slice1)\n dtype = np.result_type(dd, period)\n if _nx.issubdtype(dtype, _nx.integer):\n interval_high, rem = divmod(period, 2)\n boundary_ambiguous = rem == 0\n else:\n interval_high = period / 2\n boundary_ambiguous = True\n interval_low = -interval_high\n ddmod = mod(dd - interval_low, period) + interval_low\n if boundary_ambiguous:\n # for `mask = (abs(dd) == period/2)`, the above line made\n # `ddmod[mask] == -period/2`. 
correct these such that\n # `ddmod[mask] == sign(dd[mask])*period/2`.\n _nx.copyto(ddmod, interval_high,\n where=(ddmod == interval_low) & (dd > 0))\n ph_correct = ddmod - dd\n _nx.copyto(ph_correct, 0, where=abs(dd) < discont)\n up = array(p, copy=True, dtype=dtype)\n up[slice1] = p[slice1] + ph_correct.cumsum(axis)\n return up\n\n\ndef _sort_complex(a):\n return (a,)\n\n\n@array_function_dispatch(_sort_complex)\ndef sort_complex(a):\n \"\"\"\n Sort a complex array using the real part first, then the imaginary part.\n\n Parameters\n ----------\n a : array_like\n Input array\n\n Returns\n -------\n out : complex ndarray\n Always returns a sorted complex array.\n\n Examples\n --------\n >>> np.sort_complex([5, 3, 6, 2, 1])\n array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])\n\n >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])\n array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])\n\n \"\"\"\n b = array(a, copy=True)\n b.sort()\n if not issubclass(b.dtype.type, _nx.complexfloating):\n if b.dtype.char in 'bhBH':\n return b.astype('F')\n elif b.dtype.char == 'g':\n return b.astype('G')\n else:\n return b.astype('D')\n else:\n return b\n\n\ndef _trim_zeros(filt, trim=None):\n return (filt,)\n\n\n@array_function_dispatch(_trim_zeros)\ndef trim_zeros(filt, trim='fb'):\n \"\"\"\n Trim the leading and/or trailing zeros from a 1-D array or sequence.\n\n Parameters\n ----------\n filt : 1-D array or sequence\n Input array.\n trim : str, optional\n A string with 'f' representing trim from front and 'b' to trim from\n back. Default is 'fb', trim zeros from both front and back of the\n array.\n\n Returns\n -------\n trimmed : 1-D array or sequence\n The result of trimming the input. The input data type is preserved.\n\n Examples\n --------\n >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))\n >>> np.trim_zeros(a)\n array([1, 2, 3, 0, 2, 1])\n\n >>> np.trim_zeros(a, 'b')\n array([0, 0, 0, ..., 0, 2, 1])\n\n The input data type is preserved, list/tuple in means list/tuple out.\n\n >>> np.trim_zeros([0, 1, 2, 0])\n [1, 2]\n\n \"\"\"\n\n first = 0\n trim = trim.upper()\n if 'F' in trim:\n for i in filt:\n if i != 0.:\n break\n else:\n first = first + 1\n last = len(filt)\n if 'B' in trim:\n for i in filt[::-1]:\n if i != 0.:\n break\n else:\n last = last - 1\n return filt[first:last]\n\n\ndef _extract_dispatcher(condition, arr):\n return (condition, arr)\n\n\n@array_function_dispatch(_extract_dispatcher)\ndef extract(condition, arr):\n \"\"\"\n Return the elements of an array that satisfy some condition.\n\n This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. 
If\n `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.\n\n Note that `place` does the exact opposite of `extract`.\n\n Parameters\n ----------\n condition : array_like\n An array whose nonzero or True entries indicate the elements of `arr`\n to extract.\n arr : array_like\n Input array of the same size as `condition`.\n\n Returns\n -------\n extract : ndarray\n Rank 1 array of values from `arr` where `condition` is True.\n\n See Also\n --------\n take, put, copyto, compress, place\n\n Examples\n --------\n >>> arr = np.arange(12).reshape((3, 4))\n >>> arr\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]])\n >>> condition = np.mod(arr, 3)==0\n >>> condition\n array([[ True, False, False, True],\n [False, False, True, False],\n [False, True, False, False]])\n >>> np.extract(condition, arr)\n array([0, 3, 6, 9])\n\n\n If `condition` is boolean:\n\n >>> arr[condition]\n array([0, 3, 6, 9])\n\n \"\"\"\n return _nx.take(ravel(arr), nonzero(ravel(condition))[0])\n\n\ndef _place_dispatcher(arr, mask, vals):\n return (arr, mask, vals)\n\n\n@array_function_dispatch(_place_dispatcher)\ndef place(arr, mask, vals):\n \"\"\"\n Change elements of an array based on conditional and input values.\n\n Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that\n `place` uses the first N elements of `vals`, where N is the number of\n True values in `mask`, while `copyto` uses the elements where `mask`\n is True.\n\n Note that `extract` does the exact opposite of `place`.\n\n Parameters\n ----------\n arr : ndarray\n Array to put data into.\n mask : array_like\n Boolean mask array. Must have the same size as `arr`.\n vals : 1-D sequence\n Values to put into `arr`. Only the first N elements are used, where\n N is the number of True values in `mask`. If `vals` is smaller\n than N, it will be repeated, and if elements of `arr` are to be masked,\n this sequence must be non-empty.\n\n See Also\n --------\n copyto, put, take, extract\n\n Examples\n --------\n >>> arr = np.arange(6).reshape(2, 3)\n >>> np.place(arr, arr>2, [44, 55])\n >>> arr\n array([[ 0, 1, 2],\n [44, 55, 44]])\n\n \"\"\"\n if not isinstance(arr, np.ndarray):\n raise TypeError(\"argument 1 must be numpy.ndarray, \"\n \"not {name}\".format(name=type(arr).__name__))\n\n return _insert(arr, mask, vals)\n\n\ndef disp(mesg, device=None, linefeed=True):\n \"\"\"\n Display a message on a device.\n\n Parameters\n ----------\n mesg : str\n Message to display.\n device : object\n Device to write message. If None, defaults to ``sys.stdout`` which is\n very similar to ``print``. `device` needs to have ``write()`` and\n ``flush()`` methods.\n linefeed : bool, optional\n Option whether to print a line feed or not. 
Defaults to True.\n\n Raises\n ------\n AttributeError\n If `device` does not have a ``write()`` or ``flush()`` method.\n\n Examples\n --------\n Besides ``sys.stdout``, a file-like object can also be used as it has\n both required methods:\n\n >>> from io import StringIO\n >>> buf = StringIO()\n >>> np.disp(u'\"Display\" in a file', device=buf)\n >>> buf.getvalue()\n '\"Display\" in a file\\\\n'\n\n \"\"\"\n if device is None:\n device = sys.stdout\n if linefeed:\n device.write('%s\\n' % mesg)\n else:\n device.write('%s' % mesg)\n device.flush()\n return\n\n\n# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html\n_DIMENSION_NAME = r'\\w+'\n_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)\n_ARGUMENT = r'\\({}\\)'.format(_CORE_DIMENSION_LIST)\n_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)\n_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST)\n\n\ndef _parse_gufunc_signature(signature):\n \"\"\"\n Parse string signatures for a generalized universal function.\n\n Arguments\n ---------\n signature : string\n Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)``\n for ``np.matmul``.\n\n Returns\n -------\n Tuple of input and output core dimensions parsed from the signature, each\n of the form List[Tuple[str, ...]].\n \"\"\"\n signature = re.sub(r'\\s+', '', signature)\n\n if not re.match(_SIGNATURE, signature):\n raise ValueError(\n 'not a valid gufunc signature: {}'.format(signature))\n return tuple([tuple(re.findall(_DIMENSION_NAME, arg))\n for arg in re.findall(_ARGUMENT, arg_list)]\n for arg_list in signature.split('->'))\n\n\ndef _update_dim_sizes(dim_sizes, arg, core_dims):\n \"\"\"\n Incrementally check and update core dimension sizes for a single argument.\n\n Arguments\n ---------\n dim_sizes : Dict[str, int]\n Sizes of existing core dimensions. 
Will be updated in-place.\n arg : ndarray\n Argument to examine.\n core_dims : Tuple[str, ...]\n Core dimensions for this argument.\n \"\"\"\n if not core_dims:\n return\n\n num_core_dims = len(core_dims)\n if arg.ndim < num_core_dims:\n raise ValueError(\n '%d-dimensional argument does not have enough '\n 'dimensions for all core dimensions %r'\n % (arg.ndim, core_dims))\n\n core_shape = arg.shape[-num_core_dims:]\n for dim, size in zip(core_dims, core_shape):\n if dim in dim_sizes:\n if size != dim_sizes[dim]:\n raise ValueError(\n 'inconsistent size for core dimension %r: %r vs %r'\n % (dim, size, dim_sizes[dim]))\n else:\n dim_sizes[dim] = size\n\n\ndef _parse_input_dimensions(args, input_core_dims):\n \"\"\"\n Parse broadcast and core dimensions for vectorize with a signature.\n\n Arguments\n ---------\n args : Tuple[ndarray, ...]\n Tuple of input arguments to examine.\n input_core_dims : List[Tuple[str, ...]]\n List of core dimensions corresponding to each input.\n\n Returns\n -------\n broadcast_shape : Tuple[int, ...]\n Common shape to broadcast all non-core dimensions to.\n dim_sizes : Dict[str, int]\n Common sizes for named core dimensions.\n \"\"\"\n broadcast_args = []\n dim_sizes = {}\n for arg, core_dims in zip(args, input_core_dims):\n _update_dim_sizes(dim_sizes, arg, core_dims)\n ndim = arg.ndim - len(core_dims)\n dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim])\n broadcast_args.append(dummy_array)\n broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args)\n return broadcast_shape, dim_sizes\n\n\ndef _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims):\n \"\"\"Helper for calculating broadcast shapes with core dimensions.\"\"\"\n return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims)\n for core_dims in list_of_core_dims]\n\n\ndef _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes,\n results=None):\n \"\"\"Helper for creating output arrays in vectorize.\"\"\"\n shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims)\n if dtypes is None:\n dtypes = [None] * len(shapes)\n if results is None:\n arrays = tuple(np.empty(shape=shape, dtype=dtype)\n for shape, dtype in zip(shapes, dtypes))\n else:\n arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype)\n for result, shape, dtype\n in zip(results, shapes, dtypes))\n return arrays\n\n\n@set_module('numpy')\nclass vectorize:\n \"\"\"\n vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,\n signature=None)\n\n Generalized function class.\n\n Define a vectorized function which takes a nested sequence of objects or\n numpy arrays as inputs and returns a single numpy array or a tuple of numpy\n arrays. The vectorized function evaluates `pyfunc` over successive tuples\n of the input arrays like the python map function, except it uses the\n broadcasting rules of numpy.\n\n The data type of the output of `vectorized` is determined by calling\n the function with the first element of the input. This can be avoided\n by specifying the `otypes` argument.\n\n Parameters\n ----------\n pyfunc : callable\n A python function or method.\n otypes : str or list of dtypes, optional\n The output data type. It must be specified as either a string of\n typecode characters or a list of data type specifiers. There should\n be one data type specifier for each output.\n doc : str, optional\n The docstring for the function. 
If None, the docstring will be the\n ``pyfunc.__doc__``.\n excluded : set, optional\n Set of strings or integers representing the positional or keyword\n arguments for which the function will not be vectorized. These will be\n passed directly to `pyfunc` unmodified.\n\n .. versionadded:: 1.7.0\n\n cache : bool, optional\n If `True`, then cache the first function call that determines the number\n of outputs if `otypes` is not provided.\n\n .. versionadded:: 1.7.0\n\n signature : string, optional\n Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for\n vectorized matrix-vector multiplication. If provided, ``pyfunc`` will\n be called with (and expected to return) arrays with shapes given by the\n size of corresponding core dimensions. By default, ``pyfunc`` is\n assumed to take scalars as input and output.\n\n .. versionadded:: 1.12.0\n\n Returns\n -------\n vectorized : callable\n Vectorized function.\n\n See Also\n --------\n frompyfunc : Takes an arbitrary Python function and returns a ufunc\n\n Notes\n -----\n The `vectorize` function is provided primarily for convenience, not for\n performance. The implementation is essentially a for loop.\n\n If `otypes` is not specified, then a call to the function with the\n first argument will be used to determine the number of outputs. The\n results of this call will be cached if `cache` is `True` to prevent\n calling the function twice. However, to implement the cache, the\n original function must be wrapped which will slow down subsequent\n calls, so only do this if your function is expensive.\n\n The new keyword argument interface and `excluded` argument support\n further degrades performance.\n\n References\n ----------\n .. [1] :doc:`/reference/c-api/generalized-ufuncs`\n\n Examples\n --------\n >>> def myfunc(a, b):\n ... \"Return a-b if a>b, otherwise return a+b\"\n ... if a > b:\n ... return a - b\n ... else:\n ... return a + b\n\n >>> vfunc = np.vectorize(myfunc)\n >>> vfunc([1, 2, 3, 4], 2)\n array([3, 4, 1, 2])\n\n The docstring is taken from the input function to `vectorize` unless it\n is specified:\n\n >>> vfunc.__doc__\n 'Return a-b if a>b, otherwise return a+b'\n >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')\n >>> vfunc.__doc__\n 'Vectorized `myfunc`'\n\n The output type is determined by evaluating the first element of the input,\n unless it is specified:\n\n >>> out = vfunc([1, 2, 3, 4], 2)\n >>> type(out[0])\n <class 'numpy.int64'>\n >>> vfunc = np.vectorize(myfunc, otypes=[float])\n >>> out = vfunc([1, 2, 3, 4], 2)\n >>> type(out[0])\n <class 'numpy.float64'>\n\n The `excluded` argument can be used to prevent vectorizing over certain\n arguments. This can be useful for array-like arguments of a fixed length\n such as the coefficients for a polynomial as in `polyval`:\n\n >>> def mypolyval(p, x):\n ... _p = list(p)\n ... res = _p.pop(0)\n ... while _p:\n ... res = res*x + _p.pop(0)\n ... return res\n >>> vpolyval = np.vectorize(mypolyval, excluded=['p'])\n >>> vpolyval(p=[1, 2, 3], x=[0, 1])\n array([3, 6])\n\n Positional arguments may also be excluded by specifying their position:\n\n >>> vpolyval.excluded.add(0)\n >>> vpolyval([1, 2, 3], x=[0, 1])\n array([3, 6])\n\n The `signature` argument allows for vectorizing functions that act on\n non-scalar arrays of fixed length. For example, you can use it for a\n vectorized calculation of Pearson correlation coefficient and its p-value:\n\n >>> import scipy.stats\n >>> pearsonr = np.vectorize(scipy.stats.pearsonr,\n ... 
signature='(n),(n)->(),()')\n >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])\n (array([ 1., -1.]), array([ 0., 0.]))\n\n Or for a vectorized convolution:\n\n >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')\n >>> convolve(np.eye(4), [1, 2, 1])\n array([[1., 2., 1., 0., 0., 0.],\n [0., 1., 2., 1., 0., 0.],\n [0., 0., 1., 2., 1., 0.],\n [0., 0., 0., 1., 2., 1.]])\n\n \"\"\"\n def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,\n cache=False, signature=None):\n self.pyfunc = pyfunc\n self.cache = cache\n self.signature = signature\n self._ufunc = {} # Caching to improve default performance\n\n if doc is None:\n self.__doc__ = pyfunc.__doc__\n else:\n self.__doc__ = doc\n\n if isinstance(otypes, str):\n for char in otypes:\n if char not in typecodes['All']:\n raise ValueError(\"Invalid otype specified: %s\" % (char,))\n elif iterable(otypes):\n otypes = ''.join([_nx.dtype(x).char for x in otypes])\n elif otypes is not None:\n raise ValueError(\"Invalid otype specification\")\n self.otypes = otypes\n\n # Excluded variable support\n if excluded is None:\n excluded = set()\n self.excluded = set(excluded)\n\n if signature is not None:\n self._in_and_out_core_dims = _parse_gufunc_signature(signature)\n else:\n self._in_and_out_core_dims = None\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Return arrays with the results of `pyfunc` broadcast (vectorized) over\n `args` and `kwargs` not in `excluded`.\n \"\"\"\n excluded = self.excluded\n if not kwargs and not excluded:\n func = self.pyfunc\n vargs = args\n else:\n # The wrapper accepts only positional arguments: we use `names` and\n # `inds` to mutate `the_args` and `kwargs` to pass to the original\n # function.\n nargs = len(args)\n\n names = [_n for _n in kwargs if _n not in excluded]\n inds = [_i for _i in range(nargs) if _i not in excluded]\n the_args = list(args)\n\n def func(*vargs):\n for _n, _i in enumerate(inds):\n the_args[_i] = vargs[_n]\n kwargs.update(zip(names, vargs[len(inds):]))\n return self.pyfunc(*the_args, **kwargs)\n\n vargs = [args[_i] for _i in inds]\n vargs.extend([kwargs[_n] for _n in names])\n\n return self._vectorize_call(func=func, args=vargs)\n\n def _get_ufunc_and_otypes(self, func, args):\n \"\"\"Return (ufunc, otypes).\"\"\"\n # frompyfunc will fail if args is empty\n if not args:\n raise ValueError('args can not be empty')\n\n if self.otypes is not None:\n otypes = self.otypes\n\n # self._ufunc is a dictionary whose keys are the number of\n # arguments (i.e. len(args)) and whose values are ufuncs created\n # by frompyfunc. len(args) can be different for different calls if\n # self.pyfunc has parameters with default values. We only use the\n # cache when func is self.pyfunc, which occurs when the call uses\n # only positional arguments and no arguments are excluded.\n\n nin = len(args)\n nout = len(self.otypes)\n if func is not self.pyfunc or nin not in self._ufunc:\n ufunc = frompyfunc(func, nin, nout)\n else:\n ufunc = None # We'll get it from self._ufunc\n if func is self.pyfunc:\n ufunc = self._ufunc.setdefault(nin, ufunc)\n else:\n # Get number of outputs and output types by calling the function on\n # the first entries of args. 
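(Concretely, the probe below evaluates\n # func(*[arg.flat[0] for arg in args]), which is why size-0 inputs\n # raise unless `otypes` is set.)\n #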
We also cache the result to prevent\n # the subsequent call when the ufunc is evaluated.\n # Assumes that ufunc first evaluates the 0th elements in the input\n # arrays (the input values are not checked to ensure this)\n args = [asarray(arg) for arg in args]\n if builtins.any(arg.size == 0 for arg in args):\n raise ValueError('cannot call `vectorize` on size 0 inputs '\n 'unless `otypes` is set')\n\n inputs = [arg.flat[0] for arg in args]\n outputs = func(*inputs)\n\n # Performance note: profiling indicates that -- for simple\n # functions at least -- this wrapping can almost double the\n # execution time.\n # Hence we make it optional.\n if self.cache:\n _cache = [outputs]\n\n def _func(*vargs):\n if _cache:\n return _cache.pop()\n else:\n return func(*vargs)\n else:\n _func = func\n\n if isinstance(outputs, tuple):\n nout = len(outputs)\n else:\n nout = 1\n outputs = (outputs,)\n\n otypes = ''.join([asarray(outputs[_k]).dtype.char\n for _k in range(nout)])\n\n # Performance note: profiling indicates that creating the ufunc is\n # not a significant cost compared with wrapping so it seems not\n # worth trying to cache this.\n ufunc = frompyfunc(_func, len(args), nout)\n\n return ufunc, otypes\n\n def _vectorize_call(self, func, args):\n \"\"\"Vectorized call to `func` over positional `args`.\"\"\"\n if self.signature is not None:\n res = self._vectorize_call_with_signature(func, args)\n elif not args:\n res = func()\n else:\n ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)\n\n # Convert args to object arrays first\n inputs = [asanyarray(a, dtype=object) for a in args]\n\n outputs = ufunc(*inputs)\n\n if ufunc.nout == 1:\n res = asanyarray(outputs, dtype=otypes[0])\n else:\n res = tuple([asanyarray(x, dtype=t)\n for x, t in zip(outputs, otypes)])\n return res\n\n def _vectorize_call_with_signature(self, func, args):\n \"\"\"Vectorized call over positional arguments with a signature.\"\"\"\n input_core_dims, output_core_dims = self._in_and_out_core_dims\n\n if len(args) != len(input_core_dims):\n raise TypeError('wrong number of positional arguments: '\n 'expected %r, got %r'\n % (len(input_core_dims), len(args)))\n args = tuple(asanyarray(arg) for arg in args)\n\n broadcast_shape, dim_sizes = _parse_input_dimensions(\n args, input_core_dims)\n input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,\n input_core_dims)\n args = [np.broadcast_to(arg, shape, subok=True)\n for arg, shape in zip(args, input_shapes)]\n\n outputs = None\n otypes = self.otypes\n nout = len(output_core_dims)\n\n for index in np.ndindex(*broadcast_shape):\n results = func(*(arg[index] for arg in args))\n\n n_results = len(results) if isinstance(results, tuple) else 1\n\n if nout != n_results:\n raise ValueError(\n 'wrong number of outputs from pyfunc: expected %r, got %r'\n % (nout, n_results))\n\n if nout == 1:\n results = (results,)\n\n if outputs is None:\n for result, core_dims in zip(results, output_core_dims):\n _update_dim_sizes(dim_sizes, result, core_dims)\n\n outputs = _create_arrays(broadcast_shape, dim_sizes,\n output_core_dims, otypes, results)\n\n for output, result in zip(outputs, results):\n output[index] = result\n\n if outputs is None:\n # did not call the function even once\n if otypes is None:\n raise ValueError('cannot call `vectorize` on size 0 inputs '\n 'unless `otypes` is set')\n if builtins.any(dim not in dim_sizes\n for dims in output_core_dims\n for dim in dims):\n raise ValueError('cannot call `vectorize` with a signature '\n 'including new output dimensions on size 0 
'\n 'inputs')\n outputs = _create_arrays(broadcast_shape, dim_sizes,\n output_core_dims, otypes)\n\n return outputs[0] if nout == 1 else outputs\n\n\ndef _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None,\n fweights=None, aweights=None, *, dtype=None):\n return (m, y, fweights, aweights)\n\n\n@array_function_dispatch(_cov_dispatcher)\ndef cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,\n aweights=None, *, dtype=None):\n \"\"\"\n Estimate a covariance matrix, given data and weights.\n\n Covariance indicates the level to which two variables vary together.\n If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,\n then the covariance matrix element :math:`C_{ij}` is the covariance of\n :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance\n of :math:`x_i`.\n\n See the notes for an outline of the algorithm.\n\n Parameters\n ----------\n m : array_like\n A 1-D or 2-D array containing multiple variables and observations.\n Each row of `m` represents a variable, and each column a single\n observation of all those variables. Also see `rowvar` below.\n y : array_like, optional\n An additional set of variables and observations. `y` has the same form\n as that of `m`.\n rowvar : bool, optional\n If `rowvar` is True (default), then each row represents a\n variable, with observations in the columns. Otherwise, the relationship\n is transposed: each column represents a variable, while the rows\n contain observations.\n bias : bool, optional\n Default normalization (False) is by ``(N - 1)``, where ``N`` is the\n number of observations given (unbiased estimate). If `bias` is True,\n then normalization is by ``N``. These values can be overridden by using\n the keyword ``ddof`` in numpy versions >= 1.5.\n ddof : int, optional\n If not ``None`` the default value implied by `bias` is overridden.\n Note that ``ddof=1`` will return the unbiased estimate, even if both\n `fweights` and `aweights` are specified, and ``ddof=0`` will return\n the simple average. See the notes for the details. The default value\n is ``None``.\n\n .. versionadded:: 1.5\n fweights : array_like, int, optional\n 1-D array of integer frequency weights; the number of times each\n observation vector should be repeated.\n\n .. versionadded:: 1.10\n aweights : array_like, optional\n 1-D array of observation vector weights. These relative weights are\n typically large for observations considered \"important\" and smaller for\n observations considered less \"important\". If ``ddof=0`` the array of\n weights can be used to assign probabilities to observation vectors.\n\n .. versionadded:: 1.10\n dtype : data-type, optional\n Data-type of the result. By default, the return data-type will have\n at least `numpy.float64` precision.\n\n .. versionadded:: 1.20\n\n Returns\n -------\n out : ndarray\n The covariance matrix of the variables.\n\n See Also\n --------\n corrcoef : Normalized covariance matrix\n\n Notes\n -----\n Assume that the observations are in the columns of the observation\n array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. 
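In the unweighted case this\n reduces to the familiar ``1/(N - 1)``-normalized product of the centered\n data with its transpose; for instance:\n\n >>> x = np.array([[0., 1., 2.], [2., 1., 0.]])\n >>> xc = x - x.mean(axis=1, keepdims=True)\n >>> np.allclose(np.cov(x), np.dot(xc, xc.T) / (x.shape[1] - 1))\n True\n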
The\n steps to compute the weighted covariance are as follows::\n\n >>> m = np.arange(10, dtype=np.float64)\n >>> f = np.arange(10) * 2\n >>> a = np.arange(10) ** 2.\n >>> ddof = 1\n >>> w = f * a\n >>> v1 = np.sum(w)\n >>> v2 = np.sum(w * a)\n >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1\n >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)\n\n Note that when ``a == 1``, the normalization factor\n ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``\n as it should.\n\n Examples\n --------\n Consider two variables, :math:`x_0` and :math:`x_1`, which\n correlate perfectly, but in opposite directions:\n\n >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T\n >>> x\n array([[0, 1, 2],\n [2, 1, 0]])\n\n Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance\n matrix shows this clearly:\n\n >>> np.cov(x)\n array([[ 1., -1.],\n [-1., 1.]])\n\n Note that element :math:`C_{0,1}`, which shows the correlation between\n :math:`x_0` and :math:`x_1`, is negative.\n\n Further, note how `x` and `y` are combined:\n\n >>> x = [-2.1, -1, 4.3]\n >>> y = [3, 1.1, 0.12]\n >>> X = np.stack((x, y), axis=0)\n >>> np.cov(X)\n array([[11.71 , -4.286 ], # may vary\n [-4.286 , 2.144133]])\n >>> np.cov(x, y)\n array([[11.71 , -4.286 ], # may vary\n [-4.286 , 2.144133]])\n >>> np.cov(x)\n array(11.71)\n\n \"\"\"\n # Check inputs\n if ddof is not None and ddof != int(ddof):\n raise ValueError(\n \"ddof must be integer\")\n\n # Handles complex arrays too\n m = np.asarray(m)\n if m.ndim > 2:\n raise ValueError(\"m has more than 2 dimensions\")\n\n if y is not None:\n y = np.asarray(y)\n if y.ndim > 2:\n raise ValueError(\"y has more than 2 dimensions\")\n\n if dtype is None:\n if y is None:\n dtype = np.result_type(m, np.float64)\n else:\n dtype = np.result_type(m, y, np.float64)\n\n X = array(m, ndmin=2, dtype=dtype)\n if not rowvar and X.shape[0] != 1:\n X = X.T\n if X.shape[0] == 0:\n return np.array([]).reshape(0, 0)\n if y is not None:\n y = array(y, copy=False, ndmin=2, dtype=dtype)\n if not rowvar and y.shape[0] != 1:\n y = y.T\n X = np.concatenate((X, y), axis=0)\n\n if ddof is None:\n if bias == 0:\n ddof = 1\n else:\n ddof = 0\n\n # Get the product of frequencies and weights\n w = None\n if fweights is not None:\n fweights = np.asarray(fweights, dtype=float)\n if not np.all(fweights == np.around(fweights)):\n raise TypeError(\n \"fweights must be integer\")\n if fweights.ndim > 1:\n raise RuntimeError(\n \"cannot handle multidimensional fweights\")\n if fweights.shape[0] != X.shape[1]:\n raise RuntimeError(\n \"incompatible numbers of samples and fweights\")\n if any(fweights < 0):\n raise ValueError(\n \"fweights cannot be negative\")\n w = fweights\n if aweights is not None:\n aweights = np.asarray(aweights, dtype=float)\n if aweights.ndim > 1:\n raise RuntimeError(\n \"cannot handle multidimensional aweights\")\n if aweights.shape[0] != X.shape[1]:\n raise RuntimeError(\n \"incompatible numbers of samples and aweights\")\n if any(aweights < 0):\n raise ValueError(\n \"aweights cannot be negative\")\n if w is None:\n w = aweights\n else:\n w *= aweights\n\n avg, w_sum = average(X, axis=1, weights=w, returned=True)\n w_sum = w_sum[0]\n\n # Determine the normalization\n if w is None:\n fact = X.shape[1] - ddof\n elif ddof == 0:\n fact = w_sum\n elif aweights is None:\n fact = w_sum - ddof\n else:\n fact = w_sum - ddof*sum(w*aweights)/w_sum\n\n if fact <= 0:\n warnings.warn(\"Degrees of freedom <= 0 for slice\",\n RuntimeWarning, stacklevel=3)\n fact = 0.0\n\n X -= avg[:, 
None]\n if w is None:\n X_T = X.T\n else:\n X_T = (X*w).T\n c = dot(X, X_T.conj())\n c *= np.true_divide(1, fact)\n return c.squeeze()\n\n\ndef _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *,\n dtype=None):\n return (x, y)\n\n\n@array_function_dispatch(_corrcoef_dispatcher)\ndef corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *,\n dtype=None):\n \"\"\"\n Return Pearson product-moment correlation coefficients.\n\n Please refer to the documentation for `cov` for more detail. The\n relationship between the correlation coefficient matrix, `R`, and the\n covariance matrix, `C`, is\n\n .. math:: R_{ij} = \\\\frac{ C_{ij} } { \\\\sqrt{ C_{ii} * C_{jj} } }\n\n The values of `R` are between -1 and 1, inclusive.\n\n Parameters\n ----------\n x : array_like\n A 1-D or 2-D array containing multiple variables and observations.\n Each row of `x` represents a variable, and each column a single\n observation of all those variables. Also see `rowvar` below.\n y : array_like, optional\n An additional set of variables and observations. `y` has the same\n shape as `x`.\n rowvar : bool, optional\n If `rowvar` is True (default), then each row represents a\n variable, with observations in the columns. Otherwise, the relationship\n is transposed: each column represents a variable, while the rows\n contain observations.\n bias : _NoValue, optional\n Has no effect, do not use.\n\n .. deprecated:: 1.10.0\n ddof : _NoValue, optional\n Has no effect, do not use.\n\n .. deprecated:: 1.10.0\n dtype : data-type, optional\n Data-type of the result. By default, the return data-type will have\n at least `numpy.float64` precision.\n\n .. versionadded:: 1.20\n\n Returns\n -------\n R : ndarray\n The correlation coefficient matrix of the variables.\n\n See Also\n --------\n cov : Covariance matrix\n\n Notes\n -----\n Due to floating point rounding the resulting array may not be Hermitian,\n the diagonal elements may not be 1, and the elements may not satisfy the\n inequality abs(a) <= 1. The real and imaginary parts are clipped to the\n interval [-1, 1] in an attempt to improve on that situation but is not\n much help in the complex case.\n\n This function accepts but discards arguments `bias` and `ddof`. This is\n for backwards compatibility with previous versions of this function. These\n arguments had no effect on the return values of the function and can be\n safely ignored in this and previous versions of numpy.\n\n Examples\n --------\n In this example we generate two random arrays, ``xarr`` and ``yarr``, and\n compute the row-wise and column-wise Pearson correlation coefficients,\n ``R``. Since ``rowvar`` is true by default, we first find the row-wise\n Pearson correlation coefficients between the variables of ``xarr``.\n\n >>> import numpy as np\n >>> rng = np.random.default_rng(seed=42)\n >>> xarr = rng.random((3, 3))\n >>> xarr\n array([[0.77395605, 0.43887844, 0.85859792],\n [0.69736803, 0.09417735, 0.97562235],\n [0.7611397 , 0.78606431, 0.12811363]])\n >>> R1 = np.corrcoef(xarr)\n >>> R1\n array([[ 1. , 0.99256089, -0.68080986],\n [ 0.99256089, 1. , -0.76492172],\n [-0.68080986, -0.76492172, 1. 
]])\n\n If we add another set of variables and observations ``yarr``, we can\n compute the row-wise Pearson correlation coefficients between the\n variables in ``xarr`` and ``yarr``.\n\n >>> yarr = rng.random((3, 3))\n >>> yarr\n array([[0.45038594, 0.37079802, 0.92676499],\n [0.64386512, 0.82276161, 0.4434142 ],\n [0.22723872, 0.55458479, 0.06381726]])\n >>> R2 = np.corrcoef(xarr, yarr)\n >>> R2\n array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 ,\n -0.99004057],\n [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098,\n -0.99981569],\n [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355,\n 0.77714685],\n [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855,\n -0.83571711],\n [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. ,\n 0.97517215],\n [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215,\n 1. ]])\n\n Finally if we use the option ``rowvar=False``, the columns are now\n being treated as the variables and we will find the column-wise Pearson\n correlation coefficients between variables in ``xarr`` and ``yarr``.\n\n >>> R3 = np.corrcoef(xarr, yarr, rowvar=False)\n >>> R3\n array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 ,\n 0.22423734],\n [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587,\n -0.44069024],\n [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648,\n 0.75137473],\n [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469,\n 0.47536961],\n [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. ,\n -0.46666491],\n [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491,\n 1. ]])\n\n \"\"\"\n if bias is not np._NoValue or ddof is not np._NoValue:\n # 2015-03-15, 1.10\n warnings.warn('bias and ddof have no effect and are deprecated',\n DeprecationWarning, stacklevel=3)\n c = cov(x, y, rowvar, dtype=dtype)\n try:\n d = diag(c)\n except ValueError:\n # scalar covariance\n # nan if incorrect value (nan, inf, 0), 1 otherwise\n return c / c\n stddev = sqrt(d.real)\n c /= stddev[:, None]\n c /= stddev[None, :]\n\n # Clip real and imaginary parts to [-1, 1]. This does not guarantee\n # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without\n # excessive work.\n np.clip(c.real, -1, 1, out=c.real)\n if np.iscomplexobj(c):\n np.clip(c.imag, -1, 1, out=c.imag)\n\n return c\n\n\n@set_module('numpy')\ndef blackman(M):\n \"\"\"\n Return the Blackman window.\n\n The Blackman window is a taper formed by using the first three\n terms of a summation of cosines. It was designed to have close to the\n minimal leakage possible. It is close to optimal, only slightly worse\n than a Kaiser window.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an empty\n array is returned.\n\n Returns\n -------\n out : ndarray\n The window, with the maximum value normalized to one (the value one\n appears only if the number of samples is odd).\n\n See Also\n --------\n bartlett, hamming, hanning, kaiser\n\n Notes\n -----\n The Blackman window is defined as\n\n .. math:: w(n) = 0.42 - 0.5 \\\\cos(2\\\\pi n/M) + 0.08 \\\\cos(4\\\\pi n/M)\n\n Most references to the Blackman window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function. 
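The\n implementation further below evaluates the same window on the symmetric\n grid ``n = arange(1-M, M, 2)``, so one can check, for example:\n\n >>> M = 12\n >>> n = np.arange(1 - M, M, 2)\n >>> w = 0.42 + 0.5*np.cos(np.pi*n/(M-1)) + 0.08*np.cos(2.0*np.pi*n/(M-1))\n >>> np.allclose(w, np.blackman(M))\n True\n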
It is known as a\n \"near optimal\" tapering function, almost as good (by some measures)\n as the kaiser window.\n\n References\n ----------\n Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,\n Dover Publications, New York.\n\n Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.\n Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> np.blackman(12)\n array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary\n 4.14397981e-01, 7.36045180e-01, 9.67046769e-01,\n 9.67046769e-01, 7.36045180e-01, 4.14397981e-01,\n 1.59903635e-01, 3.26064346e-02, -1.38777878e-17])\n\n Plot the window and the frequency response:\n\n >>> from numpy.fft import fft, fftshift\n >>> window = np.blackman(51)\n >>> plt.plot(window)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Blackman window\")\n Text(0.5, 1.0, 'Blackman window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n\n >>> plt.figure()\n <Figure size 640x480 with 0 Axes>\n >>> A = fft(window, 2048) / 25.5\n >>> mag = np.abs(fftshift(A))\n >>> freq = np.linspace(-0.5, 0.5, len(A))\n >>> with np.errstate(divide='ignore', invalid='ignore'):\n ... response = 20 * np.log10(mag)\n ...\n >>> response = np.clip(response, -100, 100)\n >>> plt.plot(freq, response)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Frequency response of Blackman window\")\n Text(0.5, 1.0, 'Frequency response of Blackman window')\n >>> plt.ylabel(\"Magnitude [dB]\")\n Text(0, 0.5, 'Magnitude [dB]')\n >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")\n Text(0.5, 0, 'Normalized frequency [cycles per sample]')\n >>> _ = plt.axis('tight')\n >>> plt.show()\n\n \"\"\"\n if M < 1:\n return array([], dtype=np.result_type(M, 0.0))\n if M == 1:\n return ones(1, dtype=np.result_type(M, 0.0))\n n = arange(1-M, M, 2)\n return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))\n\n\n@set_module('numpy')\ndef bartlett(M):\n \"\"\"\n Return the Bartlett window.\n\n The Bartlett window is very similar to a triangular window, except\n that the end points are at zero. It is often used in signal\n processing for tapering a signal, without generating too much\n ripple in the frequency domain.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n\n Returns\n -------\n out : array\n The triangular window, with the maximum value normalized to one\n (the value one appears only if the number of samples is odd), with\n the first and last samples equal to zero.\n\n See Also\n --------\n blackman, hamming, hanning, kaiser\n\n Notes\n -----\n The Bartlett window is defined as\n\n .. math:: w(n) = \\\\frac{2}{M-1} \\\\left(\n \\\\frac{M-1}{2} - \\\\left|n - \\\\frac{M-1}{2}\\\\right|\n \\\\right)\n\n Most references to the Bartlett window come from the signal\n processing literature, where it is used as one of many windowing\n functions for smoothing values. Note that convolution with this\n window produces linear interpolation. It is also known as an\n apodization (which means\"removing the foot\", i.e. smoothing\n discontinuities at the beginning and end of the sampled signal) or\n tapering function. The fourier transform of the Bartlett is the product\n of two sinc functions.\n Note the excellent discussion in Kanasewich.\n\n References\n ----------\n .. [1] M.S. 
Bartlett, \"Periodogram Analysis and Continuous Spectra\",\n Biometrika 37, 1-16, 1950.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\",\n The University of Alberta Press, 1975, pp. 109-110.\n .. [3] A.V. Oppenheim and R.W. Schafer, \"Discrete-Time Signal\n Processing\", Prentice-Hall, 1999, pp. 468-471.\n .. [4] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 429.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> np.bartlett(12)\n array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary\n 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,\n 0.18181818, 0. ])\n\n Plot the window and its frequency response (requires SciPy and matplotlib):\n\n >>> from numpy.fft import fft, fftshift\n >>> window = np.bartlett(51)\n >>> plt.plot(window)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Bartlett window\")\n Text(0.5, 1.0, 'Bartlett window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n\n >>> plt.figure()\n <Figure size 640x480 with 0 Axes>\n >>> A = fft(window, 2048) / 25.5\n >>> mag = np.abs(fftshift(A))\n >>> freq = np.linspace(-0.5, 0.5, len(A))\n >>> with np.errstate(divide='ignore', invalid='ignore'):\n ... response = 20 * np.log10(mag)\n ...\n >>> response = np.clip(response, -100, 100)\n >>> plt.plot(freq, response)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Frequency response of Bartlett window\")\n Text(0.5, 1.0, 'Frequency response of Bartlett window')\n >>> plt.ylabel(\"Magnitude [dB]\")\n Text(0, 0.5, 'Magnitude [dB]')\n >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")\n Text(0.5, 0, 'Normalized frequency [cycles per sample]')\n >>> _ = plt.axis('tight')\n >>> plt.show()\n\n \"\"\"\n if M < 1:\n return array([], dtype=np.result_type(M, 0.0))\n if M == 1:\n return ones(1, dtype=np.result_type(M, 0.0))\n n = arange(1-M, M, 2)\n return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))\n\n\n@set_module('numpy')\ndef hanning(M):\n \"\"\"\n Return the Hanning window.\n\n The Hanning window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n\n Returns\n -------\n out : ndarray, shape(M,)\n The window, with the maximum value normalized to one (the value\n one appears only if `M` is odd).\n\n See Also\n --------\n bartlett, blackman, hamming, kaiser\n\n Notes\n -----\n The Hanning window is defined as\n\n .. math:: w(n) = 0.5 - 0.5cos\\\\left(\\\\frac{2\\\\pi{n}}{M-1}\\\\right)\n \\\\qquad 0 \\\\leq n \\\\leq M-1\n\n The Hanning was named for Julius von Hann, an Austrian meteorologist.\n It is also known as the Cosine Bell. Some authors prefer that it be\n called a Hann window, to help avoid confusion with the very similar\n Hamming window.\n\n Most references to the Hanning window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. 
and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\",\n The University of Alberta Press, 1975, pp. 106-108.\n .. [3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hanning(12)\n array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,\n 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,\n 0.07937323, 0. ])\n\n Plot the window and its frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> from numpy.fft import fft, fftshift\n >>> window = np.hanning(51)\n >>> plt.plot(window)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Hann window\")\n Text(0.5, 1.0, 'Hann window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n\n >>> plt.figure()\n <Figure size 640x480 with 0 Axes>\n >>> A = fft(window, 2048) / 25.5\n >>> mag = np.abs(fftshift(A))\n >>> freq = np.linspace(-0.5, 0.5, len(A))\n >>> with np.errstate(divide='ignore', invalid='ignore'):\n ... response = 20 * np.log10(mag)\n ...\n >>> response = np.clip(response, -100, 100)\n >>> plt.plot(freq, response)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Frequency response of the Hann window\")\n Text(0.5, 1.0, 'Frequency response of the Hann window')\n >>> plt.ylabel(\"Magnitude [dB]\")\n Text(0, 0.5, 'Magnitude [dB]')\n >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")\n Text(0.5, 0, 'Normalized frequency [cycles per sample]')\n >>> plt.axis('tight')\n ...\n >>> plt.show()\n\n \"\"\"\n if M < 1:\n return array([], dtype=np.result_type(M, 0.0))\n if M == 1:\n return ones(1, dtype=np.result_type(M, 0.0))\n n = arange(1-M, M, 2)\n return 0.5 + 0.5*cos(pi*n/(M-1))\n\n\n@set_module('numpy')\ndef hamming(M):\n \"\"\"\n Return the Hamming window.\n\n The Hamming window is a taper formed by using a weighted cosine.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n\n Returns\n -------\n out : ndarray\n The window, with the maximum value normalized to one (the value\n one appears only if the number of samples is odd).\n\n See Also\n --------\n bartlett, blackman, hanning, kaiser\n\n Notes\n -----\n The Hamming window is defined as\n\n .. math:: w(n) = 0.54 - 0.46cos\\\\left(\\\\frac{2\\\\pi{n}}{M-1}\\\\right)\n \\\\qquad 0 \\\\leq n \\\\leq M-1\n\n The Hamming was named for R. W. Hamming, an associate of J. W. Tukey\n and is described in Blackman and Tukey. It was recommended for\n smoothing the truncated autocovariance function in the time domain.\n Most references to the Hamming window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power\n spectra, Dover Publications, New York.\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\", The\n University of Alberta Press, 1975, pp. 109-110.\n .. 
[3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,\n \"Numerical Recipes\", Cambridge University Press, 1986, page 425.\n\n Examples\n --------\n >>> np.hamming(12)\n array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary\n 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,\n 0.15302337, 0.08 ])\n\n Plot the window and the frequency response:\n\n >>> import matplotlib.pyplot as plt\n >>> from numpy.fft import fft, fftshift\n >>> window = np.hamming(51)\n >>> plt.plot(window)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Hamming window\")\n Text(0.5, 1.0, 'Hamming window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n\n >>> plt.figure()\n <Figure size 640x480 with 0 Axes>\n >>> A = fft(window, 2048) / 25.5\n >>> mag = np.abs(fftshift(A))\n >>> freq = np.linspace(-0.5, 0.5, len(A))\n >>> response = 20 * np.log10(mag)\n >>> response = np.clip(response, -100, 100)\n >>> plt.plot(freq, response)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Frequency response of Hamming window\")\n Text(0.5, 1.0, 'Frequency response of Hamming window')\n >>> plt.ylabel(\"Magnitude [dB]\")\n Text(0, 0.5, 'Magnitude [dB]')\n >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")\n Text(0.5, 0, 'Normalized frequency [cycles per sample]')\n >>> plt.axis('tight')\n ...\n >>> plt.show()\n\n \"\"\"\n if M < 1:\n return array([], dtype=np.result_type(M, 0.0))\n if M == 1:\n return ones(1, dtype=np.result_type(M, 0.0))\n n = arange(1-M, M, 2)\n return 0.54 + 0.46*cos(pi*n/(M-1))\n\n\n## Code from cephes for i0\n\n_i0A = [\n -4.41534164647933937950E-18,\n 3.33079451882223809783E-17,\n -2.43127984654795469359E-16,\n 1.71539128555513303061E-15,\n -1.16853328779934516808E-14,\n 7.67618549860493561688E-14,\n -4.85644678311192946090E-13,\n 2.95505266312963983461E-12,\n -1.72682629144155570723E-11,\n 9.67580903537323691224E-11,\n -5.18979560163526290666E-10,\n 2.65982372468238665035E-9,\n -1.30002500998624804212E-8,\n 6.04699502254191894932E-8,\n -2.67079385394061173391E-7,\n 1.11738753912010371815E-6,\n -4.41673835845875056359E-6,\n 1.64484480707288970893E-5,\n -5.75419501008210370398E-5,\n 1.88502885095841655729E-4,\n -5.76375574538582365885E-4,\n 1.63947561694133579842E-3,\n -4.32430999505057594430E-3,\n 1.05464603945949983183E-2,\n -2.37374148058994688156E-2,\n 4.93052842396707084878E-2,\n -9.49010970480476444210E-2,\n 1.71620901522208775349E-1,\n -3.04682672343198398683E-1,\n 6.76795274409476084995E-1\n ]\n\n_i0B = [\n -7.23318048787475395456E-18,\n -4.83050448594418207126E-18,\n 4.46562142029675999901E-17,\n 3.46122286769746109310E-17,\n -2.82762398051658348494E-16,\n -3.42548561967721913462E-16,\n 1.77256013305652638360E-15,\n 3.81168066935262242075E-15,\n -9.55484669882830764870E-15,\n -4.15056934728722208663E-14,\n 1.54008621752140982691E-14,\n 3.85277838274214270114E-13,\n 7.18012445138366623367E-13,\n -1.79417853150680611778E-12,\n -1.32158118404477131188E-11,\n -3.14991652796324136454E-11,\n 1.18891471078464383424E-11,\n 4.94060238822496958910E-10,\n 3.39623202570838634515E-9,\n 2.26666899049817806459E-8,\n 2.04891858946906374183E-7,\n 2.89137052083475648297E-6,\n 6.88975834691682398426E-5,\n 3.36911647825569408990E-3,\n 8.04490411014108831608E-1\n ]\n\n\ndef _chbevl(x, vals):\n b0 = vals[0]\n b1 = 0.0\n\n for i in range(1, len(vals)):\n b2 = b1\n b1 = b0\n 
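# Clenshaw recurrence for evaluating the Chebyshev series at x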
b0 = x*b1 - b2 + vals[i]\n\n return 0.5*(b0 - b2)\n\n\ndef _i0_1(x):\n return exp(x) * _chbevl(x/2.0-2, _i0A)\n\n\ndef _i0_2(x):\n return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)\n\n\ndef _i0_dispatcher(x):\n return (x,)\n\n\n@array_function_dispatch(_i0_dispatcher)\ndef i0(x):\n \"\"\"\n Modified Bessel function of the first kind, order 0.\n\n Usually denoted :math:`I_0`.\n\n Parameters\n ----------\n x : array_like of float\n Argument of the Bessel function.\n\n Returns\n -------\n out : ndarray, shape = x.shape, dtype = float\n The modified Bessel function evaluated at each of the elements of `x`.\n\n See Also\n --------\n scipy.special.i0, scipy.special.iv, scipy.special.ive\n\n Notes\n -----\n The scipy implementation is recommended over this function: it is a\n proper ufunc written in C, and more than an order of magnitude faster.\n\n We use the algorithm published by Clenshaw [1]_ and referenced by\n Abramowitz and Stegun [2]_, for which the function domain is\n partitioned into the two intervals [0,8] and (8,inf), and Chebyshev\n polynomial expansions are employed in each interval. Relative error on\n the domain [0,30] using IEEE arithmetic is documented [3]_ as having a\n peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000).\n\n References\n ----------\n .. [1] C. W. Clenshaw, \"Chebyshev series for mathematical functions\", in\n *National Physical Laboratory Mathematical Tables*, vol. 5, London:\n Her Majesty's Stationery Office, 1962.\n .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical\n Functions*, 10th printing, New York: Dover, 1964, pp. 379.\n https://personal.math.ubc.ca/~cbm/aands/page_379.htm\n .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero\n\n Examples\n --------\n >>> np.i0(0.)\n array(1.0)\n >>> np.i0([0, 1, 2, 3])\n array([1. , 1.26606588, 2.2795853 , 4.88079259])\n\n \"\"\"\n x = np.asanyarray(x)\n if x.dtype.kind == 'c':\n raise TypeError(\"i0 not supported for complex values\")\n if x.dtype.kind != 'f':\n x = x.astype(float)\n x = np.abs(x)\n return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])\n\n## End of cephes code for i0\n\n\n@set_module('numpy')\ndef kaiser(M, beta):\n \"\"\"\n Return the Kaiser window.\n\n The Kaiser window is a taper formed by using a Bessel function.\n\n Parameters\n ----------\n M : int\n Number of points in the output window. If zero or less, an\n empty array is returned.\n beta : float\n Shape parameter for window.\n\n Returns\n -------\n out : array\n The window, with the maximum value normalized to one (the value\n one appears only if the number of samples is odd).\n\n See Also\n --------\n bartlett, blackman, hamming, hanning\n\n Notes\n -----\n The Kaiser window is defined as\n\n .. math:: w(n) = I_0\\\\left( \\\\beta \\\\sqrt{1-\\\\frac{4n^2}{(M-1)^2}}\n \\\\right)/I_0(\\\\beta)\n\n with\n\n .. math:: \\\\quad -\\\\frac{M-1}{2} \\\\leq n \\\\leq \\\\frac{M-1}{2},\n\n where :math:`I_0` is the modified zeroth-order Bessel function.\n\n The Kaiser was named for Jim Kaiser, who discovered a simple\n approximation to the DPSS window based on Bessel functions. 
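As a quick check against the beta\n table below, ``beta = 0`` reproduces a rectangular window:\n\n >>> np.allclose(np.kaiser(5, 0), np.ones(5))\n True\n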
The Kaiser\n window is a very good approximation to the Digital Prolate Spheroidal\n Sequence, or Slepian window, which is the transform which maximizes the\n energy in the main lobe of the window relative to total energy.\n\n The Kaiser can approximate many other windows by varying the beta\n parameter.\n\n ==== =======================\n beta Window shape\n ==== =======================\n 0 Rectangular\n 5 Similar to a Hamming\n 6 Similar to a Hanning\n 8.6 Similar to a Blackman\n ==== =======================\n\n A beta value of 14 is probably a good starting point. Note that as beta\n gets large, the window narrows, and so the number of samples needs to be\n large enough to sample the increasingly narrow spike, otherwise NaNs will\n get returned.\n\n Most references to the Kaiser window come from the signal processing\n literature, where it is used as one of many windowing functions for\n smoothing values. It is also known as an apodization (which means\n \"removing the foot\", i.e. smoothing discontinuities at the beginning\n and end of the sampled signal) or tapering function.\n\n References\n ----------\n .. [1] J. F. Kaiser, \"Digital Filters\" - Ch 7 in \"Systems analysis by\n digital computer\", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.\n John Wiley and Sons, New York, (1966).\n .. [2] E.R. Kanasewich, \"Time Sequence Analysis in Geophysics\", The\n University of Alberta Press, 1975, pp. 177-178.\n .. [3] Wikipedia, \"Window function\",\n https://en.wikipedia.org/wiki/Window_function\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> np.kaiser(12, 14)\n array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary\n 2.29737120e-01, 5.99885316e-01, 9.45674898e-01,\n 9.45674898e-01, 5.99885316e-01, 2.29737120e-01,\n 4.65200189e-02, 3.46009194e-03, 7.72686684e-06])\n\n\n Plot the window and the frequency response:\n\n >>> from numpy.fft import fft, fftshift\n >>> window = np.kaiser(51, 14)\n >>> plt.plot(window)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Kaiser window\")\n Text(0.5, 1.0, 'Kaiser window')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"Sample\")\n Text(0.5, 0, 'Sample')\n >>> plt.show()\n\n >>> plt.figure()\n <Figure size 640x480 with 0 Axes>\n >>> A = fft(window, 2048) / 25.5\n >>> mag = np.abs(fftshift(A))\n >>> freq = np.linspace(-0.5, 0.5, len(A))\n >>> response = 20 * np.log10(mag)\n >>> response = np.clip(response, -100, 100)\n >>> plt.plot(freq, response)\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Frequency response of Kaiser window\")\n Text(0.5, 1.0, 'Frequency response of Kaiser window')\n >>> plt.ylabel(\"Magnitude [dB]\")\n Text(0, 0.5, 'Magnitude [dB]')\n >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")\n Text(0.5, 0, 'Normalized frequency [cycles per sample]')\n >>> plt.axis('tight')\n (-0.5, 0.5, -100.0, ...) # may vary\n >>> plt.show()\n\n \"\"\"\n if M == 1:\n return np.ones(1, dtype=np.result_type(M, 0.0))\n n = arange(0, M)\n alpha = (M-1)/2.0\n return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))\n\n\ndef _sinc_dispatcher(x):\n return (x,)\n\n\n@array_function_dispatch(_sinc_dispatcher)\ndef sinc(x):\n r\"\"\"\n Return the normalized sinc function.\n\n The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.\n\n .. 
note::\n\n Note the normalization factor of ``pi`` used in the definition.\n This is the most commonly used definition in signal processing.\n Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function\n :math:`\\sin(x)/(x)` that is more common in mathematics.\n\n Parameters\n ----------\n x : ndarray\n Array (possibly multi-dimensional) of values for which to calculate\n ``sinc(x)``.\n\n Returns\n -------\n out : ndarray\n ``sinc(x)``, which has the same shape as the input.\n\n Notes\n -----\n ``sinc(0)`` is the limit value 1.\n\n The name sinc is short for \"sine cardinal\" or \"sinus cardinalis\".\n\n The sinc function is used in various signal processing applications,\n including in anti-aliasing, in the construction of a Lanczos resampling\n filter, and in interpolation.\n\n For bandlimited interpolation of discrete-time signals, the ideal\n interpolation kernel is proportional to the sinc function.\n\n References\n ----------\n .. [1] Weisstein, Eric W. \"Sinc Function.\" From MathWorld--A Wolfram Web\n Resource. http://mathworld.wolfram.com/SincFunction.html\n .. [2] Wikipedia, \"Sinc function\",\n https://en.wikipedia.org/wiki/Sinc_function\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> x = np.linspace(-4, 4, 41)\n >>> np.sinc(x)\n array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary\n -8.90384387e-02, -5.84680802e-02, 3.89804309e-17,\n 6.68206631e-02, 1.16434881e-01, 1.26137788e-01,\n 8.50444803e-02, -3.89804309e-17, -1.03943254e-01,\n -1.89206682e-01, -2.16236208e-01, -1.55914881e-01,\n 3.89804309e-17, 2.33872321e-01, 5.04551152e-01,\n 7.56826729e-01, 9.35489284e-01, 1.00000000e+00,\n 9.35489284e-01, 7.56826729e-01, 5.04551152e-01,\n 2.33872321e-01, 3.89804309e-17, -1.55914881e-01,\n -2.16236208e-01, -1.89206682e-01, -1.03943254e-01,\n -3.89804309e-17, 8.50444803e-02, 1.26137788e-01,\n 1.16434881e-01, 6.68206631e-02, 3.89804309e-17,\n -5.84680802e-02, -8.90384387e-02, -8.40918587e-02,\n -4.92362781e-02, -3.89804309e-17])\n\n >>> plt.plot(x, np.sinc(x))\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.title(\"Sinc Function\")\n Text(0.5, 1.0, 'Sinc Function')\n >>> plt.ylabel(\"Amplitude\")\n Text(0, 0.5, 'Amplitude')\n >>> plt.xlabel(\"X\")\n Text(0.5, 0, 'X')\n >>> plt.show()\n\n \"\"\"\n x = np.asanyarray(x)\n y = pi * where(x == 0, 1.0e-20, x)\n return sin(y)/y\n\n\ndef _msort_dispatcher(a):\n return (a,)\n\n\n@array_function_dispatch(_msort_dispatcher)\ndef msort(a):\n \"\"\"\n Return a copy of an array sorted along the first axis.\n\n Parameters\n ----------\n a : array_like\n Array to be sorted.\n\n Returns\n -------\n sorted_array : ndarray\n Array of the same type and shape as `a`.\n\n See Also\n --------\n sort\n\n Notes\n -----\n ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.\n\n \"\"\"\n b = array(a, subok=True, copy=True)\n b.sort(0)\n return b\n\n\ndef _ureduce(a, func, **kwargs):\n \"\"\"\n Internal Function.\n Call `func` with `a` as first argument swapping the axes to use extended\n axis on functions that don't support it natively.\n\n Returns result and a.shape with axis dims set to 1.\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n func : callable\n Reduction function capable of receiving a single axis argument.\n It is called with `a` as first argument followed by `kwargs`.\n kwargs : keyword arguments\n additional keyword arguments to pass to `func`.\n\n Returns\n -------\n result : tuple\n Result of func(a, **kwargs) and a.shape with axis dims 
set to 1\n which can be used to reshape the result to the same shape a ufunc with\n keepdims=True would produce.\n\n \"\"\"\n a = np.asanyarray(a)\n axis = kwargs.get('axis', None)\n if axis is not None:\n keepdim = list(a.shape)\n nd = a.ndim\n axis = _nx.normalize_axis_tuple(axis, nd)\n\n for ax in axis:\n keepdim[ax] = 1\n\n if len(axis) == 1:\n kwargs['axis'] = axis[0]\n else:\n keep = set(range(nd)) - set(axis)\n nkeep = len(keep)\n # swap axis that should not be reduced to front\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n # merge reduced axis\n a = a.reshape(a.shape[:nkeep] + (-1,))\n kwargs['axis'] = -1\n keepdim = tuple(keepdim)\n else:\n keepdim = (1,) * a.ndim\n\n r = func(a, **kwargs)\n return r, keepdim\n\n\ndef _median_dispatcher(\n a, axis=None, out=None, overwrite_input=None, keepdims=None):\n return (a, out)\n\n\n@array_function_dispatch(_median_dispatcher)\ndef median(a, axis=None, out=None, overwrite_input=False, keepdims=False):\n \"\"\"\n Compute the median along the specified axis.\n\n Returns the median of the array elements.\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : {int, sequence of int, None}, optional\n Axis or axes along which the medians are computed. The default\n is to compute the median along a flattened version of the array.\n A sequence of axes is supported since version 1.9.0.\n out : ndarray, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n overwrite_input : bool, optional\n If True, then allow use of memory of input array `a` for\n calculations. The input array will be modified by the call to\n `median`. This will save memory when you do not need to preserve\n the contents of the input array. Treat the input as undefined,\n but it will probably be fully or partially sorted. Default is\n False. If `overwrite_input` is ``True`` and `a` is not already an\n `ndarray`, an error will be raised.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `arr`.\n\n .. versionadded:: 1.9.0\n\n Returns\n -------\n median : ndarray\n A new array holding the result. If the input contains integers\n or floats smaller than ``float64``, then the output data-type is\n ``np.float64``. Otherwise, the data-type of the output is the\n same as that of the input. 
If `out` is specified, that array is\n returned instead.\n\n See Also\n --------\n mean, percentile\n\n Notes\n -----\n Given a vector ``V`` of length ``N``, the median of ``V`` is the\n middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,\n ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the\n two middle values of ``V_sorted`` when ``N`` is even.\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.median(a)\n 3.5\n >>> np.median(a, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.median(a, axis=1)\n array([7., 2.])\n >>> m = np.median(a, axis=0)\n >>> out = np.zeros_like(m)\n >>> np.median(a, axis=0, out=m)\n array([6.5, 4.5, 2.5])\n >>> m\n array([6.5, 4.5, 2.5])\n >>> b = a.copy()\n >>> np.median(b, axis=1, overwrite_input=True)\n array([7., 2.])\n >>> assert not np.all(a==b)\n >>> b = a.copy()\n >>> np.median(b, axis=None, overwrite_input=True)\n 3.5\n >>> assert not np.all(a==b)\n\n \"\"\"\n r, k = _ureduce(a, func=_median, axis=axis, out=out,\n overwrite_input=overwrite_input)\n if keepdims:\n return r.reshape(k)\n else:\n return r\n\n\ndef _median(a, axis=None, out=None, overwrite_input=False):\n # can't reasonably be implemented in terms of percentile as we have to\n # call mean to not break astropy\n a = np.asanyarray(a)\n\n # Set the partition indexes\n if axis is None:\n sz = a.size\n else:\n sz = a.shape[axis]\n if sz % 2 == 0:\n szh = sz // 2\n kth = [szh - 1, szh]\n else:\n kth = [(sz - 1) // 2]\n # Check if the array contains any nan's\n if np.issubdtype(a.dtype, np.inexact):\n kth.append(-1)\n\n if overwrite_input:\n if axis is None:\n part = a.ravel()\n part.partition(kth)\n else:\n a.partition(kth, axis=axis)\n part = a\n else:\n part = partition(a, kth, axis=axis)\n\n if part.shape == ():\n # make 0-D arrays work\n return part.item()\n if axis is None:\n axis = 0\n\n indexer = [slice(None)] * part.ndim\n index = part.shape[axis] // 2\n if part.shape[axis] % 2 == 1:\n # index with slice to allow mean (below) to work\n indexer[axis] = slice(index, index+1)\n else:\n indexer[axis] = slice(index-1, index+1)\n indexer = tuple(indexer)\n\n # Use mean in both odd and even case to coerce data type,\n # using out array if needed.\n rout = mean(part[indexer], axis=axis, out=out)\n # Check if the array contains any nan's\n if np.issubdtype(a.dtype, np.inexact) and sz > 0:\n # If nans are possible, warn and replace by nans like mean would.\n rout = np.lib.utils._median_nancheck(part, rout, axis)\n\n return rout\n\n\ndef _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,\n method=None, keepdims=None, *, interpolation=None):\n return (a, q, out)\n\n\n@array_function_dispatch(_percentile_dispatcher)\ndef percentile(a,\n q,\n axis=None,\n out=None,\n overwrite_input=False,\n method=\"linear\",\n keepdims=False,\n *,\n interpolation=None):\n \"\"\"\n Compute the q-th percentile of the data along the specified axis.\n\n Returns the q-th percentile(s) of the array elements.\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n q : array_like of float\n Percentile or sequence of percentiles to compute, which must be between\n 0 and 100 inclusive.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the percentiles are computed. The\n default is to compute the percentile(s) along a flattened\n version of the array.\n\n .. 
versionchanged:: 1.9.0\n A tuple of axes is supported\n out : ndarray, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n overwrite_input : bool, optional\n If True, then allow the input array `a` to be modified by intermediate\n calculations, to save memory. In this case, the contents of the input\n `a` after this function completes is undefined.\n method : str, optional\n This parameter specifies the method to use for estimating the\n percentile. There are many different methods, some unique to NumPy.\n See the notes for explanation. The options sorted by their R type\n as summarized in the H&F paper [1]_ are:\n\n 1. 'inverted_cdf'\n 2. 'averaged_inverted_cdf'\n 3. 'closest_observation'\n 4. 'interpolated_inverted_cdf'\n 5. 'hazen'\n 6. 'weibull'\n 7. 'linear' (default)\n 8. 'median_unbiased'\n 9. 'normal_unbiased'\n\n The first three methods are discontinuous. NumPy further defines the\n following discontinuous variations of the default 'linear' (7.) option:\n\n * 'lower'\n * 'higher',\n * 'midpoint'\n * 'nearest'\n\n .. versionchanged:: 1.22.0\n This argument was previously called \"interpolation\" and only\n offered the \"linear\" default and last four options.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the\n result will broadcast correctly against the original array `a`.\n\n .. versionadded:: 1.9.0\n\n interpolation : str, optional\n Deprecated name for the method keyword argument.\n\n .. deprecated:: 1.22.0\n\n Returns\n -------\n percentile : scalar or ndarray\n If `q` is a single percentile and `axis=None`, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the percentiles. The other axes are\n the axes that remain after the reduction of `a`. If the input\n contains integers or floats smaller than ``float64``, the output\n data-type is ``float64``. Otherwise, the output data-type is the\n same as that of the input. If `out` is specified, that array is\n returned instead.\n\n See Also\n --------\n mean\n median : equivalent to ``percentile(..., 50)``\n nanpercentile\n quantile : equivalent to percentile, except q in the range [0, 1].\n\n Notes\n -----\n Given a vector ``V`` of length ``N``, the q-th percentile of ``V`` is\n the value ``q/100`` of the way from the minimum to the maximum in a\n sorted copy of ``V``. The values and distances of the two nearest\n neighbors as well as the `method` parameter will determine the\n percentile if the normalized ranking does not match the location of\n ``q`` exactly. This function is the same as the median if ``q=50``, the\n same as the minimum if ``q=0`` and the same as the maximum if\n ``q=100``.\n\n The optional `method` parameter specifies the method to use when the\n desired quantile lies between two data points ``i < j``.\n ``g`` is the fractional part of the index surrounded by ``i`` and ``j``,\n and alpha and beta are correction constants modifying i and j.\n\n Below, 'q' is the quantile value, 'n' is the sample size and\n alpha and beta are constants.\n The following formula gives an interpolation \"i + g\" of where the quantile\n would be in the sorted sample.\n With 'i' being the floor and 'g' the fractional part of the result.\n\n .. 
math::\n i + g = (q - alpha) / ( n - alpha - beta + 1 )\n\n The different methods then work as follows\n\n inverted_cdf:\n method 1 of H&F [1]_.\n This method gives discontinuous results:\n * if g > 0 ; then take j\n * if g = 0 ; then take i\n\n averaged_inverted_cdf:\n method 2 of H&F [1]_.\n This method gives discontinuous results:\n * if g > 0 ; then take j\n * if g = 0 ; then average between bounds\n\n closest_observation:\n method 3 of H&F [1]_.\n This method gives discontinuous results:\n * if g > 0 ; then take j\n * if g = 0 and index is odd ; then take j\n * if g = 0 and index is even ; then take i\n\n interpolated_inverted_cdf:\n method 4 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 0\n * beta = 1\n\n hazen:\n method 5 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 1/2\n * beta = 1/2\n\n weibull:\n method 6 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 0\n * beta = 0\n\n linear:\n method 7 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 1\n * beta = 1\n\n median_unbiased:\n method 8 of H&F [1]_.\n This method is probably the best method if the sample\n distribution function is unknown (see reference).\n This method gives continuous results using:\n * alpha = 1/3\n * beta = 1/3\n\n normal_unbiased:\n method 9 of H&F [1]_.\n This method is probably the best method if the sample\n distribution function is known to be normal.\n This method gives continuous results using:\n * alpha = 3/8\n * beta = 3/8\n\n lower:\n NumPy method kept for backwards compatibility.\n Takes ``i`` as the interpolation point.\n\n higher:\n NumPy method kept for backwards compatibility.\n Takes ``j`` as the interpolation point.\n\n nearest:\n NumPy method kept for backwards compatibility.\n Takes ``i`` or ``j``, whichever is nearest.\n\n midpoint:\n NumPy method kept for backwards compatibility.\n Uses ``(i + j) / 2``.\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.percentile(a, 50)\n 3.5\n >>> np.percentile(a, 50, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.percentile(a, 50, axis=1)\n array([7., 2.])\n >>> np.percentile(a, 50, axis=1, keepdims=True)\n array([[7.],\n [2.]])\n\n >>> m = np.percentile(a, 50, axis=0)\n >>> out = np.zeros_like(m)\n >>> np.percentile(a, 50, axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> m\n array([6.5, 4.5, 2.5])\n\n >>> b = a.copy()\n >>> np.percentile(b, 50, axis=1, overwrite_input=True)\n array([7., 2.])\n >>> assert not np.all(a == b)\n\n The different methods can be visualized graphically:\n\n .. plot::\n\n import matplotlib.pyplot as plt\n\n a = np.arange(4)\n p = np.linspace(0, 100, 6001)\n ax = plt.gca()\n lines = [\n ('linear', '-', 'C0'),\n ('inverted_cdf', ':', 'C1'),\n # Almost the same as `inverted_cdf`:\n ('averaged_inverted_cdf', '-.', 'C1'),\n ('closest_observation', ':', 'C2'),\n ('interpolated_inverted_cdf', '--', 'C1'),\n ('hazen', '--', 'C3'),\n ('weibull', '-.', 'C4'),\n ('median_unbiased', '--', 'C5'),\n ('normal_unbiased', '-.', 'C6'),\n ]\n for method, style, color in lines:\n ax.plot(\n p, np.percentile(a, p, method=method),\n label=method, linestyle=style, color=color)\n ax.set(\n title='Percentiles for different methods and data: ' + str(a),\n xlabel='Percentile',\n ylabel='Estimated percentile value',\n yticks=a)\n ax.legend()\n plt.show()\n\n References\n ----------\n .. [1] R. J. Hyndman and Y. Fan,\n \"Sample quantiles in statistical packages,\"\n The American Statistician, 50(4), pp. 
361-365, 1996\n\n \"\"\"\n if interpolation is not None:\n method = _check_interpolation_as_method(\n method, interpolation, \"percentile\")\n q = np.true_divide(q, 100)\n q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105)\n if not _quantile_is_valid(q):\n raise ValueError(\"Percentiles must be in the range [0, 100]\")\n return _quantile_unchecked(\n a, q, axis, out, overwrite_input, method, keepdims)\n\n\ndef _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,\n method=None, keepdims=None, *, interpolation=None):\n return (a, q, out)\n\n\n@array_function_dispatch(_quantile_dispatcher)\ndef quantile(a,\n q,\n axis=None,\n out=None,\n overwrite_input=False,\n method=\"linear\",\n keepdims=False,\n *,\n interpolation=None):\n \"\"\"\n Compute the q-th quantile of the data along the specified axis.\n\n .. versionadded:: 1.15.0\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n q : array_like of float\n Quantile or sequence of quantiles to compute, which must be between\n 0 and 1 inclusive.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the quantiles are computed. The default is\n to compute the quantile(s) along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape and buffer length as the expected output, but the\n type (of the output) will be cast if necessary.\n overwrite_input : bool, optional\n If True, then allow the input array `a` to be modified by\n intermediate calculations, to save memory. In this case, the\n contents of the input `a` after this function completes is\n undefined.\n method : str, optional\n This parameter specifies the method to use for estimating the\n quantile. There are many different methods, some unique to NumPy.\n See the notes for explanation. The options sorted by their R type\n as summarized in the H&F paper [1]_ are:\n\n 1. 'inverted_cdf'\n 2. 'averaged_inverted_cdf'\n 3. 'closest_observation'\n 4. 'interpolated_inverted_cdf'\n 5. 'hazen'\n 6. 'weibull'\n 7. 'linear' (default)\n 8. 'median_unbiased'\n 9. 'normal_unbiased'\n\n The first three methods are discontinuous. NumPy further defines the\n following discontinuous variations of the default 'linear' (7.) option:\n\n * 'lower'\n * 'higher',\n * 'midpoint'\n * 'nearest'\n\n .. versionchanged:: 1.22.0\n This argument was previously called \"interpolation\" and only\n offered the \"linear\" default and last four options.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left in\n the result as dimensions with size one. With this option, the\n result will broadcast correctly against the original array `a`.\n\n interpolation : str, optional\n Deprecated name for the method keyword argument.\n\n .. deprecated:: 1.22.0\n\n Returns\n -------\n quantile : scalar or ndarray\n If `q` is a single quantile and `axis=None`, then the result\n is a scalar. If multiple quantiles are given, first axis of\n the result corresponds to the quantiles. The other axes are\n the axes that remain after the reduction of `a`. If the input\n contains integers or floats smaller than ``float64``, the output\n data-type is ``float64``. Otherwise, the output data-type is the\n same as that of the input. 
If `out` is specified, that array is\n returned instead.\n\n See Also\n --------\n mean\n percentile : equivalent to quantile, but with q in the range [0, 100].\n median : equivalent to ``quantile(..., 0.5)``\n nanquantile\n\n Notes\n -----\n Given a vector ``V`` of length ``N``, the q-th quantile of ``V`` is the\n value ``q`` of the way from the minimum to the maximum in a sorted copy of\n ``V``. The values and distances of the two nearest neighbors as well as the\n `method` parameter will determine the quantile if the normalized\n ranking does not match the location of ``q`` exactly. This function is the\n same as the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and\n the same as the maximum if ``q=1.0``.\n\n The optional `method` parameter specifies the method to use when the\n desired quantile lies between two data points ``i < j``.\n If ``g`` is the fractional part of the index surrounded by ``i`` and ``j``,\n and alpha and beta are correction constants modifying i and j:\n\n .. math::\n i + g = (q - alpha) / ( n - alpha - beta + 1 )\n\n The different methods then work as follows\n\n inverted_cdf:\n method 1 of H&F [1]_.\n This method gives discontinuous results:\n * if g > 0 ; then take j\n * if g = 0 ; then take i\n\n averaged_inverted_cdf:\n method 2 of H&F [1]_.\n This method gives discontinuous results:\n * if g > 0 ; then take j\n * if g = 0 ; then average between bounds\n\n closest_observation:\n method 3 of H&F [1]_.\n This method gives discontinuous results:\n * if g > 0 ; then take j\n * if g = 0 and index is odd ; then take j\n * if g = 0 and index is even ; then take i\n\n interpolated_inverted_cdf:\n method 4 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 0\n * beta = 1\n\n hazen:\n method 5 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 1/2\n * beta = 1/2\n\n weibull:\n method 6 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 0\n * beta = 0\n\n linear:\n method 7 of H&F [1]_.\n This method gives continuous results using:\n * alpha = 1\n * beta = 1\n\n median_unbiased:\n method 8 of H&F [1]_.\n This method is probably the best method if the sample\n distribution function is unknown (see reference).\n This method gives continuous results using:\n * alpha = 1/3\n * beta = 1/3\n\n normal_unbiased:\n method 9 of H&F [1]_.\n This method is probably the best method if the sample\n distribution function is known to be normal.\n This method gives continuous results using:\n * alpha = 3/8\n * beta = 3/8\n\n lower:\n NumPy method kept for backwards compatibility.\n Takes ``i`` as the interpolation point.\n\n higher:\n NumPy method kept for backwards compatibility.\n Takes ``j`` as the interpolation point.\n\n nearest:\n NumPy method kept for backwards compatibility.\n Takes ``i`` or ``j``, whichever is nearest.\n\n midpoint:\n NumPy method kept for backwards compatibility.\n Uses ``(i + j) / 2``.\n\n Examples\n --------\n >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n >>> a\n array([[10, 7, 4],\n [ 3, 2, 1]])\n >>> np.quantile(a, 0.5)\n 3.5\n >>> np.quantile(a, 0.5, axis=0)\n array([6.5, 4.5, 2.5])\n >>> np.quantile(a, 0.5, axis=1)\n array([7., 2.])\n >>> np.quantile(a, 0.5, axis=1, keepdims=True)\n array([[7.],\n [2.]])\n >>> m = np.quantile(a, 0.5, axis=0)\n >>> out = np.zeros_like(m)\n >>> np.quantile(a, 0.5, axis=0, out=out)\n array([6.5, 4.5, 2.5])\n >>> m\n array([6.5, 4.5, 2.5])\n >>> b = a.copy()\n >>> np.quantile(b, 0.5, axis=1, overwrite_input=True)\n array([7., 2.])\n >>> assert not np.all(a 
== b)\n\n See also `numpy.percentile` for a visualization of most methods.\n\n References\n ----------\n .. [1] R. J. Hyndman and Y. Fan,\n \"Sample quantiles in statistical packages,\"\n The American Statistician, 50(4), pp. 361-365, 1996\n\n \"\"\"\n if interpolation is not None:\n method = _check_interpolation_as_method(\n method, interpolation, \"quantile\")\n\n q = np.asanyarray(q)\n if not _quantile_is_valid(q):\n raise ValueError(\"Quantiles must be in the range [0, 1]\")\n return _quantile_unchecked(\n a, q, axis, out, overwrite_input, method, keepdims)\n\n\ndef _quantile_unchecked(a,\n q,\n axis=None,\n out=None,\n overwrite_input=False,\n method=\"linear\",\n keepdims=False):\n \"\"\"Assumes that q is in [0, 1], and is an ndarray\"\"\"\n r, k = _ureduce(a,\n func=_quantile_ureduce_func,\n q=q,\n axis=axis,\n out=out,\n overwrite_input=overwrite_input,\n method=method)\n if keepdims:\n return r.reshape(q.shape + k)\n else:\n return r\n\n\ndef _quantile_is_valid(q):\n # avoid expensive reductions, relevant for arrays with < O(1000) elements\n if q.ndim == 1 and q.size < 10:\n for i in range(q.size):\n if not (0.0 <= q[i] <= 1.0):\n return False\n else:\n if not (np.all(0 <= q) and np.all(q <= 1)):\n return False\n return True\n\n\ndef _check_interpolation_as_method(method, interpolation, fname):\n # Deprecated NumPy 1.22, 2021-11-08\n warnings.warn(\n f\"the `interpolation=` argument to {fname} was renamed to \"\n \"`method=`, which has additional options.\\n\"\n \"Users of the modes 'nearest', 'lower', 'higher', or \"\n \"'midpoint' are encouraged to review the method they used. \"\n \"(Deprecated NumPy 1.22)\",\n DeprecationWarning, stacklevel=4)\n if method != \"linear\":\n # sanity check, we assume this basically never happens\n raise TypeError(\n \"You shall not pass both `method` and `interpolation`!\\n\"\n \"(`interpolation` is Deprecated in favor of `method`)\")\n return interpolation\n\n\ndef _compute_virtual_index(n, quantiles, alpha: float, beta: float):\n \"\"\"\n Compute the floating point indexes of an array for the linear\n interpolation of quantiles.\n n : array_like\n The sample sizes.\n quantiles : array_like\n The quantile values.\n alpha : float\n A constant used to correct the index computed.\n beta : float\n A constant used to correct the index computed.\n\n alpha and beta values depend on the chosen method\n (see quantile documentation)\n\n Reference:\n Hyndman&Fan paper \"Sample Quantiles in Statistical Packages\",\n DOI: 10.1080/00031305.1996.10473566\n \"\"\"\n return n * quantiles + (\n alpha + quantiles * (1 - alpha - beta)\n ) - 1\n\n\ndef _get_gamma(virtual_indexes, previous_indexes, method):\n \"\"\"\n Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation\n of quantiles.\n\n virtual_indexes : array_like\n The indexes where the percentile is supposed to be found in the sorted\n sample.\n previous_indexes : array_like\n The floor values of virtual_indexes.\n method : dict\n The interpolation method chosen, which may have a specific rule\n modifying gamma.\n\n gamma is usually the fractional part of virtual_indexes but can be modified\n by the interpolation method.\n \"\"\"\n gamma = np.asanyarray(virtual_indexes - previous_indexes)\n gamma = method[\"fix_gamma\"](gamma, virtual_indexes)\n return np.asanyarray(gamma)\n\n\ndef _lerp(a, b, t, out=None):\n \"\"\"\n Compute the linear interpolation weighted by gamma on each point of\n two arrays of the same shape.\n\n a : array_like\n Left bound.\n b : array_like\n Right bound.\n t : 
array_like\n The interpolation weight.\n out : array_like\n Output array.\n \"\"\"\n diff_b_a = subtract(b, a)\n # asanyarray is a stop-gap until gh-13105\n lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out))\n subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5)\n if lerp_interpolation.ndim == 0 and out is None:\n lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays\n return lerp_interpolation\n\n\ndef _get_gamma_mask(shape, default_value, conditioned_value, where):\n out = np.full(shape, default_value)\n np.copyto(out, conditioned_value, where=where, casting=\"unsafe\")\n return out\n\n\ndef _discret_interpolation_to_boundaries(index, gamma_condition_fun):\n previous = np.floor(index)\n next = previous + 1\n gamma = index - previous\n res = _get_gamma_mask(shape=index.shape,\n default_value=next,\n conditioned_value=previous,\n where=gamma_condition_fun(gamma, index)\n ).astype(np.intp)\n # Some methods can lead to out-of-bound integers, clip them:\n res[res < 0] = 0\n return res\n\n\ndef _closest_observation(n, quantiles):\n gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0)\n return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5,\n gamma_fun)\n\n\ndef _inverted_cdf(n, quantiles):\n gamma_fun = lambda gamma, _: (gamma == 0)\n return _discret_interpolation_to_boundaries((n * quantiles) - 1,\n gamma_fun)\n\n\ndef _quantile_ureduce_func(\n a: np.array,\n q: np.array,\n axis: int = None,\n out=None,\n overwrite_input: bool = False,\n method=\"linear\",\n) -> np.array:\n if q.ndim > 2:\n # The code below works fine for nd, but it might not have useful\n # semantics. For now, keep the supported dimensions the same as it was\n # before.\n raise ValueError(\"q must be a scalar or 1d\")\n if overwrite_input:\n if axis is None:\n axis = 0\n arr = a.ravel()\n else:\n arr = a\n else:\n if axis is None:\n axis = 0\n arr = a.flatten()\n else:\n arr = a.copy()\n result = _quantile(arr,\n quantiles=q,\n axis=axis,\n method=method,\n out=out)\n return result\n\n\ndef _get_indexes(arr, virtual_indexes, valid_values_count):\n \"\"\"\n Get the valid indexes of arr neighbouring virtual_indexes.\n Note\n This is a companion function to linear interpolation of\n Quantiles\n\n Returns\n -------\n (previous_indexes, next_indexes): Tuple\n A Tuple of virtual_indexes neighbouring indexes\n \"\"\"\n previous_indexes = np.asanyarray(np.floor(virtual_indexes))\n next_indexes = np.asanyarray(previous_indexes + 1)\n indexes_above_bounds = virtual_indexes >= valid_values_count - 1\n # When indexes is above max index, take the max value of the array\n if indexes_above_bounds.any():\n previous_indexes[indexes_above_bounds] = -1\n next_indexes[indexes_above_bounds] = -1\n # When indexes is below min index, take the min value of the array\n indexes_below_bounds = virtual_indexes < 0\n if indexes_below_bounds.any():\n previous_indexes[indexes_below_bounds] = 0\n next_indexes[indexes_below_bounds] = 0\n if np.issubdtype(arr.dtype, np.inexact):\n # After the sort, slices having NaNs will have for last element a NaN\n virtual_indexes_nans = np.isnan(virtual_indexes)\n if virtual_indexes_nans.any():\n previous_indexes[virtual_indexes_nans] = -1\n next_indexes[virtual_indexes_nans] = -1\n previous_indexes = previous_indexes.astype(np.intp)\n next_indexes = next_indexes.astype(np.intp)\n return previous_indexes, next_indexes\n\n\ndef _quantile(\n arr: np.array,\n quantiles: np.array,\n axis: int = -1,\n method=\"linear\",\n out=None,\n):\n \"\"\"\n 
Private function that doesn't support extended axis or keepdims.\n These methods are extended to this function using _ureduce.\n See nanpercentile for parameter usage.\n It computes the quantiles of the array for the given axis.\n A linear interpolation is performed based on the given `method`.\n\n By default, the method is \"linear\" where alpha == beta == 1 which\n performs the 7th method of Hyndman&Fan.\n With \"median_unbiased\" we get alpha == beta == 1/3\n thus the 8th method of Hyndman&Fan.\n \"\"\"\n # --- Setup\n arr = np.asanyarray(arr)\n values_count = arr.shape[axis]\n # The dimensions of `q` are prepended to the output shape, so we need the\n # axis being sampled from `arr` to be last.\n DATA_AXIS = 0\n if axis != DATA_AXIS: # But moveaxis is slow, so only call it if axis!=0.\n arr = np.moveaxis(arr, axis, destination=DATA_AXIS)\n # --- Computation of indexes\n # Index where to find the value in the sorted array.\n # Virtual because it is a floating point value, not a valid index.\n # The nearest neighbours are used for interpolation\n try:\n method = _QuantileMethods[method]\n except KeyError:\n raise ValueError(\n f\"{method!r} is not a valid method. Use one of: \"\n f\"{_QuantileMethods.keys()}\") from None\n virtual_indexes = method[\"get_virtual_index\"](values_count, quantiles)\n virtual_indexes = np.asanyarray(virtual_indexes)\n if np.issubdtype(virtual_indexes.dtype, np.integer):\n # No interpolation needed, take the points along axis\n if np.issubdtype(arr.dtype, np.inexact):\n # may contain nan, which would sort to the end\n arr.partition(concatenate((virtual_indexes.ravel(), [-1])), axis=0)\n slices_having_nans = np.isnan(arr[-1])\n else:\n # cannot contain nan\n arr.partition(virtual_indexes.ravel(), axis=0)\n slices_having_nans = np.array(False, dtype=bool)\n result = take(arr, virtual_indexes, axis=0, out=out)\n else:\n previous_indexes, next_indexes = _get_indexes(arr,\n virtual_indexes,\n values_count)\n # --- Sorting\n arr.partition(\n np.unique(np.concatenate(([0, -1],\n previous_indexes.ravel(),\n next_indexes.ravel(),\n ))),\n axis=DATA_AXIS)\n if np.issubdtype(arr.dtype, np.inexact):\n slices_having_nans = np.isnan(\n take(arr, indices=-1, axis=DATA_AXIS)\n )\n else:\n slices_having_nans = None\n # --- Get values from indexes\n previous = np.take(arr, previous_indexes, axis=DATA_AXIS)\n next = np.take(arr, next_indexes, axis=DATA_AXIS)\n # --- Linear interpolation\n gamma = _get_gamma(virtual_indexes, previous_indexes, method)\n result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1)\n gamma = gamma.reshape(result_shape)\n result = _lerp(previous,\n next,\n gamma,\n out=out)\n if np.any(slices_having_nans):\n if result.ndim == 0 and out is None:\n # can't write to a scalar\n result = arr.dtype.type(np.nan)\n else:\n result[..., slices_having_nans] = np.nan\n return result\n\n\ndef _trapz_dispatcher(y, x=None, dx=None, axis=None):\n return (y, x)\n\n\n@array_function_dispatch(_trapz_dispatcher)\ndef trapz(y, x=None, dx=1.0, axis=-1):\n r\"\"\"\n Integrate along the given axis using the composite trapezoidal rule.\n\n If `x` is provided, the integration happens in sequence along its\n elements - they are not sorted.\n\n Integrate `y` (`x`) along each 1d slice on the given axis, compute\n :math:`\\int y(x) dx`.\n When `x` is specified, this integrates along the parametric curve,\n computing :math:`\\int_t y(t) dt =\n \\int_t y(t) \\left.\\frac{dx}{dt}\\right|_{x=x(t)} dt`.\n\n Parameters\n ----------\n y : array_like\n Input array to integrate.\n x : 
array_like, optional\n The sample points corresponding to the `y` values. If `x` is None,\n the sample points are assumed to be evenly spaced `dx` apart. The\n default is None.\n dx : scalar, optional\n The spacing between sample points when `x` is None. The default is 1.\n axis : int, optional\n The axis along which to integrate.\n\n Returns\n -------\n trapz : float or ndarray\n Definite integral of 'y' = n-dimensional array as approximated along\n a single axis by the trapezoidal rule. If 'y' is a 1-dimensional array,\n then the result is a float. If 'n' is greater than 1, then the result\n is an 'n-1' dimensional array.\n\n See Also\n --------\n sum, cumsum\n\n Notes\n -----\n Image [2]_ illustrates trapezoidal rule -- y-axis locations of points\n will be taken from `y` array, by default x-axis distances between\n points will be 1.0, alternatively they can be provided with `x` array\n or with `dx` scalar. Return value will be equal to combined area under\n the red lines.\n\n\n References\n ----------\n .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule\n\n .. [2] Illustration image:\n https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png\n\n Examples\n --------\n >>> np.trapz([1,2,3])\n 4.0\n >>> np.trapz([1,2,3], x=[4,6,8])\n 8.0\n >>> np.trapz([1,2,3], dx=2)\n 8.0\n\n Using a decreasing `x` corresponds to integrating in reverse:\n\n >>> np.trapz([1,2,3], x=[8,6,4])\n -8.0\n\n More generally `x` is used to integrate along a parametric curve.\n This finds the area of a circle, noting we repeat the sample which closes\n the curve:\n\n >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)\n >>> np.trapz(np.cos(theta), x=np.sin(theta))\n 3.141571941375841\n\n >>> a = np.arange(6).reshape(2, 3)\n >>> a\n array([[0, 1, 2],\n [3, 4, 5]])\n >>> np.trapz(a, axis=0)\n array([1.5, 2.5, 3.5])\n >>> np.trapz(a, axis=1)\n array([2., 8.])\n \"\"\"\n y = asanyarray(y)\n if x is None:\n d = dx\n else:\n x = asanyarray(x)\n if x.ndim == 1:\n d = diff(x)\n # reshape to correct shape\n shape = [1]*y.ndim\n shape[axis] = d.shape[0]\n d = d.reshape(shape)\n else:\n d = diff(x, axis=axis)\n nd = y.ndim\n slice1 = [slice(None)]*nd\n slice2 = [slice(None)]*nd\n slice1[axis] = slice(1, None)\n slice2[axis] = slice(None, -1)\n try:\n ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)\n except ValueError:\n # Operations didn't work, cast to ndarray\n d = np.asarray(d)\n y = np.asarray(y)\n ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)\n return ret\n\n\ndef _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):\n return xi\n\n\n# Based on scitools meshgrid\n@array_function_dispatch(_meshgrid_dispatcher)\ndef meshgrid(*xi, copy=True, sparse=False, indexing='xy'):\n \"\"\"\n Return coordinate matrices from coordinate vectors.\n\n Make N-D coordinate arrays for vectorized evaluations of\n N-D scalar/vector fields over N-D grids, given\n one-dimensional coordinate arrays x1, x2,..., xn.\n\n .. versionchanged:: 1.9\n 1-D and 0-D cases are allowed.\n\n Parameters\n ----------\n x1, x2,..., xn : array_like\n 1-D arrays representing the coordinates of a grid.\n indexing : {'xy', 'ij'}, optional\n Cartesian ('xy', default) or matrix ('ij') indexing of output.\n See Notes for more details.\n\n .. versionadded:: 1.7.0\n sparse : bool, optional\n If True the shape of the returned coordinate array for dimension *i*\n is reduced from ``(N1, ..., Ni, ... Nn)`` to\n ``(1, ..., 1, Ni, 1, ..., 1)``. 
These sparse coordinate grids are\n intended to be used with :ref:`basics.broadcasting`. When all\n coordinates are used in an expression, broadcasting still leads to a\n fully-dimensional result array.\n\n Default is False.\n\n .. versionadded:: 1.7.0\n copy : bool, optional\n If False, a view into the original arrays is returned in order to\n conserve memory. Default is True. Please note that\n ``sparse=False, copy=False`` will likely return non-contiguous\n arrays. Furthermore, more than one element of a broadcast array\n may refer to a single memory location. If you need to write to the\n arrays, make copies first.\n\n .. versionadded:: 1.7.0\n\n Returns\n -------\n X1, X2,..., XN : ndarray\n For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,\n return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'\n or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'\n with the elements of `xi` repeated to fill the matrix along\n the first dimension for `x1`, the second for `x2` and so on.\n\n Notes\n -----\n This function supports both indexing conventions through the indexing\n keyword argument. Giving the string 'ij' returns a meshgrid with\n matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.\n In the 2-D case with inputs of length M and N, the outputs are of shape\n (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case\n with inputs of length M, N and P, outputs are of shape (N, M, P) for\n 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is\n illustrated by the following code snippet::\n\n xv, yv = np.meshgrid(x, y, indexing='ij')\n for i in range(nx):\n for j in range(ny):\n # treat xv[i,j], yv[i,j]\n\n xv, yv = np.meshgrid(x, y, indexing='xy')\n for i in range(nx):\n for j in range(ny):\n # treat xv[j,i], yv[j,i]\n\n In the 1-D and 0-D case, the indexing and sparse keywords have no effect.\n\n See Also\n --------\n mgrid : Construct a multi-dimensional \"meshgrid\" using indexing notation.\n ogrid : Construct an open multi-dimensional \"meshgrid\" using indexing\n notation.\n\n Examples\n --------\n >>> nx, ny = (3, 2)\n >>> x = np.linspace(0, 1, nx)\n >>> y = np.linspace(0, 1, ny)\n >>> xv, yv = np.meshgrid(x, y)\n >>> xv\n array([[0. , 0.5, 1. ],\n [0. , 0.5, 1. ]])\n >>> yv\n array([[0., 0., 0.],\n [1., 1., 1.]])\n >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays\n >>> xv\n array([[0. , 0.5, 1. ]])\n >>> yv\n array([[0.],\n [1.]])\n\n `meshgrid` is very useful to evaluate functions on a grid. 
If the\n function depends on all coordinates, you can use the parameter\n ``sparse=True`` to save memory and computation time.\n\n >>> x = np.linspace(-5, 5, 101)\n >>> y = np.linspace(-5, 5, 101)\n >>> # full coordinate arrays\n >>> xx, yy = np.meshgrid(x, y)\n >>> zz = np.sqrt(xx**2 + yy**2)\n >>> xx.shape, yy.shape, zz.shape\n ((101, 101), (101, 101), (101, 101))\n >>> # sparse coordinate arrays\n >>> xs, ys = np.meshgrid(x, y, sparse=True)\n >>> zs = np.sqrt(xs**2 + ys**2)\n >>> xs.shape, ys.shape, zs.shape\n ((1, 101), (101, 1), (101, 101))\n >>> np.array_equal(zz, zs)\n True\n\n >>> import matplotlib.pyplot as plt\n >>> h = plt.contourf(x, y, zs)\n >>> plt.axis('scaled')\n >>> plt.colorbar()\n >>> plt.show()\n \"\"\"\n ndim = len(xi)\n\n if indexing not in ['xy', 'ij']:\n raise ValueError(\n \"Valid values for `indexing` are 'xy' and 'ij'.\")\n\n s0 = (1,) * ndim\n output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])\n for i, x in enumerate(xi)]\n\n if indexing == 'xy' and ndim > 1:\n # switch first and second axis\n output[0].shape = (1, -1) + s0[2:]\n output[1].shape = (-1, 1) + s0[2:]\n\n if not sparse:\n # Return the full N-D matrix (not only the 1-D vector)\n output = np.broadcast_arrays(*output, subok=True)\n\n if copy:\n output = [x.copy() for x in output]\n\n return output\n\n\ndef _delete_dispatcher(arr, obj, axis=None):\n return (arr, obj)\n\n\n@array_function_dispatch(_delete_dispatcher)\ndef delete(arr, obj, axis=None):\n \"\"\"\n Return a new array with sub-arrays along an axis deleted. For a one\n dimensional array, this returns those entries not returned by\n `arr[obj]`.\n\n Parameters\n ----------\n arr : array_like\n Input array.\n obj : slice, int or array of ints\n Indicate indices of sub-arrays to remove along the specified axis.\n\n .. versionchanged:: 1.19.0\n Boolean indices are now treated as a mask of elements to remove,\n rather than being cast to the integers 0 and 1.\n\n axis : int, optional\n The axis along which to delete the subarray defined by `obj`.\n If `axis` is None, `obj` is applied to the flattened array.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with the elements specified by `obj` removed. Note\n that `delete` does not occur in-place. If `axis` is None, `out` is\n a flattened array.\n\n See Also\n --------\n insert : Insert elements into an array.\n append : Append elements at the end of an array.\n\n Notes\n -----\n Often it is preferable to use a boolean mask. 
For example:\n\n >>> arr = np.arange(12) + 1\n >>> mask = np.ones(len(arr), dtype=bool)\n >>> mask[[0,2,4]] = False\n >>> result = arr[mask,...]\n\n Is equivalent to `np.delete(arr, [0,2,4], axis=0)`, but allows further\n use of `mask`.\n\n Examples\n --------\n >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])\n >>> arr\n array([[ 1, 2, 3, 4],\n [ 5, 6, 7, 8],\n [ 9, 10, 11, 12]])\n >>> np.delete(arr, 1, 0)\n array([[ 1, 2, 3, 4],\n [ 9, 10, 11, 12]])\n\n >>> np.delete(arr, np.s_[::2], 1)\n array([[ 2, 4],\n [ 6, 8],\n [10, 12]])\n >>> np.delete(arr, [1,3,5], None)\n array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])\n\n \"\"\"\n wrap = None\n if type(arr) is not ndarray:\n try:\n wrap = arr.__array_wrap__\n except AttributeError:\n pass\n\n arr = asarray(arr)\n ndim = arr.ndim\n arrorder = 'F' if arr.flags.fnc else 'C'\n if axis is None:\n if ndim != 1:\n arr = arr.ravel()\n # needed for np.matrix, which is still not 1d after being ravelled\n ndim = arr.ndim\n axis = ndim - 1\n else:\n axis = normalize_axis_index(axis, ndim)\n\n slobj = [slice(None)]*ndim\n N = arr.shape[axis]\n newshape = list(arr.shape)\n\n if isinstance(obj, slice):\n start, stop, step = obj.indices(N)\n xr = range(start, stop, step)\n numtodel = len(xr)\n\n if numtodel <= 0:\n if wrap:\n return wrap(arr.copy(order=arrorder))\n else:\n return arr.copy(order=arrorder)\n\n # Invert if step is negative:\n if step < 0:\n step = -step\n start = xr[-1]\n stop = xr[0] + 1\n\n newshape[axis] -= numtodel\n new = empty(newshape, arr.dtype, arrorder)\n # copy initial chunk\n if start == 0:\n pass\n else:\n slobj[axis] = slice(None, start)\n new[tuple(slobj)] = arr[tuple(slobj)]\n # copy end chunk\n if stop == N:\n pass\n else:\n slobj[axis] = slice(stop-numtodel, None)\n slobj2 = [slice(None)]*ndim\n slobj2[axis] = slice(stop, None)\n new[tuple(slobj)] = arr[tuple(slobj2)]\n # copy middle pieces\n if step == 1:\n pass\n else: # use array indexing.\n keep = ones(stop-start, dtype=bool)\n keep[:stop-start:step] = False\n slobj[axis] = slice(start, stop-numtodel)\n slobj2 = [slice(None)]*ndim\n slobj2[axis] = slice(start, stop)\n arr = arr[tuple(slobj2)]\n slobj2[axis] = keep\n new[tuple(slobj)] = arr[tuple(slobj2)]\n if wrap:\n return wrap(new)\n else:\n return new\n\n if isinstance(obj, (int, integer)) and not isinstance(obj, bool):\n single_value = True\n else:\n single_value = False\n _obj = obj\n obj = np.asarray(obj)\n if obj.size == 0 and not isinstance(_obj, np.ndarray):\n obj = obj.astype(intp)\n elif obj.size == 1 and not isinstance(_obj, bool):\n obj = obj.astype(intp).reshape(())\n single_value = True\n\n if single_value:\n # optimization for a single value\n if (obj < -N or obj >= N):\n raise IndexError(\n \"index %i is out of bounds for axis %i with \"\n \"size %i\" % (obj, axis, N))\n if (obj < 0):\n obj += N\n newshape[axis] -= 1\n new = empty(newshape, arr.dtype, arrorder)\n slobj[axis] = slice(None, obj)\n new[tuple(slobj)] = arr[tuple(slobj)]\n slobj[axis] = slice(obj, None)\n slobj2 = [slice(None)]*ndim\n slobj2[axis] = slice(obj+1, None)\n new[tuple(slobj)] = arr[tuple(slobj2)]\n else:\n if obj.dtype == bool:\n if obj.shape != (N,):\n raise ValueError('boolean array argument obj to delete '\n 'must be one dimensional and match the axis '\n 'length of {}'.format(N))\n\n # optimization, the other branch is slower\n keep = ~obj\n else:\n keep = ones(N, dtype=bool)\n keep[obj,] = False\n\n slobj[axis] = keep\n new = arr[tuple(slobj)]\n\n if wrap:\n return wrap(new)\n else:\n return new\n\n\ndef _insert_dispatcher(arr, 
obj, values, axis=None):\n return (arr, obj, values)\n\n\n@array_function_dispatch(_insert_dispatcher)\ndef insert(arr, obj, values, axis=None):\n \"\"\"\n Insert values along the given axis before the given indices.\n\n Parameters\n ----------\n arr : array_like\n Input array.\n obj : int, slice or sequence of ints\n Object that defines the index or indices before which `values` is\n inserted.\n\n .. versionadded:: 1.8.0\n\n Support for multiple insertions when `obj` is a single scalar or a\n sequence with one element (similar to calling insert multiple\n times).\n values : array_like\n Values to insert into `arr`. If the type of `values` is different\n from that of `arr`, `values` is converted to the type of `arr`.\n `values` should be shaped so that ``arr[...,obj,...] = values``\n is legal.\n axis : int, optional\n Axis along which to insert `values`. If `axis` is None then `arr`\n is flattened first.\n\n Returns\n -------\n out : ndarray\n A copy of `arr` with `values` inserted. Note that `insert`\n does not occur in-place: a new array is returned. If\n `axis` is None, `out` is a flattened array.\n\n See Also\n --------\n append : Append elements at the end of an array.\n concatenate : Join a sequence of arrays along an existing axis.\n delete : Delete elements from an array.\n\n Notes\n -----\n Note that for higher dimensional inserts `obj=0` behaves very differently\n from `obj=[0]`, just as `arr[:,0,:] = values` is different from\n `arr[:,[0],:] = values`.\n\n Examples\n --------\n >>> a = np.array([[1, 1], [2, 2], [3, 3]])\n >>> a\n array([[1, 1],\n [2, 2],\n [3, 3]])\n >>> np.insert(a, 1, 5)\n array([1, 5, 1, ..., 2, 3, 3])\n >>> np.insert(a, 1, 5, axis=1)\n array([[1, 5, 1],\n [2, 5, 2],\n [3, 5, 3]])\n\n Difference between sequence and scalars:\n\n >>> np.insert(a, [1], [[1],[2],[3]], axis=1)\n array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])\n >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1),\n ... 
np.insert(a, [1], [[1],[2],[3]], axis=1))\n True\n\n >>> b = a.flatten()\n >>> b\n array([1, 1, 2, 2, 3, 3])\n >>> np.insert(b, [2, 2], [5, 6])\n array([1, 1, 5, ..., 2, 3, 3])\n\n >>> np.insert(b, slice(2, 4), [5, 6])\n array([1, 1, 5, ..., 2, 3, 3])\n\n >>> np.insert(b, [2, 2], [7.13, False]) # type casting\n array([1, 1, 7, ..., 2, 3, 3])\n\n >>> x = np.arange(8).reshape(2, 4)\n >>> idx = (1, 3)\n >>> np.insert(x, idx, 999, axis=1)\n array([[ 0, 999, 1, 2, 999, 3],\n [ 4, 999, 5, 6, 999, 7]])\n\n \"\"\"\n wrap = None\n if type(arr) is not ndarray:\n try:\n wrap = arr.__array_wrap__\n except AttributeError:\n pass\n\n arr = asarray(arr)\n ndim = arr.ndim\n arrorder = 'F' if arr.flags.fnc else 'C'\n if axis is None:\n if ndim != 1:\n arr = arr.ravel()\n # needed for np.matrix, which is still not 1d after being ravelled\n ndim = arr.ndim\n axis = ndim - 1\n else:\n axis = normalize_axis_index(axis, ndim)\n slobj = [slice(None)]*ndim\n N = arr.shape[axis]\n newshape = list(arr.shape)\n\n if isinstance(obj, slice):\n # turn it into a range object\n indices = arange(*obj.indices(N), dtype=intp)\n else:\n # need to copy obj, because indices will be changed in-place\n indices = np.array(obj)\n if indices.dtype == bool:\n # See also delete\n # 2012-10-11, NumPy 1.8\n warnings.warn(\n \"in the future insert will treat boolean arrays and \"\n \"array-likes as a boolean index instead of casting it to \"\n \"integer\", FutureWarning, stacklevel=3)\n indices = indices.astype(intp)\n # Code after warning period:\n #if obj.ndim != 1:\n # raise ValueError('boolean array argument obj to insert '\n # 'must be one dimensional')\n #indices = np.flatnonzero(obj)\n elif indices.ndim > 1:\n raise ValueError(\n \"index array argument obj to insert must be one dimensional \"\n \"or scalar\")\n if indices.size == 1:\n index = indices.item()\n if index < -N or index > N:\n raise IndexError(f\"index {obj} is out of bounds for axis {axis} \"\n f\"with size {N}\")\n if (index < 0):\n index += N\n\n # There are some object array corner cases here, but we cannot avoid\n # that:\n values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype)\n if indices.ndim == 0:\n # broadcasting is very different here, since a[:,0,:] = ... behaves\n # very differently from a[:,[0],:] = ...! This changes values so that\n # it works like the second case. 
(here a[:,0:1,:])\n values = np.moveaxis(values, 0, axis)\n numnew = values.shape[axis]\n newshape[axis] += numnew\n new = empty(newshape, arr.dtype, arrorder)\n slobj[axis] = slice(None, index)\n new[tuple(slobj)] = arr[tuple(slobj)]\n slobj[axis] = slice(index, index+numnew)\n new[tuple(slobj)] = values\n slobj[axis] = slice(index+numnew, None)\n slobj2 = [slice(None)] * ndim\n slobj2[axis] = slice(index, None)\n new[tuple(slobj)] = arr[tuple(slobj2)]\n if wrap:\n return wrap(new)\n return new\n elif indices.size == 0 and not isinstance(obj, np.ndarray):\n # Can safely cast the empty list to intp\n indices = indices.astype(intp)\n\n indices[indices < 0] += N\n\n numnew = len(indices)\n order = indices.argsort(kind='mergesort') # stable sort\n indices[order] += np.arange(numnew)\n\n newshape[axis] += numnew\n old_mask = ones(newshape[axis], dtype=bool)\n old_mask[indices] = False\n\n new = empty(newshape, arr.dtype, arrorder)\n slobj2 = [slice(None)]*ndim\n slobj[axis] = indices\n slobj2[axis] = old_mask\n new[tuple(slobj)] = values\n new[tuple(slobj2)] = arr\n\n if wrap:\n return wrap(new)\n return new\n\n\ndef _append_dispatcher(arr, values, axis=None):\n return (arr, values)\n\n\n@array_function_dispatch(_append_dispatcher)\ndef append(arr, values, axis=None):\n \"\"\"\n Append values to the end of an array.\n\n Parameters\n ----------\n arr : array_like\n Values are appended to a copy of this array.\n values : array_like\n These values are appended to a copy of `arr`. It must be of the\n correct shape (the same shape as `arr`, excluding `axis`). If\n `axis` is not specified, `values` can be any shape and will be\n flattened before use.\n axis : int, optional\n The axis along which `values` are appended. If `axis` is not\n given, both `arr` and `values` are flattened before use.\n\n Returns\n -------\n append : ndarray\n A copy of `arr` with `values` appended to `axis`. Note that\n `append` does not occur in-place: a new array is allocated and\n filled. 
If `axis` is None, `out` is a flattened array.\n\n See Also\n --------\n insert : Insert elements into an array.\n delete : Delete elements from an array.\n\n Examples\n --------\n >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])\n array([1, 2, 3, ..., 7, 8, 9])\n\n When `axis` is specified, `values` must have the correct shape.\n\n >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)\n array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)\n Traceback (most recent call last):\n ...\n ValueError: all the input arrays must have same number of dimensions, but\n the array at index 0 has 2 dimension(s) and the array at index 1 has 1\n dimension(s)\n\n \"\"\"\n arr = asanyarray(arr)\n if axis is None:\n if arr.ndim != 1:\n arr = arr.ravel()\n values = ravel(values)\n axis = arr.ndim-1\n return concatenate((arr, values), axis=axis)\n\n\ndef _digitize_dispatcher(x, bins, right=None):\n return (x, bins)\n\n\n@array_function_dispatch(_digitize_dispatcher)\ndef digitize(x, bins, right=False):\n \"\"\"\n Return the indices of the bins to which each value in input array belongs.\n\n ========= ============= ============================\n `right` order of bins returned index `i` satisfies\n ========= ============= ============================\n ``False`` increasing ``bins[i-1] <= x < bins[i]``\n ``True`` increasing ``bins[i-1] < x <= bins[i]``\n ``False`` decreasing ``bins[i-1] > x >= bins[i]``\n ``True`` decreasing ``bins[i-1] >= x > bins[i]``\n ========= ============= ============================\n\n If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is\n returned as appropriate.\n\n Parameters\n ----------\n x : array_like\n Input array to be binned. Prior to NumPy 1.10.0, this array had to\n be 1-dimensional, but can now have any shape.\n bins : array_like\n Array of bins. It has to be 1-dimensional and monotonic.\n right : bool, optional\n Indicating whether the intervals include the right or the left bin\n edge. Default behavior is (right==False) indicating that the interval\n does not include the right edge. The left bin end is closed in this\n case, i.e., bins[i-1] <= x < bins[i] is the default behavior for\n monotonically increasing bins.\n\n Returns\n -------\n indices : ndarray of ints\n Output array of indices, of same shape as `x`.\n\n Raises\n ------\n ValueError\n If `bins` is not monotonic.\n TypeError\n If the type of the input is complex.\n\n See Also\n --------\n bincount, histogram, unique, searchsorted\n\n Notes\n -----\n If values in `x` are such that they fall outside the bin range,\n attempting to index `bins` with the indices that `digitize` returns\n will result in an IndexError.\n\n .. versionadded:: 1.10.0\n\n `np.digitize` is implemented in terms of `np.searchsorted`. This means\n that a binary search is used to bin the values, which scales much better\n for larger numbers of bins than the previous linear search. It also removes\n the requirement for the input array to be 1-dimensional.\n\n For monotonically _increasing_ `bins`, the following are equivalent::\n\n np.digitize(x, bins, right=True)\n np.searchsorted(bins, x, side='left')\n\n Note that as the order of the arguments is reversed, the side must be too.\n The `searchsorted` call is marginally faster, as it does not do any\n monotonicity checks. 
Perhaps more importantly, it supports all dtypes.\n\n Examples\n --------\n >>> x = np.array([0.2, 6.4, 3.0, 1.6])\n >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])\n >>> inds = np.digitize(x, bins)\n >>> inds\n array([1, 4, 3, 2])\n >>> for n in range(x.size):\n ... print(bins[inds[n]-1], \"<=\", x[n], \"<\", bins[inds[n]])\n ...\n 0.0 <= 0.2 < 1.0\n 4.0 <= 6.4 < 10.0\n 2.5 <= 3.0 < 4.0\n 1.0 <= 1.6 < 2.5\n\n >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])\n >>> bins = np.array([0, 5, 10, 15, 20])\n >>> np.digitize(x,bins,right=True)\n array([1, 2, 3, 4, 4])\n >>> np.digitize(x,bins,right=False)\n array([1, 3, 3, 4, 5])\n \"\"\"\n x = _nx.asarray(x)\n bins = _nx.asarray(bins)\n\n # here for compatibility, searchsorted below is happy to take this\n if np.issubdtype(x.dtype, _nx.complexfloating):\n raise TypeError(\"x may not be complex\")\n\n mono = _monotonicity(bins)\n if mono == 0:\n raise ValueError(\"bins must be monotonically increasing or decreasing\")\n\n # this is backwards because the arguments below are swapped\n side = 'left' if right else 'right'\n if mono == -1:\n # reverse the bins, and invert the results\n return len(bins) - _nx.searchsorted(bins[::-1], x, side=side)\n else:\n return _nx.searchsorted(bins, x, side=side)\n" ]
[ [ "numpy._pytesttester.PytestTester", "numpy.distutils.fcompiler.new_fcompiler" ], [ "numpy.true_divide", "numpy.take", "numpy.lib.twodim_base.diag", "numpy.asarray", "numpy.core.numeric.normalize_axis_tuple", "numpy.issubdtype", "numpy.core.umath.cos", "numpy.core.umath.frompyfunc", "numpy.around", "numpy.core.numeric.array", "numpy.concatenate", "numpy.lib.stride_tricks.as_strided", "numpy.all", "numpy.core.multiarray.normalize_axis_index", "numpy.any", "numpy.iscomplexobj", "numpy.copyto", "numpy.moveaxis", "numpy.core.numeric.where", "numpy.core.fromnumeric.ravel", "numpy.core.numeric.dtype", "numpy.clip", "numpy.arange", "numpy.empty_like", "numpy.core.fromnumeric.any", "numpy.full", "numpy.core.numeric.searchsorted", "numpy.ceil", "numpy.lib.stride_tricks._broadcast_shape", "numpy.asanyarray", "numpy.core.numeric.asanyarray", "numpy.diff", "numpy.lib.utils._median_nancheck", "numpy.core.numeric.zeros_like", "numpy.core.umath.subtract", "numpy.core.umath.exp", "numpy.core.numeric.arange", "numpy.core.numeric.concatenate", "numpy.core.multiarray._monotonicity", "numpy.core.numeric.empty", "numpy.multiply", "numpy.isnan", "numpy.core.umath.sqrt", "numpy.core.overrides.set_module", "numpy.core.numeric.issubdtype", "numpy.ndim", "numpy.core.fromnumeric.sum", "numpy.core.numeric.absolute", "numpy.core.fromnumeric.mean", "numpy.floor", "numpy.broadcast_arrays", "numpy.iterable", "numpy.core.multiarray._insert", "numpy.core.numeric.isscalar", "numpy.argsort", "numpy.core.umath.mod", "numpy.core.umath.add", "numpy.array", "numpy.core.transpose", "numpy.core.umath.arctan2", "numpy.abs", "numpy.core.numeric.copyto", "numpy.isfinite", "numpy.core.numeric.asarray", "numpy.ones", "numpy.result_type", "numpy.core.numeric.take", "numpy.core.fromnumeric.partition", "numpy.broadcast_to", "numpy.core.umath.sin", "numpy.ndindex", "numpy.core.umath.less_equal", "numpy.empty", "numpy.core.numeric.ones" ] ]
[ { "matplotlib": [], "numpy": [ "1.11", "1.19", "1.24", "1.16", "1.23", "1.20", "1.7", "1.12", "1.21", "1.22", "1.14", "1.6", "1.13", "1.9", "1.17", "1.10", "1.18", "1.15", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
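The numpy record above reduces every Hyndman & Fan percentile method to the same two steps: compute a virtual index "i + g" from the formula in the docstring, then linearly interpolate between the two neighbouring order statistics. A minimal sketch of that computation for the default 'linear' method, where alpha = beta = 1 so the virtual index collapses to q*(n - 1); the array `a` is an arbitrary illustration, and the `method` keyword assumes NumPy >= 1.22 (the version of the code in this record):

import numpy as np

a = np.array([3.0, 1.0, 4.0, 1.0, 5.0, 9.0])
q = 0.40                                # 40th percentile, as a fraction

s = np.sort(a)                          # the order statistics
virtual = q * (a.size - 1)              # i + g with alpha = beta = 1
i = int(np.floor(virtual))              # previous index
g = virtual - i                         # gamma, the interpolation weight
by_hand = s[i] + g * (s[i + 1] - s[i])  # same role as _lerp(previous, next, gamma)

assert np.isclose(by_hand, np.percentile(a, 40, method="linear"))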
HeartFu/NeuralBabyTalk
[ "acd9f927d3b977c69ff8286bc45f9fb073dd1b6b", "acd9f927d3b977c69ff8286bc45f9fb073dd1b6b" ]
[ "object_detection/model/detection/keypoint_rcnn.py", "object_detection/model/detection/RelationNetwork.py" ]
[ "import torch\nfrom torch import nn\n\nfrom torchvision.ops import MultiScaleRoIAlign\n\nfrom ..utils import load_state_dict_from_url\n\nfrom .faster_rcnn import FasterRCNN\nfrom .backbone_utils import resnet_fpn_backbone\n\n\n__all__ = [\n \"KeypointRCNN\", \"keypointrcnn_resnet50_fpn\"\n]\n\n\nclass KeypointRCNN(FasterRCNN):\n \"\"\"\n Implements Keypoint R-CNN.\n The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each\n image, and should be in 0-1 range. Different images can have different sizes.\n The behavior of the model changes depending on whether it is in training or evaluation mode.\n During training, the model expects both the input tensors and targets (a list of dictionaries),\n containing:\n - boxes (FloatTensor[N, 4]): the ground-truth boxes in [x1, y1, x2, y2] format, with values of x\n between 0 and W and values of y between 0 and H\n - labels (Int64Tensor[N]): the class label for each ground-truth box\n - keypoints (FloatTensor[N, K, 3]): the K keypoint locations for each of the N instances, in the\n format [x, y, visibility], where visibility=0 means that the keypoint is not visible.\n The model returns a Dict[Tensor] during training, containing the classification and regression\n losses for both the RPN and the R-CNN, and the keypoint loss.\n During inference, the model requires only the input tensors, and returns the post-processed\n predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as\n follows:\n - boxes (FloatTensor[N, 4]): the predicted boxes in [x1, y1, x2, y2] format, with values of x\n between 0 and W and values of y between 0 and H\n - labels (Int64Tensor[N]): the predicted labels for each image\n - scores (Tensor[N]): the scores of each prediction\n - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format.\n Arguments:\n backbone (nn.Module): the network used to compute the features for the model.\n It should contain an out_channels attribute, which indicates the number of output\n channels that each feature map has (and it should be the same for all feature maps).\n The backbone should return a single Tensor or an OrderedDict[Tensor].\n num_classes (int): number of output classes of the model (including the background).\n If box_predictor is specified, num_classes should be None.\n min_size (int): minimum size of the image to be rescaled before feeding it to the backbone\n max_size (int): maximum size of the image to be rescaled before feeding it to the backbone\n image_mean (Tuple[float, float, float]): mean values used for input normalization.\n They are generally the mean values of the dataset on which the backbone has been trained\n on\n image_std (Tuple[float, float, float]): std values used for input normalization.\n They are generally the std values of the dataset on which the backbone has been trained on\n rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature\n maps.\n rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN\n rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training\n rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing\n rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training\n rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing\n rpn_nms_thresh (float): NMS threshold used for 
postprocessing the RPN proposals\n rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be\n considered as positive during training of the RPN.\n rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be\n considered as negative during training of the RPN.\n rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN\n for computing the loss\n rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training\n of the RPN\n box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes\n box_head (nn.Module): module that takes the cropped feature maps as input\n box_predictor (nn.Module): module that takes the output of box_head and returns the\n classification logits and box regression deltas.\n box_score_thresh (float): during inference, only return proposals with a classification score\n greater than box_score_thresh\n box_nms_thresh (float): NMS threshold for the prediction head. Used during inference\n box_detections_per_img (int): maximum number of detections per image, for all classes.\n box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be\n considered as positive during training of the classification head\n box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be\n considered as negative during training of the classification head\n box_batch_size_per_image (int): number of proposals that are sampled during training of the\n classification head\n box_positive_fraction (float): proportion of positive proposals in a mini-batch during training\n of the classification head\n bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the\n bounding boxes\n keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in\n the locations indicated by the bounding boxes, which will be used for the keypoint head.\n keypoint_head (nn.Module): module that takes the cropped feature maps as input\n keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the\n heatmap logits\n Example::\n >>> import torch\n >>> import torchvision\n >>> from torchvision.models.detection import KeypointRCNN\n >>> from torchvision.models.detection.rpn import AnchorGenerator\n >>>\n >>> # load a pre-trained model for classification and return\n >>> # only the features\n >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features\n >>> # KeypointRCNN needs to know the number of\n >>> # output channels in a backbone. For mobilenet_v2, it's 1280\n >>> # so we need to add it here\n >>> backbone.out_channels = 1280\n >>>\n >>> # let's make the RPN generate 5 x 3 anchors per spatial\n >>> # location, with 5 different sizes and 3 different aspect\n >>> # ratios. We have a Tuple[Tuple[int]] because each feature\n >>> # map could potentially have different sizes and\n >>> # aspect ratios\n >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n >>> aspect_ratios=((0.5, 1.0, 2.0),))\n >>>\n >>> # let's define what are the feature maps that we will\n >>> # use to perform the region of interest cropping, as well as\n >>> # the size of the crop after rescaling.\n >>> # if your backbone returns a Tensor, featmap_names is expected to\n >>> # be ['0']. 
More generally, the backbone should return an\n        >>> # OrderedDict[Tensor], and in featmap_names you can choose which\n        >>> # feature maps to use.\n        >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n        >>>                                                 output_size=7,\n        >>>                                                 sampling_ratio=2)\n        >>>\n        >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n        >>>                                                          output_size=14,\n        >>>                                                          sampling_ratio=2)\n        >>> # put the pieces together inside a KeypointRCNN model\n        >>> model = KeypointRCNN(backbone,\n        >>>                      num_classes=2,\n        >>>                      rpn_anchor_generator=anchor_generator,\n        >>>                      box_roi_pool=roi_pooler,\n        >>>                      keypoint_roi_pool=keypoint_roi_pooler)\n        >>> model.eval()\n        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n        >>> predictions = model(x)\n    \"\"\"\n    def __init__(self, backbone, num_classes=None,\n                 # transform parameters\n                 min_size=None, max_size=1333,\n                 image_mean=None, image_std=None,\n                 # RPN parameters\n                 rpn_anchor_generator=None, rpn_head=None,\n                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,\n                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,\n                 rpn_nms_thresh=0.7,\n                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,\n                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,\n                 # Box parameters\n                 box_roi_pool=None, box_head=None, box_predictor=None,\n                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,\n                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,\n                 box_batch_size_per_image=512, box_positive_fraction=0.25,\n                 bbox_reg_weights=None,\n                 # keypoint parameters\n                 keypoint_roi_pool=None, keypoint_head=None, keypoint_predictor=None,\n                 num_keypoints=17):\n\n        assert isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None)))\n        if min_size is None:\n            min_size = (640, 672, 704, 736, 768, 800)\n\n        if num_classes is not None:\n            if keypoint_predictor is not None:\n                raise ValueError(\"num_classes should be None when keypoint_predictor is specified\")\n\n        out_channels = backbone.out_channels\n\n        if keypoint_roi_pool is None:\n            keypoint_roi_pool = MultiScaleRoIAlign(\n                featmap_names=['0', '1', '2', '3'],\n                output_size=14,\n                sampling_ratio=2)\n\n        if keypoint_head is None:\n            keypoint_layers = tuple(512 for _ in range(8))\n            keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers)\n\n        if keypoint_predictor is None:\n            keypoint_dim_reduced = 512  # == keypoint_layers[-1]\n            keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints)\n\n        super(KeypointRCNN, self).__init__(\n            backbone, num_classes,\n            # transform parameters\n            min_size, max_size,\n            image_mean, image_std,\n            # RPN-specific parameters\n            rpn_anchor_generator, rpn_head,\n            rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test,\n            rpn_post_nms_top_n_train, rpn_post_nms_top_n_test,\n            rpn_nms_thresh,\n            rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n            rpn_batch_size_per_image, rpn_positive_fraction,\n            # Box parameters\n            box_roi_pool, box_head, box_predictor,\n            box_score_thresh, box_nms_thresh, box_detections_per_img,\n            box_fg_iou_thresh, box_bg_iou_thresh,\n            box_batch_size_per_image, box_positive_fraction,\n            bbox_reg_weights)\n\n        self.roi_heads.keypoint_roi_pool = keypoint_roi_pool\n        self.roi_heads.keypoint_head = keypoint_head\n        self.roi_heads.keypoint_predictor = keypoint_predictor\n\n\nclass KeypointRCNNHeads(nn.Sequential):\n    def __init__(self, in_channels, layers):\n        d = []\n        next_feature = in_channels\n        for out_channels in layers:\n            d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1))\n            d.append(nn.ReLU(inplace=True))\n            next_feature = out_channels\n        
super(KeypointRCNNHeads, self).__init__(*d)\n        for m in self.children():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n                nn.init.constant_(m.bias, 0)\n\n\nclass KeypointRCNNPredictor(nn.Module):\n    def __init__(self, in_channels, num_keypoints):\n        super(KeypointRCNNPredictor, self).__init__()\n        input_features = in_channels\n        deconv_kernel = 4\n        self.kps_score_lowres = nn.ConvTranspose2d(\n            input_features,\n            num_keypoints,\n            deconv_kernel,\n            stride=2,\n            padding=deconv_kernel // 2 - 1,\n        )\n        nn.init.kaiming_normal_(\n            self.kps_score_lowres.weight, mode=\"fan_out\", nonlinearity=\"relu\"\n        )\n        nn.init.constant_(self.kps_score_lowres.bias, 0)\n        self.up_scale = 2\n        self.out_channels = num_keypoints\n\n    def forward(self, x):\n        x = self.kps_score_lowres(x)\n        return torch.nn.functional.interpolate(\n            x, scale_factor=float(self.up_scale), mode=\"bilinear\", align_corners=False, recompute_scale_factor=False\n        )\n\n\nmodel_urls = {\n    # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606\n    'keypointrcnn_resnet50_fpn_coco_legacy':\n        'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth',\n    'keypointrcnn_resnet50_fpn_coco':\n        'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth',\n}\n\n\ndef keypointrcnn_resnet50_fpn(pretrained=False, progress=True,\n                              num_classes=2, num_keypoints=17,\n                              pretrained_backbone=True, trainable_backbone_layers=3, **kwargs):\n    \"\"\"\n    Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone.\n    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each\n    image, and should be in ``0-1`` range. Different images can have different sizes.\n    The behavior of the model changes depending on whether it is in training or evaluation mode.\n    During training, the model expects both the input tensors, as well as targets (a list of dictionaries),\n    containing:\n        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``\n          between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``\n        - labels (``Int64Tensor[N]``): the class label for each ground-truth box\n        - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoint locations for each of the ``N`` instances, in the\n          format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible.\n    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression\n    losses for both the RPN and the R-CNN, and the keypoint loss.\n    During inference, the model requires only the input tensors, and returns the post-processed\n    predictions as a ``List[Dict[Tensor]]``, one for each input image. 
The fields of the ``Dict`` are as\n    follows:\n        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with values of ``x``\n          between ``0`` and ``W`` and values of ``y`` between ``0`` and ``H``\n        - labels (``Int64Tensor[N]``): the predicted labels for each image\n        - scores (``Tensor[N]``): the scores for each prediction\n        - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format.\n    Keypoint R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.\n    Example::\n        >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)\n        >>> model.eval()\n        >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n        >>> predictions = model(x)\n        >>>\n        >>> # optionally, if you want to export the model to ONNX:\n        >>> torch.onnx.export(model, x, \"keypoint_rcnn.onnx\", opset_version=11)\n    Arguments:\n        pretrained (bool): If True, returns a model pre-trained on COCO train2017\n        progress (bool): If True, displays a progress bar of the download to stderr\n        pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet\n        num_classes (int): number of output classes of the model (including the background)\n        trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block.\n            Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.\n    \"\"\"\n    assert trainable_backbone_layers <= 5 and trainable_backbone_layers >= 0\n    # don't freeze any layers if pretrained model or backbone is not used\n    if not (pretrained or pretrained_backbone):\n        trainable_backbone_layers = 5\n    if pretrained:\n        # no need to download the backbone if pretrained is set\n        pretrained_backbone = False\n    backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers)\n    model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs)\n    if pretrained:\n        key = 'keypointrcnn_resnet50_fpn_coco'\n        if pretrained == 'legacy':\n            key += '_legacy'\n        state_dict = load_state_dict_from_url(model_urls[key],\n                                              progress=progress)\n        model.load_state_dict(state_dict)\n    return model", "import math\nimport pdb\nimport torch\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom torch import nn\n\n\n# class RelationNetwork(nn.Module):\n#     \"\"\"\n#     Relation Network Module for Object Detection\n#     Arguments:\n#         group: map to number of relations Nr ????\n#     \"\"\"\n#\n#     def __init__(self, fc_dim, feat_dim, dim=(1024, 1024, 1024), group=16, emb_dim=64, input_dim=1024):\n#         super(RelationNetwork, self).__init__()\n#         self.attention_network = AttentionNetwork(fc_dim, feat_dim, dim, group, emb_dim, input_dim)\n#         # self.nong_dim = nong_dim\n#\n#\n#     def forward(self, x, rois):\n#         \"\"\"\n#         forward for RelationNetwork.\n#         Args:\n#             x: RoIs after ROIAlign and fc\n#             rois: RoIs from RPN before ROIAlign\n#         Returns:\n#\n#         \"\"\"\n#         # import pdb\n#         # pdb.set_trace()\n#         sliced_rois = rois[:, 1:5]\n#         # TODO: Check nongt_dim\n#         if self.train:\n#             nongt_dim = 300\n#         else:\n#             nongt_dim = 300\n#\n#         # [num_rois, nongt_dim, 4]\n#         position_matrix = self.extract_position_matrix(sliced_rois, nongt_dim=nongt_dim)\n#\n#         # [num_rois, nongt_dim, 64]\n#         # This step calls extract_position_embedding to implement the E_G operation of Eq. 5 in the paper.\n#         position_embedding = self.extract_position_embedding(position_matrix, feat_dim=64)\n#\n#         # This step calls attention_module_multi_head, which implements Eqs. 5, 4, 3 and 2 of the paper\n#         # in order, plus the second half of Eq. 6, so it covers the core of the paper. The resulting\n#         # attention_1 (shape [num_rois, 1024], where 1024 matches the preceding fully connected layer)\n#         # is the concat part of Eq. 6; the addition part of Eq. 6 is obtained via\n#         # fc_all_1 = fc_new_1 + attention_1.\n#         attention_1 = self.attention_network(x, position_embedding, nongt_dim)\n#         # attention_1 = self.attention_module_multi_head(x, position_embedding, nongt_dim=nongt_dim, fc_dim=16,\n#         #                                                feat_dim=1024, index=1, group=16, dim=(1024, 1024, 1024))\n#\n#         return attention_1\n\n\nclass RelationNetwork(nn.Module):\n    def __init__(self, fc_dim, feat_dim, dim=(1024, 1024, 1024), group=16, emb_dim=64, input_dim=1024):\n        super(RelationNetwork, self).__init__()\n        self.dim_group = (int(dim[0] / group), int(dim[1] / group), int(dim[2] / group))\n        self.dim = dim\n        self.group = group\n        self.fc_dim = fc_dim\n        self.feat_dim = feat_dim\n        # self.pair_pos_fc1 = nn.Linear(emb_dim, fc_dim)  # formula 5 -> Wg\n        self.pair_pos_fc1 = nn.Conv2d(emb_dim, fc_dim, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))  # formula 5 -> Wg\n        self.query_fc1 = nn.Linear(input_dim, dim[0])  # formula 4 -> Wq, roi_feat -> fA\n        self.key_fc1 = nn.Linear(feat_dim, dim[1])  # formula 4 -> Wk, nongt_roi_feat -> fA\n        self.linear_out1 = nn.Conv2d(fc_dim * input_dim, dim[2], kernel_size=(1, 1), groups=fc_dim)\n\n        # init weights\n        for layer in self.children():\n            if isinstance(layer, nn.Conv2d):\n                torch.nn.init.normal_(layer.weight, std=0.01)\n                torch.nn.init.constant_(layer.bias, 0)\n\n    # roi_feat: [num_rois, feat_dim]; feat_dim defaults to 1024 here and matches the width of the preceding\n    # fully connected layer, so it is not the same feat_dim as in extract_position_embedding. The output of\n    # extract_position_embedding corresponds to the position_embedding input here, with shape\n    # [num_rois, nongt_dim, emb_dim]; note the difference between emb_dim and feat_dim. fc_dim must equal group.\n    def forward(self, roi_feat, position_embedding_reshape, nongt_dim):\n        \"\"\"\n        Attention module with vectorized version\n        Args:\n            roi_feat: [num_rois, feat_dim]\n            position_embedding_reshape: [1, emb_dim, num_rois, nongt_dim]\n            nongt_dim:\n            fc_dim: should be same as group\n            feat_dim: dimension of roi_feat, should be same as dim[2]\n            dim: a 3-tuple of (query, key, output)\n            group:\n            index:\n\n        Returns:\n            output: [num_rois, dim[2]]\n        \"\"\"\n\n        # Since dim defaults to (1024, 1024, 1024) and group defaults to 16, dim_group is (64, 64, 64).\n        # Unlike the conventional Faster R-CNN version, the FPN variant makes some changes here.\n\n        # Take the first nongt_dim entries along dimension 0 of roi_feat; the resulting nongt_roi_feat\n        # has shape [nongt_dim, feat_dim].\n        # nongt_roi_feat = torch.chunk(roi_feat, nongt_dim, dim=0)\n        nongt_roi_feat = roi_feat[0:nongt_dim, :]\n        # [num_rois * nongt_dim, emb_dim]\n        # reshape position_embedding from [num_rois, nongt_dim, emb_dim] into\n        # position_embedding_reshape of shape [num_rois * nongt_dim, emb_dim].\n        \"\"\"\n        Unlike the corresponding FasterRCNN version, the FPN version uses a convolutional layer to compute Eq. 5:\n        position_embedding_reshape = torch.reshape(position_embedding, shape=(\n            position_embedding.size(0) * position_embedding.size(1), position_embedding.size(2)))\n\n        # position_feat_1, [num_rois * nongt_dim, fc_dim]\n        # A fully connected layer produces the input of the max function in Eq. 5 of the paper; its\n        # parameters are the WG of Eq. 5. The input is position_embedding_reshape, the embedding of the\n        # predicted box positions, giving position_feat_1 of shape [num_rois * nongt_dim, fc_dim]. It is\n        # then reshaped into aff_weight of shape [num_rois, nongt_dim, fc_dim] and finally permuted into\n        # aff_weight of shape [num_rois, fc_dim, nongt_dim].\n\n        position_feat_1 = self.pair_pos_fc1(position_embedding_reshape)\n        position_feat_1_relu = F.relu(position_feat_1)\n\n        # aff_weight, [num_rois, nongt_dim, fc_dim]\n        aff_weight = torch.reshape(position_feat_1_relu, shape=(-1, position_embedding.size(1), self.fc_dim))\n        # aff_weight, [num_rois, fc_dim, nongt_dim]\n        aff_weight = aff_weight.permute(0, 2, 1)\"\"\"\n        # [1, emb_dim, num_rois, nongt_dim]\n        # position_feat_1, [1, fc_dim, num_rois, nongt_dim]\n        position_feat_1 = self.pair_pos_fc1(position_embedding_reshape)\n        position_feat_1_relu = F.relu(position_feat_1)\n        # aff_weight, [num_rois, fc_dim, nongt_dim, 1]\n        aff_weight = position_feat_1_relu.permute(2, 1, 3, 0)\n        # aff_weight, [num_rois, fc_dim, nongt_dim]\n        aff_weight = aff_weight.squeeze(-1)\n\n        # A fully connected layer produces q_data; its parameters correspond to WQ in Eq. 4 of the paper\n        # and roi_feat corresponds to fA in Eq. 4, with shape [num_rois, feat_dim]. After reshape,\n        # q_data_batch has shape [num_rois, group, dim_group[0]] (default [num_rois, 16, 64]); after\n        # transpose it becomes [group, num_rois, dim_group[0]] (default [16, num_rois, 64]).\n        assert self.dim[0] == self.dim[1], 'Matrix multiply requires same dimensions!'\n        q_data = self.query_fc1(roi_feat)\n        q_data_batch = torch.reshape(q_data, shape=(-1, self.group, self.dim_group[0]))\n        q_data_batch = q_data_batch.permute(1, 0, 2)\n\n        # A fully connected layer produces k_data; its parameters correspond to WK in Eq. 4 and\n        # nongt_roi_feat corresponds to fA in Eq. 4, with shape [nongt_dim, feat_dim]. After reshape and\n        # transpose, k_data_batch has shape [group, nongt_dim, dim_group[0]] (default [16, nongt_dim, 64]).\n        k_data = self.key_fc1(nongt_roi_feat)\n        k_data_batch = torch.reshape(k_data, shape=(-1, self.group, self.dim_group[1]))\n        k_data_batch = k_data_batch.permute(1, 0, 2)\n\n        v_data = nongt_roi_feat\n\n        # This batch_dot operation is the dot of Eq. 4 in the paper, i.e. matrix multiplication.\n        # The resulting aff has shape [group, num_rois, nongt_dim] (default [16, num_rois, nongt_dim]).\n        # A scaling step follows, corresponding to the division in Eq. 4, and a final transpose gives\n        # aff_scale of shape [num_rois, group, nongt_dim]. This aff_scale is the result of Eq. 4: wA.\n        k_data_batch_t = k_data_batch.permute(0, 2, 1)\n        aff = torch.bmm(q_data_batch, k_data_batch_t)\n        # aff_scale, [group, num_rois, nongt_dim]\n        aff_scale = (1.0 / math.sqrt(float(self.dim_group[1]))) * aff\n        aff_scale = aff_scale.permute(1, 0, 2)\n\n        assert self.fc_dim == self.group, 'fc_dim != group'\n        # weighted_aff, [num_rois, fc_dim, nongt_dim]\n        # aff_scale is wA. The log term over max(aff_weight, 1e-6) corresponds to Eq. 5 of the paper. The\n        # log is needed because Eq. 3 is implemented with a softmax, which exponentiates its input (base\n        # e); to reach the form of Eq. 3 (only wA in the exponent, not wG), wG is first passed through log\n        # so that the exponentiation recovers it: e^(log(wG) + wA) = wG * e^(wA).\n        # The softmax implements Eq. 3; dim=2 normalizes along dimension 2.\n        # Finally, aff_softmax of shape [num_rois, fc_dim, nongt_dim] is reshaped into aff_softmax_reshape\n        # of shape [num_rois * fc_dim, nongt_dim]; aff_softmax_reshape corresponds to w in Eq. 3.\n        min_value = torch.from_numpy(np.asarray([1e-6])).float().cuda()\n        weighted_aff = torch.log(torch.max(aff_weight, min_value)) + aff_scale\n        aff_softmax = F.softmax(weighted_aff, dim=2)\n        # [num_rois * fc_dim, nongt_dim]\n        aff_softmax_reshape = torch.reshape(aff_softmax,\n                                            shape=(aff_softmax.size(0) * aff_softmax.size(1), aff_softmax.size(2)))\n\n        # output_t, [num_rois * fc_dim, feat_dim]\n        # The matrix product takes aff_softmax_reshape of shape [num_rois * fc_dim, nongt_dim] and v_data\n        # of shape [nongt_dim, feat_dim], so output_t has shape [num_rois * fc_dim, feat_dim],\n        # corresponding to the product of w and fA in Eq. 2 of the paper. After reshape, output_t has\n        # shape [num_rois, fc_dim * feat_dim, 1, 1].\n        output_t = torch.mm(aff_softmax_reshape, v_data)\n        # output_t, [num_rois, fc_dim * feat_dim, 1, 1]\n        output_t = torch.reshape(output_t, shape=(-1, self.fc_dim * self.feat_dim, 1, 1))\n\n        # linear_out, [num_rois, dim[2], 1, 1]\n        # Finally, a 1x1 convolution with dim[2] (default 1024) output channels gives linear_out of shape\n        # [num_rois, dim[2], 1, 1]; its parameters correspond to WV in Eq. 2 of the paper. After reshape,\n        # output has shape [num_rois, dim[2]], so linear_out is the fR of Eq. 2. Note the convolution has\n        # a groups parameter set to fc_dim (default 16), which corresponds to Nr in the paper, so the\n        # concat of Eq. 6 is already realized inside this convolution via the group operation.\n        linear_out = self.linear_out1(output_t)\n        # output = torch.reshape(linear_out, shape=(linear_out.size(0), linear_out.size(1)))\n        output = torch.squeeze(linear_out)\n\n        return output" ]
[ [ "torch.nn.ConvTranspose2d", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ], [ "torch.nn.functional.softmax", "torch.mm", "torch.max", "torch.nn.init.constant_", "numpy.asarray", "torch.reshape", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.functional.relu", "torch.nn.init.normal_", "torch.bmm", "torch.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MarziehHaghighi/Mask_RCNN
[ "54d23a28c7bd37c4364b41dbc825adcd104392ab" ]
[ "samples/kidney/kidney3.py" ]
[ "\"\"\"\nMask R-CNN\nConfigurations and data loading code for the synthetic Shapes dataset.\nThis is a duplicate of the code in the noteobook train_shapes.ipynb for easy\nimport into other notebooks, such as inspect_model.ipynb.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport math\nimport random\nimport numpy as np\nimport cv2\nimport pickle\nfrom config import Config\nimport utils\n\n\nclass KidneysConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"kidneys\"\n\n # Train on 1 GPU and 8-->2 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8-->2 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 8\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 2 # background + 3 shapes\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 256\n IMAGE_MAX_DIM = 256\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (16, 32, 64,128,256) # anchor side in pixels\n# RPN_ANCHOR_SCALES = (32, 64,128,256) # anchor side in pixels\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 32\n\n # Use a small epoch since the data is simple\n STEPS_PER_EPOCH = 100\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 5\n\n\nclass KidneysDataset(utils.Dataset):\n \"\"\"Generates the shapes synthetic dataset. The dataset consists of simple\n shapes (triangles, squares, circles) placed randomly on a blank surface.\n The images are generated on the fly. No file access required.\n \"\"\"\n \n def load_Kidneys(self, subjectNamesNormalTrain):\n \"\"\"Generate the requested number of synthetic images.\n count: number of images to generate.\n height, width: the size of the generated images.\n \"\"\"\n self.subjectNamesNormalTrain=subjectNamesNormalTrain;\n # Add classes\n# self.add_class(\"kidneys\", 0, \"BG\")\n self.add_class(\"kidneys\", 1, \"right\")\n self.add_class(\"kidneys\", 2, \"left\")\n\n # Add images\n # Generate random specifications of images (i.e. color and\n # list of shapes sizes and locations). This is more compact than\n # actual images. 
Images are generated on the fly in load_image().\n self.numOfSlices=32;\n idCount=0;\n for i in range(len(subjectNamesNormalTrain)): \n subName=self.subjectNamesNormalTrain[i];\n data4D = pickle.load(open( \"/home/ch194093/Desktop/kidneydcemri/deepLearningSegmentation/normalData1Slice3D_Pcs.p\",\"rb\" ))\n sliceNum=i# % self.numOfSlices;\n# print(subName);\n# maxSlice=data4D[subName+'M'].shape[2];\n# if maxSlice<self.numOfSlices and sliceNum>=maxSlice:\n# m0=data4D[subName+'M'][:,:,20].astype('uint8');\n# else:\n m0=data4D[subName+'M'].astype('uint8'); \n \n annotations=[];\n# if 1 not in set(m0.flatten()) and 2 not in set(m0.flatten()) :\n# annotations.append('BG');\n# else:\n if 1 in set(m0.flatten()):\n annotations.append('right');\n \n if 2 in set(m0.flatten()):\n annotations.append('left');\n \n# print(i,set(m0.flatten()),annotations)\n# kidneys = self.load_image(i)\n# idCount=+1;\n self.add_image(\"kidneys\", image_id=i, path=None, \n subName=subName,sliceNum=sliceNum,width=256,\n height=256,annotations=annotations);\n \n def load_image(self, image_id):\n \"\"\"Generate an image from the specs of the given image ID.\n Typically this function loads the image from a file, but\n in this case it generates the image on the fly from the\n specs in image_info.\n \"\"\"\n# print(self.image_info[\"height\"])\n# subName=self.subjectNamesNormalTrain[int(image_id/self.numOfSlices)];\n subName=self.image_info[image_id]['subName'];\n# sliceNum=self.image_info[image_id]['sliceNum'];\n data4D = pickle.load( open( \"/home/ch194093/Desktop/kidneydcemri/deepLearningSegmentation/normalData1Slice3D_Pcs.p\",\"rb\" ))\n# data4D = pickle.load( open( \"/common/abd/marzieh/preprocessedData/singleSubjectsV4/\"+subName+\".p\",\"rb\" ));\n# print(image_id)\n# sliceNum=image_id % self.numOfSlices;\n# maxSlice=data4D[subName+'M'].shape[2];\n# if maxSlice<self.numOfSlices and sliceNum>=maxSlice:\n# image0=data4D[subName+'D'][:,:,20,:];\n# else:\n image0=data4D[subName+'D'];\n image000=(image0+abs(min([0,image0.min()])));image000=(image000/image000.max())*255;\n# image000=image0*255;\n image00=image000.astype('uint8');\n# image=np.lib.pad(image0, ((16, 16), (2, 3)), 'minimum')\n image = cv2.copyMakeBorder(image00,16,16,16,16,cv2.BORDER_REPLICATE)\n \n# info = self.image_info[image_id]\n\n return image[:,:,0:3]\n\n def image_reference(self, image_id):\n \"\"\"Return the shapes data of the image.\"\"\"\n info = self.image_info[image_id]\n if info[\"source\"] == \"kidneys\":\n return info[\"kidneys\"]\n else:\n super(self.__class__).image_reference(self, image_id)\n\n def load_mask(self, image_id):\n \"\"\"Generate instance masks for shapes of the given image ID.\n \"\"\"\n subName=self.image_info[image_id]['subName'];\n sliceNum=self.image_info[image_id]['sliceNum'];\n \n# subName=self.subjectNamesNormalTrain[int(image_id/self.numOfSlices)];\n data4D = pickle.load( open( \"/home/ch194093/Desktop/kidneydcemri/deepLearningSegmentation/normalData1Slice3D_Pcs.p\",\"rb\" ))\n# data4D = pickle.load( open( \"/common/abd/marzieh/preprocessedData/singleSubjectsV4/\"+subName+\".p\",\"rb\" ));\n# sliceNum=image_id % self.numOfSlices;\n# maxSlice=data4D[subName+'M'].shape[2];\n# if maxSlice<self.numOfSlices and sliceNum>=maxSlice:\n# m0=data4D[subName+'M'][:,:,20].astype('uint8');\n# else:\n# m0=data4D[subName+'M'][:,:,sliceNum].astype('uint8');\n \n m0=data4D[subName+'M'].astype('uint8'); \n# m0=data4D[subName+'M'][:,:,sliceNum].astype('uint8');\n m0 = cv2.copyMakeBorder(m0,16,16,16,16,cv2.BORDER_REPLICATE)\n \n# image_info = 
self.image_info[image_id]\n\n instance_masks = []\n class_ids = []\n# annotations = self.image_info[image_id][\"annotations\"]\n # Build mask of shape [height, width, instance_count] and list\n # of class IDs that correspond to each channel of the mask.\n annotations=[];classIds4Anns={};\n classIds4Anns['BG']=0;classIds4Anns['right']=1;classIds4Anns['left']=2;\n annotations=self.image_info[image_id]['annotations']; \n \n# print(annotations)\n# if 1 not in set(m0.flatten()) and 2 not in set(m0.flatten()) :\n# annotations.append('BG');\n# classIds4Anns['BG']=0;\n# else:\n# if 1 in set(m0.flatten()):\n# annotations.append('right');\n# classIds4Anns['right']=1;\n# \n# if 2 in set(m0.flatten()):\n# annotations.append('left');\n# classIds4Anns['left']=2;\n# annotations=['right','left'];classIds4Anns={};\n# classIds4Anns[annotations[0]]=1;classIds4Anns[annotations[1]]=2;\n \n# print(annotations)\n for a in range(len(annotations)):\n# class_id = self.map_source_class_id(classIds4Anns[annotations[a]])\n class_id = classIds4Anns[annotations[a]];\n m=np.copy(m0);\n m[m!=classIds4Anns[annotations[a]]]=0;\n if annotations[a]=='BG':\n m[m==0]=1;\n instance_masks.append(m);\n class_ids.append(class_id)\n\n # Pack instance masks into an array\n if len(annotations)>1:\n mask = np.stack(instance_masks, axis=2);\n else:\n mask=instance_masks;\n \n if class_ids:\n mask = np.stack(instance_masks, axis=2)\n class_ids = np.array(class_ids, dtype=np.int32)\n return mask, class_ids\n else:\n # Call super class to return an empty mask\n mask = np.ones([256,256], dtype=bool)\n return mask, class_ids \n \n \n# class_ids = np.array(class_ids, dtype=np.int32)\n# return mask, class_ids\n\n" ]
[ [ "numpy.copy", "numpy.array", "numpy.stack", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IndigenousEngineering/keras_docker_with_NLTK
[ "1d81a20292ca6926e595d06a6cd725dbb104a146", "075958831a3f74763ad1e094b3642f5174c7f817", "1d81a20292ca6926e595d06a6cd725dbb104a146", "075958831a3f74763ad1e094b3642f5174c7f817" ]
[ "keras/engine/training_generator.py", "tests/keras/backend/backend_test.py", "keras/utils/layer_utils.py", "tests/keras/initializers_test.py" ]
[ "\"\"\"Part of the training engine related to Python generators of array data.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport warnings\nimport numpy as np\n\nfrom .training_utils import is_sequence\nfrom .training_utils import iter_sequence_infinite\nfrom .. import backend as K\nfrom ..utils.data_utils import Sequence\nfrom ..utils.data_utils import GeneratorEnqueuer\nfrom ..utils.data_utils import OrderedEnqueuer\nfrom ..utils.generic_utils import Progbar\nfrom ..utils.generic_utils import to_list\nfrom ..utils.generic_utils import unpack_singleton\nfrom .. import callbacks as cbks\n\n\ndef fit_generator(model,\n generator,\n steps_per_epoch=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_data=None,\n validation_steps=None,\n class_weight=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0):\n \"\"\"See docstring for `Model.fit_generator`.\"\"\"\n epoch = initial_epoch\n\n do_validation = bool(validation_data)\n model._make_train_function()\n if do_validation:\n model._make_test_function()\n\n use_sequence_api = is_sequence(generator)\n if not use_sequence_api and use_multiprocessing and workers > 1:\n warnings.warn(\n UserWarning('Using a generator with `use_multiprocessing=True`'\n ' and multiple workers may duplicate your data.'\n ' Please consider using the`keras.utils.Sequence'\n ' class.'))\n if steps_per_epoch is None:\n if use_sequence_api:\n steps_per_epoch = len(generator)\n else:\n raise ValueError('`steps_per_epoch=None` is only valid for a'\n ' generator based on the '\n '`keras.utils.Sequence`'\n ' class. Please specify `steps_per_epoch` '\n 'or use the `keras.utils.Sequence` class.')\n\n # python 2 has 'next', 3 has '__next__'\n # avoid any explicit version checks\n val_use_sequence_api = is_sequence(validation_data)\n val_gen = (hasattr(validation_data, 'next') or\n hasattr(validation_data, '__next__') or\n val_use_sequence_api)\n if (val_gen and not val_use_sequence_api and\n not validation_steps):\n raise ValueError('`validation_steps=None` is only valid for a'\n ' generator based on the `keras.utils.Sequence`'\n ' class. 
Please specify `validation_steps` or use'\n ' the `keras.utils.Sequence` class.')\n\n # Prepare display labels.\n out_labels = model.metrics_names\n callback_metrics = out_labels + ['val_' + n for n in out_labels]\n\n # prepare callbacks\n model.history = cbks.History()\n _callbacks = [cbks.BaseLogger(\n stateful_metrics=model.stateful_metric_names)]\n if verbose:\n _callbacks.append(\n cbks.ProgbarLogger(\n count_mode='steps',\n stateful_metrics=model.stateful_metric_names))\n _callbacks += (callbacks or []) + [model.history]\n callbacks = cbks.CallbackList(_callbacks)\n\n # it's possible to callback a different model than self:\n if hasattr(model, 'callback_model') and model.callback_model:\n callback_model = model.callback_model\n else:\n callback_model = model\n callbacks.set_model(callback_model)\n callbacks.set_params({\n 'epochs': epochs,\n 'steps': steps_per_epoch,\n 'verbose': verbose,\n 'do_validation': do_validation,\n 'metrics': callback_metrics,\n })\n callbacks.on_train_begin()\n\n enqueuer = None\n val_enqueuer = None\n\n try:\n if do_validation:\n if val_gen and workers > 0:\n # Create an Enqueuer that can be reused\n val_data = validation_data\n if is_sequence(val_data):\n val_enqueuer = OrderedEnqueuer(\n val_data,\n use_multiprocessing=use_multiprocessing)\n validation_steps = validation_steps or len(val_data)\n else:\n val_enqueuer = GeneratorEnqueuer(\n val_data,\n use_multiprocessing=use_multiprocessing)\n val_enqueuer.start(workers=workers,\n max_queue_size=max_queue_size)\n val_enqueuer_gen = val_enqueuer.get()\n elif val_gen:\n val_data = validation_data\n if is_sequence(val_data):\n val_enqueuer_gen = iter_sequence_infinite(val_data)\n validation_steps = validation_steps or len(val_data)\n else:\n val_enqueuer_gen = val_data\n else:\n # Prepare data for validation\n if len(validation_data) == 2:\n val_x, val_y = validation_data\n val_sample_weight = None\n elif len(validation_data) == 3:\n val_x, val_y, val_sample_weight = validation_data\n else:\n raise ValueError('`validation_data` should be a tuple '\n '`(val_x, val_y, val_sample_weight)` '\n 'or `(val_x, val_y)`. Found: ' +\n str(validation_data))\n val_x, val_y, val_sample_weights = model._standardize_user_data(\n val_x, val_y, val_sample_weight)\n val_data = val_x + val_y + val_sample_weights\n if model.uses_learning_phase and not isinstance(K.learning_phase(),\n int):\n val_data += [0.]\n for cbk in callbacks:\n cbk.validation_data = val_data\n\n if workers > 0:\n if use_sequence_api:\n enqueuer = OrderedEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle)\n else:\n enqueuer = GeneratorEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n enqueuer.start(workers=workers, max_queue_size=max_queue_size)\n output_generator = enqueuer.get()\n else:\n if use_sequence_api:\n output_generator = iter_sequence_infinite(generator)\n else:\n output_generator = generator\n\n callback_model.stop_training = False\n # Construct epoch logs.\n epoch_logs = {}\n while epoch < epochs:\n for m in model.stateful_metric_functions:\n m.reset_states()\n callbacks.on_epoch_begin(epoch)\n steps_done = 0\n batch_index = 0\n while steps_done < steps_per_epoch:\n generator_output = next(output_generator)\n\n if not hasattr(generator_output, '__len__'):\n raise ValueError('Output of generator should be '\n 'a tuple `(x, y, sample_weight)` '\n 'or `(x, y)`. 
Found: ' +\n str(generator_output))\n\n if len(generator_output) == 2:\n x, y = generator_output\n sample_weight = None\n elif len(generator_output) == 3:\n x, y, sample_weight = generator_output\n else:\n raise ValueError('Output of generator should be '\n 'a tuple `(x, y, sample_weight)` '\n 'or `(x, y)`. Found: ' +\n str(generator_output))\n # build batch logs\n batch_logs = {}\n if x is None or len(x) == 0:\n # Handle data tensors support when no input given\n # step-size = 1 for data tensors\n batch_size = 1\n elif isinstance(x, list):\n batch_size = x[0].shape[0]\n elif isinstance(x, dict):\n batch_size = list(x.values())[0].shape[0]\n else:\n batch_size = x.shape[0]\n batch_logs['batch'] = batch_index\n batch_logs['size'] = batch_size\n callbacks.on_batch_begin(batch_index, batch_logs)\n\n outs = model.train_on_batch(x, y,\n sample_weight=sample_weight,\n class_weight=class_weight)\n\n outs = to_list(outs)\n for l, o in zip(out_labels, outs):\n batch_logs[l] = o\n\n callbacks.on_batch_end(batch_index, batch_logs)\n\n batch_index += 1\n steps_done += 1\n\n # Epoch finished.\n if steps_done >= steps_per_epoch and do_validation:\n if val_gen:\n val_outs = model.evaluate_generator(\n val_enqueuer_gen,\n validation_steps,\n workers=0)\n else:\n # No need for try/except because\n # data has already been validated.\n val_outs = model.evaluate(\n val_x, val_y,\n batch_size=batch_size,\n sample_weight=val_sample_weights,\n verbose=0)\n val_outs = to_list(val_outs)\n # Same labels assumed.\n for l, o in zip(out_labels, val_outs):\n epoch_logs['val_' + l] = o\n\n if callback_model.stop_training:\n break\n\n callbacks.on_epoch_end(epoch, epoch_logs)\n epoch += 1\n if callback_model.stop_training:\n break\n\n finally:\n try:\n if enqueuer is not None:\n enqueuer.stop()\n finally:\n if val_enqueuer is not None:\n val_enqueuer.stop()\n\n callbacks.on_train_end()\n return model.history\n\n\ndef evaluate_generator(model, generator,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"See docstring for `Model.evaluate_generator`.\"\"\"\n model._make_test_function()\n\n if hasattr(model, 'metrics'):\n for m in model.stateful_metric_functions:\n m.reset_states()\n stateful_metric_indices = [\n i for i, name in enumerate(model.metrics_names)\n if str(name) in model.stateful_metric_names]\n else:\n stateful_metric_indices = []\n\n steps_done = 0\n outs_per_batch = []\n batch_sizes = []\n use_sequence_api = is_sequence(generator)\n if not use_sequence_api and use_multiprocessing and workers > 1:\n warnings.warn(\n UserWarning('Using a generator with `use_multiprocessing=True`'\n ' and multiple workers may duplicate your data.'\n ' Please consider using the`keras.utils.Sequence'\n ' class.'))\n if steps is None:\n if use_sequence_api:\n steps = len(generator)\n else:\n raise ValueError('`steps=None` is only valid for a generator'\n ' based on the `keras.utils.Sequence` class.'\n ' Please specify `steps` or use the'\n ' `keras.utils.Sequence` class.')\n enqueuer = None\n\n try:\n if workers > 0:\n if use_sequence_api:\n enqueuer = OrderedEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n else:\n enqueuer = GeneratorEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n enqueuer.start(workers=workers, max_queue_size=max_queue_size)\n output_generator = enqueuer.get()\n else:\n if use_sequence_api:\n output_generator = iter_sequence_infinite(generator)\n else:\n output_generator = generator\n\n if verbose == 1:\n progbar = 
Progbar(target=steps)\n\n while steps_done < steps:\n generator_output = next(output_generator)\n if not hasattr(generator_output, '__len__'):\n raise ValueError('Output of generator should be a tuple '\n '(x, y, sample_weight) '\n 'or (x, y). Found: ' +\n str(generator_output))\n if len(generator_output) == 2:\n x, y = generator_output\n sample_weight = None\n elif len(generator_output) == 3:\n x, y, sample_weight = generator_output\n else:\n raise ValueError('Output of generator should be a tuple '\n '(x, y, sample_weight) '\n 'or (x, y). Found: ' +\n str(generator_output))\n outs = model.test_on_batch(x, y, sample_weight=sample_weight)\n outs = to_list(outs)\n outs_per_batch.append(outs)\n\n if x is None or len(x) == 0:\n # Handle data tensors support when no input given\n # step-size = 1 for data tensors\n batch_size = 1\n elif isinstance(x, list):\n batch_size = x[0].shape[0]\n elif isinstance(x, dict):\n batch_size = list(x.values())[0].shape[0]\n else:\n batch_size = x.shape[0]\n if batch_size == 0:\n raise ValueError('Received an empty batch. '\n 'Batches should contain '\n 'at least one item.')\n steps_done += 1\n batch_sizes.append(batch_size)\n if verbose == 1:\n progbar.update(steps_done)\n\n finally:\n if enqueuer is not None:\n enqueuer.stop()\n\n averages = []\n for i in range(len(outs)):\n if i not in stateful_metric_indices:\n averages.append(np.average([out[i] for out in outs_per_batch],\n weights=batch_sizes))\n else:\n averages.append(np.float64(outs_per_batch[-1][i]))\n return unpack_singleton(averages)\n\n\ndef predict_generator(model, generator,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"See docstring for `Model.predict_generator`.\"\"\"\n model._make_predict_function()\n\n steps_done = 0\n all_outs = []\n use_sequence_api = is_sequence(generator)\n if not use_sequence_api and use_multiprocessing and workers > 1:\n warnings.warn(\n UserWarning('Using a generator with `use_multiprocessing=True`'\n ' and multiple workers may duplicate your data.'\n ' Please consider using the`keras.utils.Sequence'\n ' class.'))\n if steps is None:\n if use_sequence_api:\n steps = len(generator)\n else:\n raise ValueError('`steps=None` is only valid for a generator'\n ' based on the `keras.utils.Sequence` class.'\n ' Please specify `steps` or use the'\n ' `keras.utils.Sequence` class.')\n enqueuer = None\n\n try:\n if workers > 0:\n if use_sequence_api:\n enqueuer = OrderedEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n else:\n enqueuer = GeneratorEnqueuer(\n generator,\n use_multiprocessing=use_multiprocessing)\n enqueuer.start(workers=workers, max_queue_size=max_queue_size)\n output_generator = enqueuer.get()\n else:\n if use_sequence_api:\n output_generator = iter_sequence_infinite(generator)\n else:\n output_generator = generator\n\n if verbose == 1:\n progbar = Progbar(target=steps)\n\n while steps_done < steps:\n generator_output = next(output_generator)\n if isinstance(generator_output, tuple):\n # Compatibility with the generators\n # used for training.\n if len(generator_output) == 2:\n x, _ = generator_output\n elif len(generator_output) == 3:\n x, _, _ = generator_output\n else:\n raise ValueError('Output of generator should be '\n 'a tuple `(x, y, sample_weight)` '\n 'or `(x, y)`. 
Found: ' +\n str(generator_output))\n else:\n # Assumes a generator that only\n # yields inputs (not targets and sample weights).\n x = generator_output\n\n outs = model.predict_on_batch(x)\n outs = to_list(outs)\n\n if not all_outs:\n for out in outs:\n all_outs.append([])\n\n for i, out in enumerate(outs):\n all_outs[i].append(out)\n steps_done += 1\n if verbose == 1:\n progbar.update(steps_done)\n\n finally:\n if enqueuer is not None:\n enqueuer.stop()\n\n if len(all_outs) == 1:\n if steps_done == 1:\n return all_outs[0][0]\n else:\n return np.concatenate(all_outs[0])\n if steps_done == 1:\n return [out[0] for out in all_outs]\n else:\n return [np.concatenate(out) for out in all_outs]\n", "import pytest\nfrom numpy.testing import assert_allclose\nimport numpy as np\nimport scipy.sparse as sparse\nimport warnings\n\nfrom keras import backend as K\nfrom keras.backend import floatx, set_floatx, variable\nfrom keras.utils.conv_utils import convert_kernel\nfrom keras.backend import numpy_backend as KNP\n\n\ntry:\n from keras.backend import cntk_backend as KC\nexcept ImportError:\n KC = None\n warnings.warn('Could not import the CNTK backend')\n\ntry:\n from keras.backend import tensorflow_backend as KTF\nexcept ImportError:\n KTF = None\n warnings.warn('Could not import the TensorFlow backend.')\n\ntry:\n from keras.backend import theano_backend as KTH\nexcept ImportError:\n KTH = None\n warnings.warn('Could not import the Theano backend')\n\nif K.backend() == 'theano':\n WITH_NP = [KTH, KNP]\nelif K.backend() == 'cntk':\n WITH_NP = [KC, KNP]\nelse:\n WITH_NP = [KTF, KNP]\n\n\ndef check_dtype(var, dtype):\n if K._BACKEND == 'theano':\n assert var.dtype == dtype\n else:\n assert var.dtype.name == '%s_ref' % dtype\n\n\ndef cntk_func_tensors(function_name, shapes_or_vals, **kwargs):\n placeholders = []\n variables = []\n for shape_or_val in shapes_or_vals:\n if isinstance(shape_or_val, tuple):\n shape = shape_or_val\n placeholders.append(KC.placeholder(shape))\n else:\n value = shape_or_val\n variables.append(KC.variable(value))\n\n output_cntk = getattr(KC, function_name)(*(placeholders + variables), **kwargs)\n cntk_func = KC.function(placeholders, [output_cntk])\n return output_cntk, cntk_func\n\n\ndef parse_shape_or_val(shape_or_val):\n if isinstance(shape_or_val, np.ndarray):\n return shape_or_val.shape, shape_or_val\n else:\n return shape_or_val, np.random.random(shape_or_val).astype(np.float32) - 0.5\n\n\ndef assert_list_pairwise(z_list,\n shape=True,\n allclose=True,\n itself=False,\n atol=1e-05):\n for (z1, z2) in zip(z_list[1:], z_list[:-1]):\n if shape:\n assert z1.shape == z2.shape\n if allclose:\n assert_allclose(z1, z2, atol=atol)\n if itself:\n assert z1 == z2\n\n\ndef assert_list_keras_shape(t_list, z_list):\n for t, z in zip(t_list, z_list):\n if hasattr(t, '_keras_shape') and len(t._keras_shape) > 1:\n for i, s in enumerate(t._keras_shape):\n if s:\n assert t._keras_shape[i] == z.shape[i]\n\n\ndef check_single_tensor_operation(function_name,\n x_shape_or_val,\n backend_list,\n **kwargs):\n shape_or_val = kwargs.pop('shape_or_val', True)\n assert_value_equality = kwargs.pop('assert_value_equality', True)\n cntk_dynamicity = kwargs.pop('cntk_dynamicity', False)\n\n if shape_or_val:\n x_shape, x_val = parse_shape_or_val(x_shape_or_val)\n\n t_list = []\n z_list = []\n for k in backend_list:\n if shape_or_val:\n if (k == KC) & (cntk_dynamicity):\n t, f = cntk_func_tensors(function_name, [x_shape], **kwargs)\n z = f([x_val])[0]\n else:\n t = getattr(k, 
function_name)(k.variable(x_val), **kwargs)\n z = k.eval(t)\n else:\n t = getattr(k, function_name)(x_shape_or_val, **kwargs)\n z = k.eval(t)\n t_list += [t]\n z_list += [z]\n\n assert_list_pairwise(z_list, allclose=assert_value_equality)\n assert_list_keras_shape(t_list, z_list)\n\n\ndef check_two_tensor_operation(function_name,\n x_shape_or_val,\n y_shape_or_val,\n backend_list,\n **kwargs):\n concat_args = kwargs.pop('concat_args', False)\n cntk_dynamicity = kwargs.pop('cntk_dynamicity', False)\n cntk_two_dynamicity = kwargs.pop('cntk_two_dynamicity', False)\n\n x_shape, x_val = parse_shape_or_val(x_shape_or_val)\n y_shape, y_val = parse_shape_or_val(y_shape_or_val)\n\n t_list = []\n z_list = []\n for k in backend_list:\n if (k == KC) & (cntk_dynamicity):\n t, f = cntk_func_tensors(function_name, [x_shape, y_val], **kwargs)\n z = f([x_val])[0]\n elif (k == KC) & (cntk_two_dynamicity):\n t, f = cntk_func_tensors(function_name, [x_shape, y_shape], **kwargs)\n z = f([x_val, y_val])[0]\n elif (k == KTH) & (function_name[:4] == 'conv'):\n t = getattr(k, function_name)(\n k.variable(x_val), k.variable(convert_kernel(y_val)), **kwargs)\n z = k.eval(t)\n elif concat_args:\n t = getattr(k, function_name)(\n [k.variable(x_val), k.variable(y_val)], **kwargs)\n z = k.eval(t)\n else:\n t = getattr(k, function_name)(\n k.variable(x_val), k.variable(y_val), **kwargs)\n z = k.eval(t)\n t_list += [t]\n z_list += [z]\n\n assert_list_pairwise(z_list)\n assert_list_keras_shape(t_list, z_list)\n\n\ndef check_composed_tensor_operations(first_function_name,\n first_function_args,\n second_function_name,\n second_function_args,\n input_shape,\n backend_list):\n val = np.random.random(input_shape) - 0.5\n\n z_list = []\n for k in backend_list:\n x = k.variable(val)\n y = getattr(k, first_function_name)(x, **first_function_args)\n z = k.eval(getattr(k, second_function_name)(y, **second_function_args))\n z_list += [z]\n\n assert_list_pairwise(z_list)\n\n\nclass TestBackend(object):\n\n def test_is_keras_tensor(self):\n np_var = np.array([1, 2])\n with pytest.raises(ValueError):\n K.is_keras_tensor(np_var)\n\n keras_var = K.variable(np_var)\n assert K.is_keras_tensor(keras_var) is False\n keras_placeholder = K.placeholder(shape=(2, 4, 5))\n assert K.is_keras_tensor(keras_placeholder) is False\n\n def test_set_learning_phase(self):\n # not supported learning_phase\n with pytest.raises(ValueError):\n K.set_learning_phase(2)\n\n def test_eye(self):\n check_single_tensor_operation('eye', 3, WITH_NP, shape_or_val=False)\n\n def test_ones(self):\n check_single_tensor_operation('ones', (3, 5, 10, 8),\n WITH_NP, shape_or_val=False)\n\n def test_zeros(self):\n check_single_tensor_operation('zeros', (3, 5, 10, 8),\n WITH_NP, shape_or_val=False)\n\n def test_ones_like(self):\n check_single_tensor_operation('ones_like', (3, 5, 10, 8),\n WITH_NP, shape_or_val=True)\n\n def test_zeros_like(self):\n check_single_tensor_operation('zeros_like', (3, 5, 10, 8),\n WITH_NP, shape_or_val=True)\n\n def test_linear_operations(self):\n check_two_tensor_operation('dot', (4, 2), (2, 4), WITH_NP)\n check_two_tensor_operation('dot', (4, 2), (5, 2, 3), WITH_NP)\n\n check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),\n WITH_NP, cntk_two_dynamicity=True, axes=(2, 2))\n check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 3),\n WITH_NP, cntk_two_dynamicity=True, axes=(2, 1))\n check_two_tensor_operation('batch_dot', (4, 2), (4, 2, 3),\n WITH_NP, cntk_two_dynamicity=True, axes=(1, 1))\n check_two_tensor_operation('batch_dot', (32, 20), 
(32, 20),\n WITH_NP, cntk_two_dynamicity=True, axes=1)\n check_two_tensor_operation('batch_dot', (32, 20), (32, 20),\n WITH_NP, cntk_two_dynamicity=True, axes=(1, 1))\n check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),\n WITH_NP, axes=(2, 2))\n check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 3),\n WITH_NP, axes=(2, 1))\n check_two_tensor_operation('batch_dot', (4, 2), (4, 2, 3),\n WITH_NP, axes=(1, 1))\n check_two_tensor_operation('batch_dot', (32, 20), (32, 20),\n WITH_NP, axes=1)\n check_two_tensor_operation('batch_dot', (32, 20), (32, 20),\n WITH_NP, axes=(1, 1))\n\n check_single_tensor_operation('transpose', (4, 2), WITH_NP)\n check_single_tensor_operation('reverse', (4, 3, 2), WITH_NP, axes=1)\n if K.backend() != 'cntk':\n check_single_tensor_operation('reverse', (4, 3, 2), WITH_NP, axes=(1, 2))\n\n def test_random_variables(self):\n check_single_tensor_operation('random_uniform_variable', (2, 3), WITH_NP,\n low=0., high=1.,\n shape_or_val=False,\n assert_value_equality=False)\n check_single_tensor_operation('random_normal_variable', (2, 3), WITH_NP,\n mean=0., scale=1.,\n shape_or_val=False,\n assert_value_equality=False)\n\n def test_batch_dot_shape(self):\n # Note : batch_dot implementation is different for\n # placeholders and variables in CNTK backend\n\n test_cases = []\n test_cases.append([(None, 3, 4, 5), (None, 2, 3, 4), (2, 3)])\n test_cases.append([(None, 3, 4, 5), (None, 2, 4), 2])\n test_cases.append([(None, 3, 4), (None, 2, 3, 4), (2, 3)])\n test_cases.append([(None, 4, 3), (None, 3, 5), (2, 1)])\n test_cases.append([(None, 4), (None, 3, 4), (1, 2)])\n test_cases.append([(None, 4), (None, 4), None])\n\n batch_size = 7\n\n def batch_shape(shape):\n return (batch_size, ) + shape[1:]\n\n def random(shape):\n return np.random.random(batch_shape(shape))\n\n for x_shape, y_shape, axes in test_cases:\n x_np = random(x_shape)\n y_np = random(y_shape)\n z_np = KNP.batch_dot(x_np, y_np, axes)\n\n # test with placeholders\n x = K.placeholder(shape=x_shape)\n y = K.placeholder(shape=y_shape)\n z = K.batch_dot(x, y, axes)\n\n z_shape = K.int_shape(z)\n if z_shape is not None:\n assert z_shape[1:] == z_np.shape[1:]\n\n f = K.function([x, y], [z])\n\n assert_allclose(f([x_np, y_np])[0], z_np, atol=1e-05)\n\n # test with variables\n x = K.variable(x_np)\n y = K.variable(y_np)\n z = K.batch_dot(x, y, axes)\n\n z_shape = K.int_shape(z)\n if z_shape is not None:\n assert z_shape == z_np.shape\n\n z = K.eval(z)\n assert_allclose(z, z_np, atol=1e-05)\n\n def test_shape_operations(self):\n check_two_tensor_operation('concatenate', (4, 3), (4, 2), WITH_NP,\n axis=-1, concat_args=True)\n\n check_single_tensor_operation('reshape', (4, 2), WITH_NP, shape=(8, 1))\n check_single_tensor_operation('permute_dimensions', (4, 2, 3), WITH_NP,\n pattern=(2, 0, 1))\n check_single_tensor_operation('repeat', (4, 1), WITH_NP, n=3)\n check_single_tensor_operation('flatten', (4, 1), WITH_NP)\n check_single_tensor_operation('batch_flatten', (20, 2, 5), WITH_NP,\n cntk_dynamicity=True)\n check_single_tensor_operation('expand_dims', (4, 3), WITH_NP, axis=-1)\n check_single_tensor_operation('expand_dims', (4, 3, 2), WITH_NP, axis=1)\n check_single_tensor_operation('squeeze', (4, 3, 1), WITH_NP, axis=2)\n check_single_tensor_operation('squeeze', (4, 1, 1), WITH_NP, axis=1)\n check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},\n 'squeeze', {'axis': 2},\n (4, 3, 1, 1), WITH_NP)\n\n @pytest.mark.skipif(K.backend() != 'theano',\n reason='We only test the shape inference of the '\n 
'theano backend.')\n def test_none_shape_operations(self):\n # Test shape inference when input\n # shape has `None` entries\n x = K.placeholder((3, None, 4))\n\n y = K.batch_flatten(x)\n if hasattr(y, '_keras_shape'):\n assert y._keras_shape == (3, None)\n\n y = K.flatten(x)\n if hasattr(y, '_keras_shape'):\n assert y._keras_shape == (None,)\n\n def test_repeat_elements(self):\n reps = 3\n for ndims in [1, 2, 3]:\n shape = np.arange(2, 2 + ndims)\n arr = np.arange(np.prod(shape)).reshape(shape)\n\n for rep_axis in range(ndims):\n check_single_tensor_operation('repeat_elements', arr, WITH_NP,\n rep=reps, axis=rep_axis)\n\n if K.backend() != 'cntk':\n shape = list(shape)\n shape[rep_axis] = None\n x = K.placeholder(shape=shape)\n y = K.repeat_elements(x, reps, axis=rep_axis)\n assert y._keras_shape == tuple(shape)\n assert y._keras_shape == K.int_shape(y)\n\n def test_tile(self):\n shape = (3, 4)\n arr = np.arange(np.prod(shape)).reshape(shape)\n check_single_tensor_operation('tile', arr, WITH_NP, n=[2, 1])\n check_single_tensor_operation('tile', (2, 5), WITH_NP, n=[5, 2])\n\n # test theano shape inference when\n # input shape has None entries\n if K.backend() == 'theano':\n x = K.placeholder(shape=(None, 4))\n n = 2\n y = K.tile(x, n)\n assert y._keras_shape == (None, 8)\n n = (4, 3)\n y = K.tile(x, n)\n assert y._keras_shape == (None, 12)\n\n def test_gather(self):\n shape = (10, 2, 3)\n ref = np.arange(np.prod(shape)).reshape(shape)\n inds = [1, 3, 7, 9]\n t_list = [k.gather(k.variable(ref), k.variable(inds, dtype='int32'))\n for k in WITH_NP]\n z_list = [k.eval(k.gather(k.variable(ref), k.variable(inds, dtype='int32')))\n for k in WITH_NP]\n\n assert_list_pairwise(z_list)\n assert_list_keras_shape(t_list, z_list)\n\n # test theano shape inference when\n # input shape has None entries\n if K.backend() == 'theano':\n x = K.placeholder(shape=(None, 3, 4))\n indices = K.placeholder(shape=(5, 6), dtype='int32')\n y = K.gather(x, indices)\n assert y._keras_shape == (5, 6, 3, 4)\n\n @pytest.mark.parametrize('function_name',\n ['get_value', 'count_params',\n 'int_shape', 'get_variable_shape'])\n def test_value_manipulation(self, function_name):\n val = np.random.random((4, 2))\n v_list = [getattr(k, function_name)(k.variable(val))\n for k in WITH_NP]\n\n if function_name == 'get_value':\n assert_list_pairwise(v_list)\n else:\n assert_list_pairwise(v_list, shape=False, allclose=False, itself=True)\n\n def test_print_tensor(self):\n check_single_tensor_operation('print_tensor', (), WITH_NP)\n check_single_tensor_operation('print_tensor', (2,), WITH_NP)\n check_single_tensor_operation('print_tensor', (4, 3), WITH_NP)\n check_single_tensor_operation('print_tensor', (1, 2, 3), WITH_NP)\n\n def test_elementwise_operations(self):\n check_single_tensor_operation('max', (4, 2), WITH_NP)\n check_single_tensor_operation('max', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('max', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('min', (4, 2), WITH_NP)\n check_single_tensor_operation('min', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('min', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('mean', (4, 2), WITH_NP)\n check_single_tensor_operation('mean', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('mean', (4, 2, 3),\n WITH_NP, axis=-1, keepdims=True)\n check_single_tensor_operation('mean', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('var', (4, 2), WITH_NP)\n 
check_single_tensor_operation('var', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('var', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('std', (4, 2), WITH_NP)\n check_single_tensor_operation('std', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('std', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('logsumexp', (4, 2), WITH_NP)\n check_single_tensor_operation('logsumexp', (4, 2),\n WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('logsumexp', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('prod', (4, 2), WITH_NP)\n check_single_tensor_operation('prod', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('prod', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('any', (4, 2), WITH_NP)\n check_single_tensor_operation('any', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('any', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('all', (4, 2), WITH_NP)\n check_single_tensor_operation('all', (4, 2), WITH_NP, axis=1, keepdims=True)\n check_single_tensor_operation('all', (4, 2, 3), WITH_NP, axis=[1, -1])\n\n check_single_tensor_operation('argmax', (4, 2), WITH_NP)\n check_single_tensor_operation('argmax', (4, 2), WITH_NP, axis=1)\n\n check_single_tensor_operation('argmin', (4, 2), WITH_NP)\n check_single_tensor_operation('argmin', (4, 2), WITH_NP, axis=1)\n\n check_single_tensor_operation('square', (4, 2), WITH_NP)\n check_single_tensor_operation('abs', (4, 2), WITH_NP)\n check_single_tensor_operation('sqrt', (4, 2), WITH_NP)\n check_single_tensor_operation('exp', (4, 2), WITH_NP)\n\n check_single_tensor_operation('round', (4, 2), WITH_NP)\n check_single_tensor_operation('sign', (4, 2), WITH_NP)\n check_single_tensor_operation('pow', (4, 2), WITH_NP, a=3)\n check_single_tensor_operation('clip', (4, 2), WITH_NP, min_value=0.4,\n max_value=0.6)\n\n check_single_tensor_operation('cos', (4, 2), WITH_NP)\n check_single_tensor_operation('sin', (4, 2), WITH_NP)\n\n # two-tensor ops\n check_two_tensor_operation('equal', (4, 2), (4, 2), WITH_NP)\n check_two_tensor_operation('not_equal', (4, 2), (4, 2), WITH_NP)\n check_two_tensor_operation('greater', (4, 2), (4, 2), WITH_NP)\n check_two_tensor_operation('greater_equal', (4, 2), (4, 2), WITH_NP)\n check_two_tensor_operation('less', (4, 2), (4, 2), WITH_NP)\n check_two_tensor_operation('less_equal', (4, 2), (4, 2), WITH_NP)\n check_two_tensor_operation('maximum', (4, 2), (4, 2), WITH_NP)\n check_two_tensor_operation('minimum', (4, 2), (4, 2), WITH_NP)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='cntk does not support '\n 'cumsum and cumprod yet')\n def test_cumsum_cumprod(self):\n check_single_tensor_operation('cumsum', (4, 2), WITH_NP)\n check_single_tensor_operation('cumsum', (4, 2), WITH_NP, axis=1)\n\n check_single_tensor_operation('cumprod', (4, 2), WITH_NP)\n check_single_tensor_operation('cumprod', (4, 2), WITH_NP, axis=1)\n\n @pytest.mark.skipif(K.backend() == 'cntk',\n reason='cntk return -85.1 for zero or '\n 'negative number, not nan, so can\\'t '\n 'compare with other backend.')\n def test_log(self):\n check_single_tensor_operation('log', (4, 2), WITH_NP)\n\n @pytest.mark.skipif(K.backend() == 'cntk',\n reason='cntk doesn\\'t support gradient in this way.')\n def test_gradient(self):\n val = np.random.random((4, 2))\n x_list = [k.variable(val) for k in [KTH, KTF]]\n z_list = []\n zero_list = []\n for x, k in zip(x_list, [KTH, 
KTF]):\n exp = x * k.exp(x)\n loss = k.sum(exp)\n zero_loss = k.stop_gradient(loss)\n grad = k.gradients(loss, [exp])\n zero_grad = k.gradients(loss + zero_loss, [exp])\n z_list.append(k.eval(grad[0]))\n zero_list.append(k.eval(zero_grad[0]))\n\n assert_list_pairwise(z_list)\n assert_list_pairwise(zero_list)\n for i in range(len(z_list)):\n assert_allclose(zero_list[i], z_list[i], atol=1e-05)\n\n def test_stop_gradient(self):\n # This test checks the consistency of the stop_gradient backend API.\n # It doesn't check the functionality (which is checked at the\n # test_gradient test).\n val = np.random.random((4, 2))\n a = K.variable(val)\n b = K.square(a)\n c, d = K.stop_gradient([a, b])\n e = K.stop_gradient(b)\n\n @pytest.mark.skipif(K.backend() == 'cntk',\n reason='cntk currently not support function in this '\n 'way, so can\\'t test as this.')\n def test_function(self):\n test_backend = [KTH, KTF]\n val = np.random.random((4, 2))\n input_val = np.random.random((4, 2))\n\n f_list = []\n x_list = []\n for k in test_backend:\n x = k.variable(val)\n x_list.append(x)\n y = k.placeholder(ndim=2)\n exp = k.square(x) + y\n update = x * 2\n f = k.function([y], [exp], updates=[(x, update)])\n f_list.append(f)\n\n function_outputs_list = [f([input_val])[0] for f in f_list]\n assert_list_pairwise(function_outputs_list)\n\n new_val_list = [k.get_value(x) for x, k in zip(x_list, test_backend)]\n assert_list_pairwise(new_val_list)\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='Uses the `fetches` argument.')\n def test_function_tf_fetches(self):\n # Additional operations can be passed to tf.Session().run() via its\n # `fetches` arguments. In contrast to `updates` argument of\n # KTF.function() these do not have control dependency on `outputs`, so\n # they can run in parallel. Also they should not contribute to output of\n # KTF.function().\n\n x = K.variable(0.)\n y = K.variable(0.)\n x_placeholder = K.placeholder(shape=())\n y_placeholder = K.placeholder(shape=())\n\n f = K.function(inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n updates=[(x, x_placeholder + 1.)],\n fetches=[K.update(y, 5.)])\n output = f([10., 20.])\n assert output == [30.]\n assert K.get_session().run(fetches=[x, y]) == [11., 5.]\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='Uses the `feed_dict` argument.')\n def test_function_tf_feed_dict(self):\n # Additional substitutions can be passed to `tf.Session().run()` via its\n # `feed_dict` arguments. Note that the feed_dict is passed once in the\n # constructor but we can modify the values in the dictionary. 
Through\n # this feed_dict we can provide additional substitutions besides Keras\n # inputs.\n\n x = K.variable(0.)\n y = K.variable(0.)\n x_placeholder = K.placeholder(shape=())\n y_placeholder = K.placeholder(shape=())\n\n feed_dict = {y_placeholder: 3.}\n\n f = K.function(inputs=[x_placeholder],\n outputs=[x_placeholder + 1.],\n updates=[(x, x_placeholder + 10.)],\n feed_dict=feed_dict,\n fetches=[K.update(y, y_placeholder * 10.)])\n output = f([10.])\n assert output == [11.]\n assert K.get_session().run(fetches=[x, y]) == [20., 30.]\n\n # updated value in feed_dict will be modified within the K.function()\n feed_dict[y_placeholder] = 4.\n output = f([20.])\n assert output == [21.]\n assert K.get_session().run(fetches=[x, y]) == [30., 40.]\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='Uses the `options` and `run_metadata` arguments.')\n def test_function_tf_run_options_with_run_metadata(self):\n from tensorflow.core.protobuf import config_pb2\n x_placeholder = K.placeholder(shape=())\n y_placeholder = K.placeholder(shape=())\n\n run_options = config_pb2.RunOptions(output_partition_graphs=True)\n run_metadata = config_pb2.RunMetadata()\n # enable run_options.\n f = K.function(inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n options=run_options,\n run_metadata=run_metadata)\n output = f([10., 20.])\n assert output == [30.]\n assert len(run_metadata.partition_graphs) > 0\n # disable run_options.\n f = K.function(inputs=[x_placeholder, y_placeholder],\n outputs=[x_placeholder + y_placeholder],\n run_metadata=run_metadata)\n output = f([10., 20.])\n assert output == [30.]\n assert len(run_metadata.partition_graphs) == 0\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='Uses the `string` type for a tensor.')\n def test_function_tf_string_input(self):\n # Test functions with string inputs.\n\n x_placeholder = K.placeholder(shape=(), dtype=\"string\")\n x_identity = K.identity(x_placeholder)\n\n f = K.function(inputs=[x_placeholder], outputs=[x_identity])\n output = f([b'test'])\n assert output == [b'test']\n\n def test_rnn(self):\n # implement a simple RNN\n num_samples = 4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n _, x = parse_shape_or_val((num_samples, timesteps, input_dim))\n _, h0 = parse_shape_or_val((num_samples, output_dim))\n _, wi = parse_shape_or_val((input_dim, output_dim))\n _, wh = parse_shape_or_val((output_dim, output_dim))\n mask = np.random.randint(2, size=(num_samples, timesteps))\n\n x_k = K.variable(x)\n h0_k = [K.variable(h0)]\n wi_k = K.variable(wi)\n wh_k = K.variable(wh)\n mask_k = K.variable(mask)\n\n def rnn_fn(x_k, h_k):\n assert len(h_k) == 1\n y_k = K.dot(x_k, wi_k) + K.dot(h_k[0], wh_k)\n return y_k, [y_k]\n\n # test default setup\n last_output_list = []\n outputs_list = []\n state_list = []\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True,\n 'input_length': timesteps},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True,\n 'input_length': timesteps},\n {'go_backwards': False, 'mask': mask_k},\n {'go_backwards': False, 'mask': mask_k, 'unroll': True,\n 'input_length': timesteps},\n ]\n\n for (i, kwargs) in enumerate(kwargs_list):\n last_y1, y1, h1 = KNP.rnn(x, [wi, wh, None], h0, **kwargs)\n last_y2, y2, h2 = K.rnn(rnn_fn, x_k, h0_k, **kwargs)\n\n assert len(h2) == 1\n last_y2 = K.eval(last_y2)\n y2 = K.eval(y2)\n h1 = h1[:, -1]\n h2 = K.eval(h2[0])\n\n if kwargs['mask'] is not 
None:\n last_y1 = last_y1 * np.expand_dims(mask[:, -1], -1)\n last_y2 = last_y2 * np.expand_dims(mask[:, -1], -1)\n y1 = y1 * np.expand_dims(mask, -1)\n y2 = y2 * np.expand_dims(mask, -1)\n h1 = h1 * np.expand_dims(mask[:, -1], -1)\n h2 = h2 * np.expand_dims(mask[:, -1], -1)\n\n last_output_list.append(last_y2)\n outputs_list.append(y2)\n state_list.append(h2)\n\n if i % 2 == 0:\n assert_allclose(last_y1, last_y2, atol=1e-05)\n assert_allclose(y1, y2, atol=1e-05)\n assert_allclose(h1, h2, atol=1e-05)\n else:\n assert_allclose(last_output_list[i - 1], last_output_list[i],\n atol=1e-05)\n assert_allclose(outputs_list[i - 1], outputs_list[i], atol=1e-05)\n assert_allclose(state_list[i - 1], state_list[i], atol=1e-05)\n\n def test_rnn_additional_states(self):\n # implement a simple RNN with an additional state\n # whose shape is different from that of the output\n num_samples = 4\n input_dim = 5\n output_dim = 3\n timesteps = 6\n\n _, x = parse_shape_or_val((num_samples, timesteps, input_dim))\n _, h0 = parse_shape_or_val((num_samples, output_dim))\n _, wi = parse_shape_or_val((input_dim, output_dim))\n _, wh = parse_shape_or_val((output_dim, output_dim))\n mask = np.random.randint(2, size=(num_samples, timesteps))\n\n x_k = K.variable(x)\n h0_k = [K.variable(h0), K.variable(np.concatenate([h0, h0], axis=-1))]\n wi_k = K.variable(wi)\n wh_k = K.variable(wh)\n mask_k = K.variable(mask)\n\n def rnn_fn(x_k, h_k):\n assert len(h_k) == 2\n y_k = K.dot(x_k, wi_k) + K.dot(h_k[0], wh_k)\n return y_k, [y_k, K.concatenate([y_k, y_k], axis=-1)]\n\n # test default setup\n last_output_list = []\n outputs_list = []\n state_list = []\n\n kwargs_list = [\n {'go_backwards': False, 'mask': None},\n {'go_backwards': False, 'mask': None, 'unroll': True,\n 'input_length': timesteps},\n {'go_backwards': True, 'mask': None},\n {'go_backwards': True, 'mask': None, 'unroll': True,\n 'input_length': timesteps},\n {'go_backwards': False, 'mask': mask_k},\n {'go_backwards': False, 'mask': mask_k, 'unroll': True,\n 'input_length': timesteps},\n ]\n\n for (i, kwargs) in enumerate(kwargs_list):\n last_y1, y1, h1 = KNP.rnn(x, [wi, wh, None], h0, **kwargs)\n last_y2, y2, h2 = K.rnn(rnn_fn, x_k, h0_k, **kwargs)\n\n assert len(h2) == 2\n last_y2 = K.eval(last_y2)\n y2 = K.eval(y2)\n h11 = h1[:, -1]\n h12 = np.concatenate([h1[:, -1], h1[:, -1]], axis=-1)\n h21 = K.eval(h2[0])\n h22 = K.eval(h2[1])\n\n if kwargs['mask'] is not None:\n last_y1 = last_y1 * np.expand_dims(mask[:, -1], -1)\n last_y2 = last_y2 * np.expand_dims(mask[:, -1], -1)\n y1 = y1 * np.expand_dims(mask, -1)\n y2 = y2 * np.expand_dims(mask, -1)\n h11 = h11 * np.expand_dims(mask[:, -1], -1)\n h21 = h21 * np.expand_dims(mask[:, -1], -1)\n h12 = h12 * np.expand_dims(mask[:, -1], -1)\n h22 = h22 * np.expand_dims(mask[:, -1], -1)\n\n last_output_list.append(last_y2)\n outputs_list.append(y2)\n state_list.append((h21, h22))\n\n if i % 2 == 0:\n assert_allclose(last_y1, last_y2, atol=1e-05)\n assert_allclose(y1, y2, atol=1e-05)\n assert_allclose(h11, h21, atol=1e-05)\n assert_allclose(h12, h22, atol=1e-05)\n else:\n assert_allclose(last_output_list[i - 1], last_output_list[i],\n atol=1e-05)\n assert_allclose(outputs_list[i - 1], outputs_list[i], atol=1e-05)\n assert_allclose(state_list[i - 1][0], state_list[i][0], atol=1e-05)\n assert_allclose(state_list[i - 1][1], state_list[i][1], atol=1e-05)\n\n def test_rnn_no_states(self):\n # implement a simple RNN without states\n input_dim = 8\n output_dim = 4\n timesteps = 5\n\n _, x = parse_shape_or_val((32, timesteps, 
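# [Editor's sketch] For orientation, the recurrence that rnn_fn above
# defines, written as the plain NumPy loop K.rnn effectively performs
# (mask handling omitted):
import numpy as np

def numpy_rnn(x, wi, wh, h0, go_backwards=False):
    # x: (samples, timesteps, input_dim); h0: (samples, output_dim)
    steps = range(x.shape[1])
    if go_backwards:
        steps = reversed(steps)
    h = h0
    outputs = []
    for t in steps:
        h = np.dot(x[:, t], wi) + np.dot(h, wh)
        outputs.append(h)
    # (last output, outputs stacked on the time axis, final state)
    return outputs[-1], np.stack(outputs, axis=1), h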
input_dim))\n _, wi = parse_shape_or_val((input_dim, output_dim))\n\n x_k = K.variable(x)\n wi_k = K.variable(wi)\n\n def rnn_fn(x_k, h_k):\n assert len(h_k) == 0\n y_k = K.dot(x_k, wi_k)\n return y_k, []\n\n last_y1, y1, h1 = KNP.rnn(x, [wi, None, None], None,\n go_backwards=False, mask=None)\n last_y2, y2, h2 = K.rnn(rnn_fn, x_k, [],\n go_backwards=False, mask=None)\n\n assert len(h2) == 0\n last_y2 = K.eval(last_y2)\n y2 = K.eval(y2)\n\n assert_allclose(last_y1, last_y2, atol=1e-05)\n assert_allclose(y1, y2, atol=1e-05)\n\n def test_rnn_output_and_state_masking_independent(self):\n num_samples = 2\n num_timesteps = 4\n state_and_io_size = 5\n mask_last_num_timesteps = 2 # for second sample only\n\n # a step function that just outputs inputs,\n # but increments states +1 per timestep\n def step_function(inputs, states):\n return inputs, [s + 1 for s in states]\n\n inputs_vals = np.random.random(\n (num_samples, num_timesteps, state_and_io_size))\n initial_state_vals = np.random.random((num_samples, state_and_io_size))\n # masking of two last timesteps for second sample only\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[1, -mask_last_num_timesteps:] = 0\n\n # outputs expected to be same as inputs for the first sample\n expected_outputs = inputs_vals.copy()\n # but for the second sample all outputs in masked region should be the same\n # as last output before masked region\n expected_outputs[1, -mask_last_num_timesteps:] = \\\n expected_outputs[1, -(mask_last_num_timesteps + 1)]\n\n expected_state = initial_state_vals.copy()\n # first state should be incremented for every timestep (no masking)\n expected_state[0] += num_timesteps\n # second state should not be incremented for last two timesteps\n expected_state[1] += (num_timesteps - mask_last_num_timesteps)\n\n # verify same expected output for `unroll=true/false`\n inputs = K.variable(inputs_vals)\n initial_states = [K.variable(initial_state_vals)]\n mask = K.variable(mask_vals)\n for unroll in [True, False]:\n last_output, outputs, last_states = K.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n assert_allclose(K.eval(outputs), expected_outputs)\n assert_allclose(K.eval(last_states[0]), expected_state)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported')\n def test_rnn_output_num_dim_larger_than_2_masking(self):\n num_samples = 3\n num_timesteps = 4\n num_features = 5\n\n def step_function(inputs, states):\n outputs = K.tile(K.expand_dims(inputs), [1, 1, 2])\n return outputs, states\n\n inputs_vals = np.random.random((num_samples, num_timesteps, num_features))\n initial_state_vals = np.random.random((num_samples, 6))\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[-1, -1] = 0 # final timestep masked for last sample\n\n expected_outputs = np.repeat(inputs_vals[..., None], repeats=2, axis=-1)\n # for the last sample, the final timestep (in masked region) should be the\n # same as the second to final output (before masked region)\n expected_outputs[-1, -1] = expected_outputs[-1, -2]\n\n inputs = K.variable(inputs_vals)\n initial_states = [K.variable(initial_state_vals)]\n mask = K.variable(mask_vals)\n for unroll in [True, False]:\n last_output, outputs, last_states = K.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n assert_allclose(K.eval(outputs), expected_outputs)\n\n @pytest.mark.skipif(K.backend() == 'cntk', 
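# [Editor's sketch] The masking contract the tests above assert, as one
# NumPy step: wherever the mask is 0, the step's output and states are
# discarded and the previous values are carried forward. Illustrative:
import numpy as np

def apply_step_mask(new, prev, mask_t):
    # mask_t: (samples,); new/prev: matching (samples, ...) arrays
    m = mask_t.reshape((-1,) + (1,) * (new.ndim - 1))
    return np.where(m > 0, new, prev)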
reason='Not supported')\n def test_rnn_state_num_dim_larger_than_2_masking(self):\n num_samples = 3\n num_timesteps = 4\n\n def step_function(inputs, states):\n return inputs, [s + 1 for s in states]\n\n inputs_vals = np.random.random((num_samples, num_timesteps, 5))\n initial_state_vals = np.random.random((num_samples, 6, 7))\n mask_vals = np.ones((num_samples, num_timesteps))\n mask_vals[0, -2:] = 0 # final two timesteps masked for first sample\n\n expected_last_state = initial_state_vals.copy()\n expected_last_state[0] += (num_timesteps - 2)\n expected_last_state[1:] += num_timesteps\n\n inputs = K.variable(inputs_vals)\n initial_states = [K.variable(initial_state_vals)]\n mask = K.variable(mask_vals)\n for unroll in [True, False]:\n last_output, outputs, last_states = K.rnn(\n step_function,\n inputs,\n initial_states,\n mask=mask,\n unroll=unroll,\n input_length=num_timesteps if unroll else None)\n\n # not updated last timestep:\n assert_allclose(K.eval(last_states[0]), expected_last_state)\n\n @pytest.mark.parametrize('x_np,axis,keepdims', [\n (np.array([1.1, 0.8, 0.9]), 0, False),\n (np.array([[1.1, 0.8, 0.9]]), 0, False),\n (np.array([[1.1, 0.8, 0.9]]), 1, False),\n (np.array([[1.1, 0.8, 0.9]]), -1, False),\n (np.array([[1.1, 0.8, 0.9]]), 1, True),\n (np.array([[1.1], [1.2]]), 0, False),\n (np.array([[1.1], [1.2]]), 1, False),\n (np.array([[1.1], [1.2]]), -1, False),\n (np.array([[1.1], [1.2]]), -1, True),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), None, False),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), 0, False),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), 1, False),\n (np.array([[1.1, 1.2, 1.3], [0.9, 0.7, 1.4]]), -1, False),\n ])\n def test_logsumexp(self, x_np, axis, keepdims):\n '''\n Check if K.logsumexp works properly for values close to one.\n '''\n x = K.variable(x_np)\n assert_allclose(K.eval(K.logsumexp(x, axis=axis, keepdims=keepdims)),\n np.log(np.sum(np.exp(x_np), axis=axis, keepdims=keepdims)),\n rtol=1e-5)\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='The optimization is applied only with TensorFlow.')\n def test_logsumexp_optim(self):\n '''\n Check if optimization works.\n '''\n x_np = np.array([1e+4, 1e-4])\n result = K.eval(K.logsumexp(K.variable(x_np), axis=0))\n assert_allclose(result, 1e4, rtol=1e-5)\n\n def test_switch(self):\n # scalar\n val = np.random.random()\n z_list = []\n for k in WITH_NP:\n x = k.variable(val)\n x = k.switch(k.greater_equal(x, 0.5), x * 0.1, x * 0.2)\n z_list.append(k.eval(x))\n assert_list_pairwise(z_list)\n # non scalar\n shapes = []\n shapes.append([(4, 3, 2), (4, 3, 2), (4, 3, 2)])\n shapes.append([(4, 3,), (4, 3, 2), (4, 3, 2)])\n shapes.append([(4,), (4, 3, 2), (4, 3, 2)])\n for s in shapes:\n z_list = []\n arrays = list(map(np.random.random, s))\n for k in WITH_NP:\n x, then_expr, else_expr = map(k.variable, arrays)\n cond = k.greater_equal(x, 0.5)\n z_list.append(k.eval(k.switch(cond, then_expr, else_expr)))\n assert_list_pairwise(z_list)\n\n def test_dropout(self):\n val = np.random.random((100, 100))\n z_list = [k.eval(k.dropout(k.variable(val), level=0.2))\n for k in WITH_NP]\n assert_list_pairwise(z_list, allclose=False)\n # dropout patterns are different, only check mean\n for i in range(len(z_list) - 1):\n assert np.abs(z_list[i].mean() - z_list[i + 1].mean()) < 0.05\n\n z_list = [k.eval(k.dropout(k.variable(val), level=0.2,\n noise_shape=list(val.shape)))\n for k in WITH_NP]\n assert_list_pairwise(z_list, allclose=False)\n # dropout patterns are different, only check mean\n for i in 
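# [Editor's sketch] Only the means are compared across backends above
# because dropout masks are random per backend. The inverted-dropout rule
# under test (kept units rescaled so the expected value is preserved):
import numpy as np

def numpy_dropout(x, level, rng=np.random):
    assert 0. <= level < 1.
    keep = rng.binomial(1, 1. - level, size=x.shape)
    return x * keep / (1. - level)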
range(len(z_list) - 1):\n assert np.abs(z_list[i].mean() - z_list[i + 1].mean()) < 0.05\n\n # Test invalid use cases\n with pytest.raises(ValueError):\n z = K.dropout(K.variable(val), level=-0.5)\n\n @pytest.mark.parametrize('alpha,max_value,threshold', [\n (0.0, None, 0.0), # standard relu\n (0.1, None, 0.0), # set alpha only\n (0.0, 5.0, 0.0), # set max_value only\n (0.0, None, 0.8), # set threshold only\n (0.1, 5.0, 0.0), # set alpha and max_value\n (0.1, None, 0.8), # set alpha and threshold\n (0.0, 5.0, 0.8), # set max_value and threshold\n (0.1, 5.0, 0.8), # set all\n (0.1, 0.0, 0.8), # max_value is zero\n (0.1, 5.0, -2.8), # threshold is negative\n (0.1, 9.0, 0.8), # max_value > 6\n ])\n def test_relu(self, alpha, max_value, threshold):\n check_single_tensor_operation('relu', (4, 2), WITH_NP, alpha=alpha,\n max_value=max_value, threshold=threshold)\n\n def test_nn_operations(self):\n check_single_tensor_operation('softplus', (4, 10), WITH_NP)\n check_single_tensor_operation('elu', (4, 10), WITH_NP, alpha=0.5)\n\n check_single_tensor_operation('sigmoid', (4, 2), WITH_NP)\n check_single_tensor_operation('hard_sigmoid', (4, 2), WITH_NP)\n check_single_tensor_operation('tanh', (4, 2), WITH_NP)\n\n check_single_tensor_operation('softmax', (4, 10), WITH_NP)\n check_single_tensor_operation('softmax', (4, 5, 3), WITH_NP, axis=1)\n check_single_tensor_operation('softmax', (4, 5, 3, 10), WITH_NP, axis=2)\n\n check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2),\n WITH_NP, from_logits=True)\n # cross_entropy call require the label is a valid probability distribution,\n # otherwise it is garbage in garbage out...\n # due to the algo difference, we can't guarantee CNTK has the same result\n # on the garbage input.\n # so create a separate test case for valid label input\n if K.backend() != 'cntk':\n check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2),\n WITH_NP, from_logits=True)\n xval = np.asarray([[0.26157712, 0.0432167], [-0.43380741, 0.30559841],\n [0.20225059, -0.38956559], [-0.13805378, 0.08506755]],\n dtype=np.float32)\n yval = np.asarray([[0.46221867, 0.53778133], [0.51228984, 0.48771016],\n [0.64916514, 0.35083486], [0.47028078, 0.52971922]],\n dtype=np.float32)\n check_two_tensor_operation('categorical_crossentropy', yval, xval, WITH_NP,\n cntk_two_dynamicity=True, from_logits=True)\n check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2),\n WITH_NP, from_logits=False)\n check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2),\n WITH_NP, from_logits=False)\n\n check_single_tensor_operation('l2_normalize', (4, 3), WITH_NP, axis=-1)\n check_single_tensor_operation('l2_normalize', (4, 3), WITH_NP, axis=1)\n\n def test_in_top_k(self):\n batch_size = 20\n num_classes = 10\n\n # Random prediction test case\n predictions = np.random.random((batch_size, num_classes)).astype('float32')\n targets = np.random.randint(num_classes, size=batch_size, dtype='int32')\n\n # (k == 0 or k > num_classes) does not raise an error\n # but just return an unmeaningful tensor.\n for k in range(num_classes + 1):\n z_list = [b.eval(b.in_top_k(b.variable(predictions, dtype='float32'),\n b.variable(targets, dtype='int32'), k))\n for b in [KTH, KTF]]\n assert_list_pairwise(z_list)\n\n # Identical prediction test case:\n # randomly set half of the predictions to an identical value\n num_identical = num_classes // 2\n for i in range(batch_size):\n idx_identical = np.random.choice(num_classes,\n size=num_identical, replace=False)\n predictions[i, idx_identical] = 
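# [Editor's sketch] The generalized relu covered by the parametrized
# cases above, as NumPy: leaky slope `alpha` below `threshold`, optional
# ceiling at `max_value`. A sketch of the intended semantics:
import numpy as np

def numpy_relu(x, alpha=0., max_value=None, threshold=0.):
    y = np.where(x >= threshold, x, alpha * (x - threshold))
    if max_value is not None:
        y = np.minimum(y, max_value)
    return y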
predictions[i, 0]\n targets = np.zeros(batch_size, dtype='int32')\n\n for k in range(1, num_classes + 1):\n z_list = [b.eval(b.in_top_k(b.variable(predictions, dtype='float32'),\n b.variable(targets, dtype='int32'), k))\n for b in [KTH, KTF]]\n assert_list_pairwise(z_list)\n\n @pytest.mark.parametrize('op,input_shape,kernel_shape,padding,data_format', [\n ('conv1d', (2, 8, 2), (3, 2, 3), 'same', 'channels_last'),\n ('conv1d', (1, 8, 2), (3, 2, 3), 'valid', 'channels_last'),\n ('conv1d', (1, 2, 8), (3, 2, 3), 'valid', 'channels_first'),\n ('conv2d', (2, 3, 4, 5), (3, 3, 3, 2), 'same', 'channels_first'),\n ('conv2d', (2, 3, 5, 6), (4, 3, 3, 4), 'valid', 'channels_first'),\n ('conv2d', (1, 6, 5, 3), (3, 4, 3, 2), 'valid', 'channels_last'),\n ('conv2d', (1, 7, 6, 3), (3, 3, 3, 4), 'same', 'channels_last'),\n ('conv3d', (2, 3, 4, 5, 4), (3, 3, 3, 3, 4), 'same', 'channels_first'),\n ('conv3d', (2, 3, 5, 4, 6), (3, 2, 4, 3, 4), 'valid', 'channels_first'),\n ('conv3d', (1, 2, 2, 2, 1), (2, 2, 2, 1, 1), 'valid', 'channels_last'),\n ('conv3d', (1, 3, 5, 4, 2), (3, 3, 3, 2, 3), 'same', 'channels_last'),\n ])\n def test_conv(self, op, input_shape, kernel_shape, padding, data_format):\n check_two_tensor_operation(\n op, input_shape, kernel_shape, WITH_NP,\n padding=padding, data_format=data_format,\n cntk_dynamicity=True)\n\n @pytest.mark.parametrize(\n 'op,input_shape,kernel_shape,output_shape,padding,data_format', [\n ('conv2d_transpose', (2, 5, 6, 3), (3, 3, 2, 3), (2, 5, 6, 2),\n 'same', 'channels_last'),\n ('conv2d_transpose', (2, 3, 8, 9), (3, 3, 2, 3), (2, 2, 8, 9),\n 'same', 'channels_first'),\n ])\n def test_conv_transpose(self,\n op,\n input_shape,\n kernel_shape,\n output_shape,\n padding,\n data_format):\n check_two_tensor_operation(\n op, input_shape, kernel_shape, WITH_NP,\n output_shape=output_shape, padding=padding, data_format=data_format,\n cntk_dynamicity=True)\n\n @pytest.mark.skipif((K.backend() == 'cntk' and K.dev.type() == 0),\n reason='cntk only supports dilated conv on GPU')\n @pytest.mark.parametrize(\n 'op,input_shape,kernel_shape,padding,data_format,dilation_rate', [\n ('conv1d', (2, 8, 3), (4, 3, 2), 'valid', 'channels_last', 2),\n ('conv1d', (2, 3, 8), (4, 3, 2), 'valid', 'channels_first', 2),\n ('conv2d', (2, 8, 9, 3), (3, 3, 3, 2),\n 'same', 'channels_last', (2, 2)),\n ('conv2d', (2, 3, 9, 8), (4, 3, 3, 4),\n 'valid', 'channels_first', (2, 2)),\n ('conv3d', (2, 5, 4, 6, 3), (2, 2, 3, 3, 4),\n 'valid', 'channels_last', (2, 2, 2)),\n ('conv3d', (2, 3, 5, 4, 6), (2, 2, 3, 3, 4),\n 'same', 'channels_first', (2, 2, 2)),\n ])\n def test_dilated_conv(self,\n op,\n input_shape,\n kernel_shape,\n padding,\n data_format,\n dilation_rate):\n check_two_tensor_operation(\n op, input_shape, kernel_shape, WITH_NP,\n padding=padding, data_format=data_format,\n dilation_rate=dilation_rate, cntk_dynamicity=True)\n\n @pytest.mark.skipif((K.backend() == 'cntk' and K.dev.type() == 0),\n reason='cntk only supports dilated conv transpose on GPU')\n @pytest.mark.parametrize(\n 'op,input_shape,kernel_shape,output_shape,padding,data_format,dilation_rate',\n [\n ('conv2d_transpose', (2, 5, 6, 3), (3, 3, 2, 3), (2, 5, 6, 2),\n 'same', 'channels_last', (2, 2)),\n ('conv2d_transpose', (2, 3, 8, 9), (3, 3, 2, 3), (2, 2, 8, 9),\n 'same', 'channels_first', (2, 2)),\n ])\n def test_dilated_conv_transpose(self,\n op,\n input_shape,\n kernel_shape,\n output_shape,\n padding,\n data_format,\n dilation_rate):\n check_two_tensor_operation(\n op, input_shape, kernel_shape, WITH_NP, output_shape=output_shape,\n 
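# [Editor's sketch] Output-size bookkeeping behind the conv cases above:
# per spatial dim, 'same' preserves length (ceil(L / stride)) while
# 'valid' loses the effective (dilated) kernel extent. Illustrative:
def conv_out_len(length, kernel, padding, stride=1, dilation=1):
    k_eff = (kernel - 1) * dilation + 1
    if padding == 'same':
        return (length + stride - 1) // stride
    return (length - k_eff) // stride + 1

assert conv_out_len(8, 3, 'valid') == 6
assert conv_out_len(8, 3, 'valid', dilation=2) == 4
assert conv_out_len(8, 3, 'same') == 8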
padding=padding, data_format=data_format, dilation_rate=dilation_rate,\n cntk_dynamicity=True)\n\n @pytest.mark.parametrize('op,input_shape,kernel_shape,padding,data_format', [\n ('depthwise_conv2d', (2, 3, 4, 5), (3, 3, 3, 2), 'same', 'channels_first'),\n ('depthwise_conv2d', (2, 3, 5, 6), (4, 3, 3, 4), 'valid', 'channels_first'),\n ('depthwise_conv2d', (1, 6, 5, 3), (3, 4, 3, 2), 'valid', 'channels_last'),\n ('depthwise_conv2d', (1, 7, 6, 3), (3, 3, 3, 4), 'same', 'channels_last'),\n ])\n def test_depthwise_conv(self,\n op,\n input_shape,\n kernel_shape,\n padding,\n data_format):\n check_two_tensor_operation(\n op, input_shape, kernel_shape, WITH_NP,\n padding=padding, data_format=data_format,\n cntk_dynamicity=True)\n\n @pytest.mark.parametrize(\n 'op,input_shape,pool_size,strides,padding,data_format,pool_mode', [\n ('pool2d', (2, 3, 7, 7), (3, 3), (1, 1),\n 'same', 'channels_first', 'avg'),\n ('pool2d', (3, 3, 8, 5), (2, 3), (1, 1),\n 'valid', 'channels_first', 'max'),\n ('pool2d', (2, 9, 5, 3), (3, 2), (1, 1),\n 'valid', 'channels_last', 'avg'),\n ('pool2d', (3, 6, 7, 3), (3, 3), (1, 1),\n 'same', 'channels_last', 'max'),\n ('pool3d', (2, 3, 7, 7, 7), (3, 3, 3), (1, 1, 1),\n 'same', 'channels_first', 'avg'),\n ('pool3d', (3, 3, 8, 5, 9), (2, 3, 2), (1, 1, 1),\n 'valid', 'channels_first', 'max'),\n ('pool3d', (2, 8, 9, 5, 3), (3, 2, 3), (1, 1, 1),\n 'valid', 'channels_last', 'avg'),\n ('pool3d', (3, 5, 6, 7, 3), (3, 3, 3), (1, 1, 1),\n 'same', 'channels_last', 'max'),\n ])\n def test_pool(self,\n op,\n input_shape,\n pool_size,\n strides,\n padding,\n data_format,\n pool_mode):\n check_single_tensor_operation(\n op, input_shape, WITH_NP,\n pool_size=pool_size, strides=strides,\n padding=padding, data_format=data_format, pool_mode=pool_mode,\n cntk_dynamicity=True)\n\n @pytest.mark.parametrize(\n 'op,input_shape,kernel_shape,depth_multiplier,padding,data_format', [\n ('separable_conv1d', (2, 8, 2), (3,), 1, 'same', 'channels_last'),\n ('separable_conv1d', (1, 8, 2), (3,), 2, 'valid', 'channels_last'),\n ('separable_conv2d', (2, 3, 4, 5), (3, 3), 1, 'same', 'channels_first'),\n ('separable_conv2d', (2, 3, 5, 6), (4, 3), 2, 'valid', 'channels_first'),\n ('separable_conv2d', (1, 6, 5, 3), (3, 4), 1, 'valid', 'channels_last'),\n ('separable_conv2d', (1, 7, 6, 3), (3, 3), 2, 'same', 'channels_last'),\n ])\n def test_separable_conv(self,\n op,\n input_shape,\n kernel_shape,\n depth_multiplier,\n padding,\n data_format):\n if data_format == 'channels_first':\n input_depth = input_shape[1]\n else:\n input_depth = input_shape[-1]\n _, x = parse_shape_or_val(input_shape)\n _, depthwise = parse_shape_or_val(kernel_shape +\n (input_depth, depth_multiplier))\n _, pointwise = parse_shape_or_val((1,) * len(kernel_shape) +\n (input_depth * depth_multiplier, 7))\n y1 = KNP.separable_conv(x, depthwise, pointwise,\n padding=padding, data_format=data_format)\n if K.backend() == 'cntk':\n _, cntk_func = cntk_func_tensors(\n op, [input_shape, depthwise, pointwise],\n padding=padding, data_format=data_format)\n y2 = cntk_func([x])[0]\n else:\n y2 = K.eval(getattr(K, op)(\n K.variable(x),\n K.variable(depthwise), K.variable(pointwise),\n padding=padding, data_format=data_format))\n assert_allclose(y1, y2, atol=1e-05)\n\n def test_random_normal(self):\n # test standard normal as well as a normal with a different set of parameters\n for mean, std in [(0., 1.), (-10., 5.)]:\n rand = K.eval(K.random_normal((300, 200),\n mean=mean, stddev=std, seed=1337))\n assert rand.shape == (300, 200)\n assert 
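# [Editor's sketch] The parameter accounting that motivates the separable
# convs tested above: depthwise (k*k*c_in*mult) plus pointwise
# (c_in*mult*c_out) is typically far cheaper than one full conv kernel.
def full_conv_params(k, c_in, c_out):
    return k * k * c_in * c_out

def separable_conv_params(k, c_in, c_out, mult=1):
    return k * k * c_in * mult + c_in * mult * c_out

assert full_conv_params(3, 64, 128) == 73728
assert separable_conv_params(3, 64, 128) == 8768   # 576 + 8192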
np.abs(np.mean(rand) - mean) < std * 0.015\n assert np.abs(np.std(rand) - std) < std * 0.015\n\n # test that random_normal also generates different values when used\n # within a function\n r = K.random_normal((10, 10), mean=mean, stddev=std, seed=1337)\n samples = np.array([K.eval(r) for _ in range(200)])\n assert np.abs(np.mean(samples) - mean) < std * 0.015\n assert np.abs(np.std(samples) - std) < std * 0.015\n\n def test_random_uniform(self):\n min_val = -1.\n max_val = 1.\n rand = K.eval(K.random_uniform((200, 100), min_val, max_val))\n assert rand.shape == (200, 100)\n assert np.abs(np.mean(rand)) < 0.015\n assert max_val - 0.015 < np.max(rand) <= max_val\n assert min_val + 0.015 > np.min(rand) >= min_val\n\n r = K.random_uniform((10, 10), minval=min_val, maxval=max_val)\n samples = np.array([K.eval(r) for _ in range(200)])\n assert np.abs(np.mean(samples)) < 0.015\n assert max_val - 0.015 < np.max(samples) <= max_val\n assert min_val + 0.015 > np.min(samples) >= min_val\n\n def test_random_binomial(self):\n p = 0.5\n rand = K.eval(K.random_binomial((200, 100), p))\n assert rand.shape == (200, 100)\n assert np.abs(np.mean(rand) - p) < 0.015\n assert np.max(rand) == 1\n assert np.min(rand) == 0\n\n r = K.random_binomial((10, 10), p)\n samples = np.array([K.eval(r) for _ in range(200)])\n assert np.abs(np.mean(samples) - p) < 0.015\n assert np.max(samples) == 1\n assert np.min(samples) == 0\n\n def test_truncated_normal(self):\n mean = 0.\n std = 1.\n min_val = -2.\n max_val = 2.\n rand = K.eval(K.truncated_normal((300, 200),\n mean=mean, stddev=std, seed=1337))\n assert rand.shape == (300, 200)\n assert np.abs(np.mean(rand) - mean) < 0.015\n assert np.max(rand) <= max_val\n assert np.min(rand) >= min_val\n\n # assumption in initializers.VarianceScaling\n assert np.abs(np.std(rand) - std * 0.87962) < 0.015\n\n def test_conv_invalid_use(self):\n dummy_x_1d = K.variable(np.ones((4, 8, 2)))\n dummy_w_1d = K.variable(np.ones((3, 2, 3)))\n dummy_x_2d = K.variable(np.ones((2, 3, 4, 5)))\n dummy_w_2d = K.variable(np.ones((2, 2, 3, 4)))\n dummy_x_3d = K.variable(np.ones((2, 3, 4, 5, 4)))\n dummy_w_3d = K.variable(np.ones((2, 2, 2, 3, 4)))\n dummy_w1x1_2d = K.variable(np.ones((1, 1, 12, 7)))\n\n with pytest.raises(ValueError):\n K.conv1d(dummy_x_1d, dummy_w_1d, data_format='channels_middle')\n\n with pytest.raises(ValueError):\n K.conv2d(dummy_x_2d, dummy_w_2d, data_format='channels_middle')\n\n with pytest.raises(ValueError):\n K.conv3d(dummy_x_3d, dummy_w_3d, data_format='channels_middle')\n\n if K.backend() != 'theano':\n with pytest.raises(ValueError):\n K.separable_conv2d(dummy_x_2d, dummy_w_2d, dummy_w1x1_2d,\n data_format='channels_middle')\n\n with pytest.raises(ValueError):\n K.depthwise_conv2d(dummy_x_2d, dummy_w_2d,\n data_format='channels_middle')\n\n if K.backend() == 'cntk':\n with pytest.raises(ValueError):\n K.separable_conv2d(dummy_x_2d, dummy_w_2d, dummy_w1x1_2d,\n dilation_rate=(1, 2))\n with pytest.raises(ValueError):\n K.separable_conv2d(dummy_x_2d, dummy_w_2d, dummy_w1x1_2d,\n strides=(2, 2), dilation_rate=(1, 2))\n with pytest.raises(ValueError):\n K.depthwise_conv2d(dummy_x_2d, dummy_w_2d,\n dilation_rate=(1, 2))\n with pytest.raises(ValueError):\n K.depthwise_conv2d(dummy_x_2d, dummy_w_2d,\n strides=(2, 2), dilation_rate=(1, 2))\n\n def test_pooling_invalid_use(self):\n for (input_shape, pool_size) in zip([(5, 10, 12, 3), (5, 10, 12, 6, 3)],\n [(2, 2), (2, 2, 2)]):\n x = K.variable(np.random.random(input_shape))\n if len(pool_size) == 2:\n with 
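# [Editor's sketch] The 0.87962 factor asserted above is the standard
# deviation of a unit normal truncated to [-2, 2]; a quick SciPy check
# (illustrative only, SciPy is not otherwise required here):
from scipy.stats import truncnorm
assert abs(truncnorm(-2, 2).std() - 0.87962) < 1e-4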
pytest.raises(ValueError):\n K.pool2d(x, pool_size=pool_size, data_format='channels_middle')\n with pytest.raises(ValueError):\n K.pool2d(x, pool_size=pool_size, padding='twice')\n with pytest.raises(ValueError):\n K.pool2d(x, pool_size=pool_size, pool_mode='median')\n else:\n with pytest.raises(ValueError):\n K.pool3d(x, pool_size=pool_size, data_format='channels_middle')\n with pytest.raises(ValueError):\n K.pool3d(x, pool_size=pool_size, padding='twice')\n with pytest.raises(ValueError):\n K.pool3d(x, pool_size=pool_size, pool_mode='median')\n\n def test_resize_images(self):\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5)\n if data_format == 'channels_first':\n x_shape = (2, 3) + shape\n elif data_format == 'channels_last':\n x_shape = (2,) + shape + (3,)\n check_single_tensor_operation('resize_images', x_shape,\n WITH_NP, cntk_dynamicity=True,\n height_factor=2,\n width_factor=2,\n data_format=data_format)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n with pytest.raises(ValueError):\n K.resize_images(K.variable(xval), 2, 2,\n data_format='channels_middle')\n\n @staticmethod\n def _helper_bilinear(data_format, height_factor, width_factor):\n x_shape = (2, 3, 4, 5)\n check_single_tensor_operation('resize_images', x_shape,\n [KTF, KTH],\n height_factor=height_factor,\n width_factor=width_factor,\n data_format=data_format,\n interpolation='bilinear')\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n @pytest.mark.parametrize('data_format', ['channels_first', 'channels_last'])\n def test_resize_images_bilinear(self, data_format):\n self._helper_bilinear(data_format, 2, 2)\n with pytest.raises(NotImplementedError):\n self._helper_bilinear(data_format, 4, 4)\n\n def test_resize_volumes(self):\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5, 5)\n if data_format == 'channels_first':\n x_shape = (2, 3) + shape\n elif data_format == 'channels_last':\n x_shape = (2,) + shape + (3,)\n check_single_tensor_operation('resize_volumes', x_shape,\n WITH_NP, cntk_dynamicity=True,\n depth_factor=2,\n height_factor=2,\n width_factor=2,\n data_format=data_format)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n with pytest.raises(ValueError):\n K.resize_volumes(K.variable(xval), 2, 2, 2,\n data_format='channels_middle')\n\n def test_temporal_padding(self):\n check_single_tensor_operation('temporal_padding', (4, 3, 3),\n WITH_NP)\n check_single_tensor_operation('temporal_padding', (2, 3, 4),\n WITH_NP, padding=(1, 2))\n\n def test_spatial_2d_padding(self):\n padding = ((1, 2), (2, 1))\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5)\n if data_format == 'channels_first':\n x_shape = (1, 3) + shape\n else:\n x_shape = (1,) + shape + (3,)\n check_single_tensor_operation('spatial_2d_padding', x_shape, WITH_NP,\n padding=padding, data_format=data_format)\n # Check handling of dynamic shapes.\n if K in [KTF, KTH]:\n x = K.placeholder(shape=(1, None, None, 1))\n y = K.spatial_2d_padding(x, padding=padding, data_format='channels_last')\n assert K.int_shape(y) == (1, None, None, 1)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n with pytest.raises(ValueError):\n K.spatial_2d_padding(K.variable(xval), padding=padding,\n data_format='channels_middle')\n\n def test_spatial_3d_padding(self):\n padding = ((1, 2), (2, 1), (1, 2))\n for data_format in ['channels_first', 'channels_last']:\n shape = (5, 5, 5)\n if data_format == 'channels_first':\n x_shape = (1, 3) + 
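# [Editor's sketch] The spatial padding ops above map directly onto
# np.pad; e.g. padding=((1, 2), (2, 1)) on channels_last 2D data:
import numpy as np

x = np.zeros((1, 5, 5, 3))
y = np.pad(x, ((0, 0), (1, 2), (2, 1), (0, 0)), mode='constant')
assert y.shape == (1, 8, 8, 3)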
shape\n else:\n x_shape = (1,) + shape + (3,)\n check_single_tensor_operation('spatial_3d_padding', x_shape, WITH_NP,\n padding=padding, data_format=data_format)\n # Check handling of dynamic shapes.\n if K in [KTF, KTH]:\n x = K.placeholder(shape=(1, None, None, None, 1))\n y = K.spatial_3d_padding(x, padding=padding, data_format='channels_last')\n assert K.int_shape(y) == (1, None, None, None, 1)\n\n # Test invalid use cases\n xval = np.random.random(x_shape)\n with pytest.raises(ValueError):\n K.spatial_3d_padding(K.variable(xval), padding=padding,\n data_format='channels_middle')\n\n def test_bias_add(self):\n for data_format in ['channels_first', 'channels_last']:\n for shape in [(), (3,), (2, 3), (5, 3, 2)]:\n if data_format == 'channels_first':\n x_shape = (1, 4) + shape\n else:\n x_shape = (1,) + shape + (4,)\n bias_shape = (4,)\n check_two_tensor_operation('bias_add', x_shape, bias_shape,\n WITH_NP, cntk_dynamicity=True,\n data_format=data_format)\n\n if data_format == 'channels_first':\n x_shape = (20, 6, 10)\n else:\n x_shape = (20, 10, 6)\n check_two_tensor_operation('bias_add', x_shape, (10, 6),\n WITH_NP, cntk_dynamicity=True,\n data_format=data_format)\n\n # Test invalid use cases\n x = K.variable(np.random.random(x_shape))\n b = K.variable(np.random.random(bias_shape))\n with pytest.raises(ValueError):\n K.bias_add(x, b, data_format='channels_middle')\n\n @pytest.mark.skipif(K.backend() != 'theano',\n reason='Specific to Theano.')\n @pytest.mark.parametrize('x_shape', [(1, 4, 2, 3), (1, 2, 3, 4)])\n def test_batchnorm_th(self, x_shape):\n x_val = np.random.random(x_shape).astype(np.float32)\n x = K.variable(x_val)\n z, _, _ = K.normalize_batch_in_training(\n x, None, None, reduction_axes='per-activation')\n z = K.eval(z)\n assert z.shape == x_shape\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='Specific to Tensorflow.')\n @pytest.mark.parametrize('x_shape', [(1, 4, 2, 3), (1, 2, 3, 4)])\n def test_batchnorm_tf(self, x_shape):\n x_val = np.random.random(x_shape).astype(np.float32)\n x = K.variable(x_val)\n z, _, _ = K.normalize_batch_in_training(\n x, None, None, reduction_axes=[0, 1, 2, 3])\n z = K.eval(z)\n assert z.shape == x_shape\n\n @pytest.mark.skipif(K.backend() != 'cntk', reason='Specific to CNTK.')\n @pytest.mark.parametrize('x_shape', [(1, 4, 2, 3), (1, 2, 3, 4)])\n def test_batchnorm_cntk(self, x_shape):\n x_val = np.random.random(x_shape).astype(np.float32)\n x = K.placeholder(x_shape)\n z, _, _ = K.normalize_batch_in_training(\n x, None, None, reduction_axes=[0, 1, 2, 3])\n z = K.function([x], [z])([x_val])[0]\n assert z.shape == x_shape\n\n # the Theano and TensorFlow CTC code use different methods to ensure\n # numerical stability. 
The Theano code subtracts out the max\n # before the final log, so the results are different but scale\n # identically and still train properly\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_ctc(self):\n if K.backend() == 'theano':\n ref = [1.73308, 3.81351]\n else:\n ref = [3.34211, 5.42262]\n # simplified version of TensorFlow's test\n\n label_lens = np.expand_dims(np.asarray([5, 4]), 1)\n input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps\n\n # dimensions are batch x time x categories\n labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],\n [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],\n [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],\n [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],\n [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],\n [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],\n [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],\n [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],\n dtype=np.float32)\n\n k_labels = K.variable(labels, dtype=\"int32\")\n k_inputs = K.variable(inputs, dtype=\"float32\")\n k_input_lens = K.variable(input_lens, dtype=\"int32\")\n k_label_lens = K.variable(label_lens, dtype=\"int32\")\n res = K.eval(K.ctc_batch_cost(k_labels, k_inputs, k_input_lens,\n k_label_lens))\n if K.backend() == 'theano':\n assert_allclose(res[0, :], ref, atol=1e-05)\n else:\n assert_allclose(res[:, 0], ref, atol=1e-05)\n\n # test when batch_size = 1, that is, one sample only\n # get only first sample from above test case\n if K.backend() == 'theano':\n ref = [1.73308]\n else:\n ref = [3.34211]\n\n input_lens = np.expand_dims(np.asarray([5]), 1)\n label_lens = np.expand_dims(np.asarray([5]), 1)\n\n labels = np.asarray([[0, 1, 2, 1, 0]])\n inputs = np.asarray(\n [[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],\n [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],\n [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],\n [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],\n [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]]],\n dtype=np.float32)\n\n k_labels = K.variable(labels, dtype=\"int32\")\n k_inputs = K.variable(inputs, dtype=\"float32\")\n k_input_lens = K.variable(input_lens, dtype=\"int32\")\n k_label_lens = K.variable(label_lens, dtype=\"int32\")\n res = K.eval(K.ctc_batch_cost(k_labels, k_inputs, k_input_lens,\n k_label_lens))\n if K.backend() == 'theano':\n assert_allclose(res[0, :], ref, atol=1e-05)\n else:\n assert_allclose(res[:, 0], ref, atol=1e-05)\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='Test adapted from tensorflow.')\n def test_ctc_decode_greedy(self):\n \"\"\"Test two batch entries - best path decoder.\"\"\"\n max_time_steps = 6\n\n seq_len_0 = 4\n input_prob_matrix_0 = np.asarray(\n [[1.0, 0.0, 0.0, 0.0], # t=0\n [0.0, 0.0, 0.4, 0.6], # t=1\n [0.0, 0.0, 0.4, 0.6], # t=2\n [0.0, 0.9, 0.1, 0.0], # t=3\n [0.0, 0.0, 0.0, 0.0], # t=4 (ignored)\n [0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)\n dtype=np.float32)\n\n seq_len_1 = 5\n # dimensions are time x depth\n\n input_prob_matrix_1 = np.asarray(\n [[0.1, 0.9, 0.0, 0.0], # t=0\n [0.0, 0.9, 0.1, 0.0], # t=1\n [0.0, 0.0, 
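# [Editor's sketch] The greedy ("best path") rule the decoder test
# exercises: per-frame argmax, collapse consecutive repeats, then drop
# the blank label (the last class index in these fixtures). Sketch:
import numpy as np

def greedy_ctc_decode(probs, blank):
    # probs: (timesteps, num_classes) for a single sequence
    best = np.argmax(probs, axis=-1)
    collapsed = [k for i, k in enumerate(best)
                 if i == 0 or k != best[i - 1]]
    return [k for k in collapsed if k != blank]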
0.1, 0.9], # t=2\n [0.0, 0.9, 0.1, 0.1], # t=3\n [0.9, 0.1, 0.0, 0.0], # t=4\n [0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)\n dtype=np.float32)\n\n # len max_time_steps array of batch_size x depth matrices\n inputs = [np.vstack([input_prob_matrix_0[t, :],\n input_prob_matrix_1[t, :]])\n for t in range(max_time_steps)]\n\n # change tensorflow order to keras backend order\n inputs = np.asarray(inputs).transpose((1, 0, 2))\n\n # batch_size length vector of sequence_lengths\n input_length = np.array([seq_len_0, seq_len_1], dtype=np.int32)\n\n decode_pred_np, log_prob_pred_np = KNP.ctc_decode(inputs,\n input_length, greedy=True)\n inputs = K.variable(inputs)\n input_length = K.variable(input_length)\n decode_pred_tf, log_prob_pred_tf = K.ctc_decode(inputs,\n input_length, greedy=True)\n\n assert len(decode_pred_tf) == 1\n\n decode_pred = K.eval(decode_pred_tf[0])\n log_prob_pred = K.eval(log_prob_pred_tf)\n\n assert np.alltrue(decode_pred_np == decode_pred)\n assert np.allclose(log_prob_pred_np, log_prob_pred)\n\n @pytest.mark.skipif(K.backend() != 'tensorflow',\n reason='Beam search is only implemented with '\n 'the TensorFlow backend.')\n def test_ctc_decode_beam_search(self):\n \"\"\"Test one batch, two beams - hibernating beam search.\"\"\"\n\n depth = 6\n\n seq_len_0 = 5\n input_prob_matrix_0 = np.asarray(\n [[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],\n [0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],\n [0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],\n [0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],\n [0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],\n # Random entry added in at time=5\n [0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],\n dtype=np.float32)\n\n # len max_time_steps array of batch_size x depth matrices\n inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]\n for t in range(seq_len_0)] + # Pad to max_time_steps = 8\n 2 * [np.zeros((1, depth), dtype=np.float32)])\n\n inputs = K.variable(np.asarray(inputs).transpose((1, 0, 2)))\n\n # batch_size length vector of sequence_lengths\n input_length = K.variable(np.array([seq_len_0], dtype=np.int32))\n # batch_size length vector of negative log probabilities\n log_prob_truth = np.array([\n 0.584855, # output beam 0\n 0.389139 # output beam 1\n ], np.float32)[np.newaxis, :]\n\n decode_truth = [np.array([1, 0]), np.array([0, 1, 0])]\n\n beam_width = 2\n top_paths = 2\n\n decode_pred_tf, log_prob_pred_tf = K.ctc_decode(inputs,\n input_length,\n greedy=False,\n beam_width=beam_width,\n top_paths=top_paths)\n\n assert len(decode_pred_tf) == top_paths\n\n log_prob_pred = K.eval(log_prob_pred_tf)\n\n for i in range(top_paths):\n assert np.alltrue(decode_truth[i] == K.eval(decode_pred_tf[i]))\n\n assert np.allclose(log_prob_truth, log_prob_pred)\n\n def test_one_hot(self):\n input_length = 10\n num_classes = 20\n batch_size = 30\n indices = np.random.randint(0, num_classes, size=(batch_size, input_length))\n oh = KNP.one_hot(np.int32(indices), num_classes)\n koh = K.eval(K.one_hot(K.variable(indices, dtype='int32'), num_classes))\n assert np.all(koh == oh)\n\n @pytest.mark.skipif((K.backend() == 'cntk'\n or (K.backend() == 'theano' and not K.th_sparse_module)),\n reason='Sparse tensors are not supported in cntk '\n 'and Theano has some dependency issues for sparse.')\n def test_sparse_dot(self):\n x_d = np.array([0, 7, 2, 3], dtype=np.float32)\n x_r = np.array([0, 2, 2, 3], dtype=np.int64)\n x_c = np.array([4, 3, 2, 3], dtype=np.int64)\n\n x_sparse = 
sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))\n x_dense = x_sparse.toarray()\n\n W = np.random.random((5, 4))\n t_W = K.variable(W)\n k_s = K.eval(K.dot(K.variable(x_sparse), t_W))\n k_d = K.eval(K.dot(K.variable(x_dense), t_W))\n\n assert k_s.shape == k_d.shape\n assert_allclose(k_s, k_d, atol=1e-05)\n\n @pytest.mark.skipif((K.backend() == 'cntk'\n or (K.backend() == 'theano' and not K.th_sparse_module)),\n reason='Sparse tensors are not supported in cntk '\n 'and Theano has some dependency issues for sparse.')\n def test_sparse_concat(self):\n x_d = np.array([0, 7, 2, 3], dtype=np.float32)\n x_r = np.array([0, 2, 2, 3], dtype=np.int64)\n x_c = np.array([4, 3, 2, 3], dtype=np.int64)\n\n x_sparse_1 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))\n\n x_d = np.array([0, 7, 2, 3], dtype=np.float32)\n x_r = np.array([0, 2, 2, 3], dtype=np.int64)\n x_c = np.array([4, 3, 2, 3], dtype=np.int64)\n\n x_sparse_2 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))\n\n x_dense_1 = x_sparse_1.toarray()\n x_dense_2 = x_sparse_2.toarray()\n\n k_s = K.concatenate([K.variable(x_sparse_1), K.variable(x_sparse_2)])\n assert K.is_sparse(k_s)\n\n k_s_d = K.eval(k_s)\n\n k_d = K.eval(K.concatenate([K.variable(x_dense_1), K.variable(x_dense_2)]))\n\n assert k_s_d.shape == k_d.shape\n assert_allclose(k_s_d, k_d, atol=1e-05)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_map(self):\n x = np.random.rand(10, 3).astype(np.float32)\n vx = K.variable(x)\n kx = K.eval(K.map_fn(K.sum, vx))\n # make sure we can also walk the indexes in tensorflow which we\n # can't without specifying dtype\n kx2 = K.eval(K.map_fn(\n lambda i: K.sum(vx[i]),\n K.arange(10),\n dtype=K.floatx()\n ))\n\n assert (10,) == kx.shape\n assert (10,) == kx2.shape\n assert_allclose(x.sum(axis=1), kx, atol=1e-05)\n assert_allclose(kx, kx2, atol=1e-05)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_foldl(self):\n x = np.random.rand(10, 3).astype(np.float32)\n kx = K.eval(K.foldl(lambda a, b: a + b, K.variable(x)))\n\n assert (3,) == kx.shape\n assert_allclose(x.sum(axis=0), kx, atol=1e-05)\n\n @pytest.mark.skipif(K.backend() == 'cntk', reason='Not supported.')\n def test_foldr(self):\n # This test aims to make sure that we walk the array from right to left\n # and checks it in the following way: multiplying left to right 1e-40\n # cannot be held into a float32 so it causes an underflow while from\n # right to left we have no such problem and the result is larger\n x = np.array([1e-20, 1e-20, 10, 10, 10], dtype=np.float32)\n vx = K.variable(x)\n p1 = K.eval(K.foldl(lambda a, b: a * b, vx))\n p2 = K.eval(K.foldr(lambda a, b: a * b, vx))\n\n assert p1 < p2\n assert 9e-38 < p2 <= 1e-37\n\n @pytest.mark.skipif(K.backend() == 'cntk',\n reason='cntk has issues with negative number.')\n def test_arange(self):\n for test_value in (-20, 0, 1, 10):\n a_list = []\n dtype_list = []\n for k in WITH_NP:\n t = k.arange(test_value)\n a = k.eval(t)\n assert np.array_equal(a, np.arange(test_value))\n dtype_list.append(k.dtype(t))\n a_list.append(a)\n\n for i in range(len(a_list) - 1):\n assert np.array_equal(a_list[i], a_list[i + 1])\n\n for start, stop, step in ((0, 5, 1), (-5, 5, 2), (0, 1, 2)):\n a_list = []\n for k in WITH_NP:\n a = k.eval(k.arange(start, stop, step))\n assert np.array_equal(a, np.arange(start, stop, step))\n a_list.append(a)\n for i in range(len(a_list) - 1):\n assert np.array_equal(a_list[i], a_list[i + 1])\n\n for dtype in ('int32', 'int64', 'float32', 'float64'):\n 
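# [Editor's sketch] The float32 ordering argument in the foldr test
# above, spelled out: the left-to-right product passes through ~1e-40,
# which sits in float32's subnormal range and loses precision, while
# the right-to-left product stays comfortably representable.
import numpy as np

vals = np.array([1e-20, 1e-20, 10, 10, 10], dtype=np.float32)
left = np.float32(1)
for v in vals:
    left = np.float32(left * v)     # dips to ~1e-40 (subnormal)
right = np.float32(1)
for v in vals[::-1]:
    right = np.float32(right * v)   # 1000 * 1e-20 * 1e-20 = 1e-37
assert left < right
assert 9e-38 < right <= 1e-37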
for k in WITH_NP:\n t = k.arange(10, dtype=dtype)\n assert k.dtype(t) == dtype\n\n start = K.constant(1, dtype='int32')\n t = K.arange(start)\n assert len(K.eval(t)) == 1\n\n start = K.constant(-1, dtype='int32')\n t = K.arange(start)\n assert len(K.eval(t)) == 0\n\n @pytest.mark.parametrize('training', [True, False])\n def test_in_train_phase(self, training):\n check_two_tensor_operation('in_train_phase', (3, 3), (2, 2), WITH_NP,\n training=training)\n check_two_tensor_operation('in_train_phase', (2, 3), (2, 3), WITH_NP,\n training=training)\n\n @pytest.mark.parametrize('training', [True, False])\n def test_in_test_phase(self, training):\n check_two_tensor_operation('in_test_phase', (3, 3), (2, 2), WITH_NP,\n training=training)\n check_two_tensor_operation('in_test_phase', (2, 3), (2, 3), WITH_NP,\n training=training)\n\n def test_setfloatx_incorrect_values(self):\n # Keep track of the old value\n old_floatx = floatx()\n # Try some incorrect values\n initial = floatx()\n for value in ['', 'beerfloat', 123]:\n with pytest.raises(ValueError):\n set_floatx(value)\n assert floatx() == initial\n # Restore old value\n set_floatx(old_floatx)\n\n def test_setfloatx_correct_values(self):\n # Keep track of the old value\n old_floatx = floatx()\n # Check correct values\n for value in ['float16', 'float32', 'float64']:\n set_floatx(value)\n assert floatx() == value\n # Restore old value\n set_floatx(old_floatx)\n\n @pytest.mark.skipif((K.backend() == 'cntk'),\n reason='cntk does not support float16')\n def test_set_floatx(self):\n \"\"\"\n Make sure that changes to the global floatx are effectively\n taken into account by the backend.\n \"\"\"\n # Keep track of the old value\n old_floatx = floatx()\n\n set_floatx('float16')\n var = variable([10])\n check_dtype(var, 'float16')\n\n set_floatx('float64')\n var = variable([10])\n check_dtype(var, 'float64')\n\n # Restore old value\n set_floatx(old_floatx)\n\n def test_dtype(self):\n assert K.dtype(K.variable(1, dtype='float64')) == 'float64'\n assert K.dtype(K.variable(1, dtype='float32')) == 'float32'\n assert K.dtype(K.variable(1, dtype='float16')) == 'float16'\n\n def test_variable_support_bool_dtype(self):\n # Github issue: 7819\n if K.backend() == 'tensorflow':\n assert K.dtype(K.variable(1, dtype='int16')) == 'int16'\n assert K.dtype(K.variable(False, dtype='bool')) == 'bool'\n with pytest.raises(TypeError):\n K.variable('', dtype='unsupported')\n\n def test_clip_supports_tensor_arguments(self):\n # GitHub issue: 11435\n x = K.variable([-10., -5., 0., 5., 10.])\n min_value = K.variable([-5., -4., 0., 3., 5.])\n max_value = K.variable([5., 4., 1., 4., 9.])\n\n assert np.allclose(K.eval(K.clip(x, min_value, max_value)),\n np.asarray([-5., -4., 0., 4., 9.], dtype=np.float32))\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n", "\"\"\"Utilities related to layer/model functionality.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom .conv_utils import convert_kernel\nfrom .. 
import backend as K\nimport numpy as np\n\n\ndef count_params(weights):\n \"\"\"Count the total number of scalars composing the weights.\n\n # Arguments\n weights: An iterable containing the weights on which to compute params\n\n # Returns\n The total number of scalars composing the weights\n \"\"\"\n return int(np.sum([K.count_params(p) for p in set(weights)]))\n\n\ndef print_summary(model, line_length=None, positions=None, print_fn=None):\n \"\"\"Prints a summary of a model.\n\n # Arguments\n model: Keras model instance.\n line_length: Total length of printed lines\n (e.g. set this to adapt the display to different\n terminal window sizes).\n positions: Relative or absolute positions of log elements in each line.\n If not provided, defaults to `[.33, .55, .67, 1.]`.\n print_fn: Print function to use.\n It will be called on each line of the summary.\n You can set it to a custom function\n in order to capture the string summary.\n It defaults to `print` (prints to stdout).\n \"\"\"\n if print_fn is None:\n print_fn = print\n\n if model.__class__.__name__ == 'Sequential':\n sequential_like = True\n elif not model._is_graph_network:\n # We treat subclassed models as a simple sequence of layers,\n # for logging purposes.\n sequential_like = True\n else:\n sequential_like = True\n nodes_by_depth = model._nodes_by_depth.values()\n nodes = []\n for v in nodes_by_depth:\n if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):\n # if the model has multiple nodes\n # or if the nodes have multiple inbound_layers\n # the model is no longer sequential\n sequential_like = False\n break\n nodes += v\n if sequential_like:\n # search for shared layers\n for layer in model.layers:\n flag = False\n for node in layer._inbound_nodes:\n if node in nodes:\n if flag:\n sequential_like = False\n break\n else:\n flag = True\n if not sequential_like:\n break\n\n if sequential_like:\n line_length = line_length or 65\n positions = positions or [.45, .85, 1.]\n if positions[-1] <= 1:\n positions = [int(line_length * p) for p in positions]\n # header names for the different log elements\n to_display = ['Layer (type)', 'Output Shape', 'Param #']\n else:\n line_length = line_length or 98\n positions = positions or [.33, .55, .67, 1.]\n if positions[-1] <= 1:\n positions = [int(line_length * p) for p in positions]\n # header names for the different log elements\n to_display = ['Layer (type)',\n 'Output Shape',\n 'Param #',\n 'Connected to']\n relevant_nodes = []\n for v in model._nodes_by_depth.values():\n relevant_nodes += v\n\n def print_row(fields, positions):\n line = ''\n for i in range(len(fields)):\n if i > 0:\n line = line[:-1] + ' '\n line += str(fields[i])\n line = line[:positions[i]]\n line += ' ' * (positions[i] - len(line))\n print_fn(line)\n\n print_fn('_' * line_length)\n print_row(to_display, positions)\n print_fn('=' * line_length)\n\n def print_layer_summary(layer):\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n name = layer.name\n cls_name = layer.__class__.__name__\n fields = [name + ' (' + cls_name + ')',\n output_shape, layer.count_params()]\n print_row(fields, positions)\n\n def print_layer_summary_with_connections(layer):\n \"\"\"Prints a summary for a single layer.\n\n # Arguments\n layer: target layer.\n \"\"\"\n try:\n output_shape = layer.output_shape\n except AttributeError:\n output_shape = 'multiple'\n connections = []\n for node in layer._inbound_nodes:\n if relevant_nodes and node not in relevant_nodes:\n # node is not part of 
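# [Editor's sketch] count_params above deduplicates via set(), so a
# weight tensor shared between layers is only counted once. Minimal
# illustration of that contract (uses the helper defined above):
import numpy as np
from keras import backend as K

w = K.variable(np.zeros((3, 4)))       # 12 scalars
b = K.variable(np.zeros((4,)))         # 4 scalars
assert count_params([w, b, w]) == 16   # the duplicate w collapses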
the current network\n continue\n for i in range(len(node.inbound_layers)):\n inbound_layer = node.inbound_layers[i].name\n inbound_node_index = node.node_indices[i]\n inbound_tensor_index = node.tensor_indices[i]\n connections.append(inbound_layer +\n '[' + str(inbound_node_index) + '][' +\n str(inbound_tensor_index) + ']')\n\n name = layer.name\n cls_name = layer.__class__.__name__\n if not connections:\n first_connection = ''\n else:\n first_connection = connections[0]\n fields = [name +\n ' (' + cls_name + ')',\n output_shape,\n layer.count_params(),\n first_connection]\n print_row(fields, positions)\n if len(connections) > 1:\n for i in range(1, len(connections)):\n fields = ['', '', '', connections[i]]\n print_row(fields, positions)\n\n layers = model.layers\n for i in range(len(layers)):\n if sequential_like:\n print_layer_summary(layers[i])\n else:\n print_layer_summary_with_connections(layers[i])\n if i == len(layers) - 1:\n print_fn('=' * line_length)\n else:\n print_fn('_' * line_length)\n\n model._check_trainable_weights_consistency()\n if hasattr(model, '_collected_trainable_weights'):\n trainable_count = count_params(model._collected_trainable_weights)\n else:\n trainable_count = count_params(model.trainable_weights)\n\n non_trainable_count = count_params(model.non_trainable_weights)\n\n print_fn(\n 'Total params: {:,}'.format(trainable_count + non_trainable_count))\n print_fn('Trainable params: {:,}'.format(trainable_count))\n print_fn('Non-trainable params: {:,}'.format(non_trainable_count))\n print_fn('_' * line_length)\n\n\ndef convert_all_kernels_in_model(model):\n \"\"\"Converts all convolution kernels in a model from Theano to TensorFlow.\n\n Also works from TensorFlow to Theano.\n\n # Arguments\n model: target model for the conversion.\n \"\"\"\n # Note: SeparableConvolution not included\n # since only supported by TF.\n conv_classes = {\n 'Conv1D',\n 'Conv2D',\n 'Conv3D',\n 'Conv2DTranspose',\n }\n to_assign = []\n for layer in model.layers:\n if layer.__class__.__name__ in conv_classes:\n original_kernel = K.get_value(layer.kernel)\n converted_kernel = convert_kernel(original_kernel)\n to_assign.append((layer.kernel, converted_kernel))\n K.batch_set_value(to_assign)\n\n\ndef convert_dense_weights_data_format(dense,\n previous_feature_map_shape,\n target_data_format='channels_first'):\n \"\"\"Utility useful when changing a convnet's `data_format`.\n\n When porting the weights of a convnet from one data format to the other,\n if the convnet includes a `Flatten` layer\n (applied to the last convolutional feature map)\n followed by a `Dense` layer, the weights of that `Dense` layer\n should be updated to reflect the new dimension ordering.\n\n # Arguments\n dense: The target `Dense` layer.\n previous_feature_map_shape: A shape tuple of 3 integers,\n e.g. `(512, 7, 7)`. 
The shape of the convolutional\n feature map right before the `Flatten` layer that\n came before the target `Dense` layer.\n target_data_format: One of \"channels_last\", \"channels_first\".\n Set it \"channels_last\"\n if converting a \"channels_first\" model to \"channels_last\",\n or reciprocally.\n \"\"\"\n assert target_data_format in {'channels_last', 'channels_first'}\n kernel, bias = dense.get_weights()\n for i in range(kernel.shape[1]):\n if target_data_format == 'channels_first':\n c, h, w = previous_feature_map_shape\n original_fm_shape = (h, w, c)\n ki = kernel[:, i].reshape(original_fm_shape)\n ki = np.transpose(ki, (2, 0, 1)) # last -> first\n else:\n h, w, c = previous_feature_map_shape\n original_fm_shape = (c, h, w)\n ki = kernel[:, i].reshape(original_fm_shape)\n ki = np.transpose(ki, (1, 2, 0)) # first -> last\n kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))\n dense.set_weights([kernel, bias])\n\n\ndef get_source_inputs(tensor, layer=None, node_index=None):\n \"\"\"Returns the list of input tensors necessary to compute `tensor`.\n\n Output will always be a list of tensors\n (potentially with 1 element).\n\n # Arguments\n tensor: The tensor to start from.\n layer: Origin layer of the tensor. Will be\n determined via tensor._keras_history if not provided.\n node_index: Origin node index of the tensor.\n\n # Returns\n List of input tensors.\n \"\"\"\n if not hasattr(tensor, '_keras_history'):\n return tensor\n\n if layer is None or node_index:\n layer, node_index, _ = tensor._keras_history\n if not layer._inbound_nodes:\n return [tensor]\n else:\n node = layer._inbound_nodes[node_index]\n if not node.inbound_layers:\n # Reached an Input layer, stop recursion.\n return node.input_tensors\n else:\n source_tensors = []\n for i in range(len(node.inbound_layers)):\n x = node.input_tensors[i]\n layer = node.inbound_layers[i]\n node_index = node.node_indices[i]\n previous_sources = get_source_inputs(x,\n layer,\n node_index)\n # Avoid input redundancy.\n for x in previous_sources:\n if x not in source_tensors:\n source_tensors.append(x)\n return source_tensors\n", "import pytest\nimport numpy as np\n\nfrom keras import initializers\nfrom keras import backend as K\n\n# 2D tensor test fixture\nFC_SHAPE = (200, 100)\n\n# 4D convolution in th order. 
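# [Editor's sketch] The per-column reshuffle that
# convert_dense_weights_data_format performs above, on a toy feature
# map: each Dense kernel column is a flattened feature map that gets
# re-laid-out for the other data_format.
import numpy as np

c, h, w = 2, 3, 3                  # channels_first map shape (2, 3, 3)
col = np.arange(c * h * w)         # one flattened kernel column
ki = col.reshape((h, w, c))        # it was flattened in (h, w, c) order
ki = np.transpose(ki, (2, 0, 1))   # move channels first
new_col = ki.reshape(-1)           # re-flatten for the converted model
assert new_col.shape == (18,)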
This shape has the same effective shape as FC_SHAPE\nCONV_SHAPE = (25, 25, 20, 20)\n\n\ndef _runner(init, shape, target_mean=None, target_std=None,\n target_max=None, target_min=None):\n variable = K.variable(init(shape))\n output = K.get_value(variable)\n lim = 3e-2\n if target_std is not None:\n assert abs(output.std() - target_std) < lim\n if target_mean is not None:\n assert abs(output.mean() - target_mean) < lim\n if target_max is not None:\n assert abs(output.max() - target_max) < lim\n if target_min is not None:\n assert abs(output.min() - target_min) < lim\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_uniform(tensor_shape):\n _runner(initializers.RandomUniform(minval=-1, maxval=1), tensor_shape,\n target_mean=0., target_max=1, target_min=-1)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_normal(tensor_shape):\n _runner(initializers.RandomNormal(mean=0, stddev=1), tensor_shape,\n target_mean=0., target_std=1)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_truncated_normal(tensor_shape):\n _runner(initializers.TruncatedNormal(mean=0, stddev=1), tensor_shape,\n target_mean=0., target_max=2, target_min=-2)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_constant(tensor_shape):\n _runner(initializers.Constant(2), tensor_shape,\n target_mean=2, target_max=2, target_min=2)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_lecun_uniform(tensor_shape):\n fan_in, _ = initializers._compute_fans(tensor_shape)\n std = np.sqrt(1. / fan_in)\n _runner(initializers.lecun_uniform(), tensor_shape,\n target_mean=0., target_std=std)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_glorot_uniform(tensor_shape):\n fan_in, fan_out = initializers._compute_fans(tensor_shape)\n std = np.sqrt(2. / (fan_in + fan_out))\n _runner(initializers.glorot_uniform(), tensor_shape,\n target_mean=0., target_std=std)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_he_uniform(tensor_shape):\n fan_in, _ = initializers._compute_fans(tensor_shape)\n std = np.sqrt(2. / fan_in)\n _runner(initializers.he_uniform(), tensor_shape,\n target_mean=0., target_std=std)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_lecun_normal(tensor_shape):\n fan_in, _ = initializers._compute_fans(tensor_shape)\n std = np.sqrt(1. / fan_in)\n _runner(initializers.lecun_normal(), tensor_shape,\n target_mean=0., target_std=std)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_glorot_normal(tensor_shape):\n fan_in, fan_out = initializers._compute_fans(tensor_shape)\n std = np.sqrt(2. / (fan_in + fan_out))\n _runner(initializers.glorot_normal(), tensor_shape,\n target_mean=0., target_std=std)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_he_normal(tensor_shape):\n fan_in, _ = initializers._compute_fans(tensor_shape)\n std = np.sqrt(2. 
/ fan_in)\n _runner(initializers.he_normal(), tensor_shape,\n target_mean=0., target_std=std)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_orthogonal(tensor_shape):\n _runner(initializers.orthogonal(), tensor_shape,\n target_mean=0.)\n\n\[email protected]('tensor_shape',\n [(100, 100), (10, 20), (30, 80), (1, 2, 3, 4)],\n ids=['FC', 'RNN', 'RNN_INVALID', 'CONV'])\ndef test_identity(tensor_shape):\n if len(tensor_shape) > 2 or max(tensor_shape) % min(tensor_shape) != 0:\n with pytest.raises(ValueError):\n _runner(initializers.identity(), tensor_shape,\n target_mean=1. / tensor_shape[0], target_max=1.)\n else:\n _runner(initializers.identity(), tensor_shape,\n target_mean=1. / tensor_shape[0], target_max=1.)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_zero(tensor_shape):\n _runner(initializers.zeros(), tensor_shape,\n target_mean=0., target_max=0.)\n\n\[email protected]('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_one(tensor_shape):\n _runner(initializers.ones(), tensor_shape,\n target_mean=1., target_max=1.)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n" ]
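The initializer tests in the entry above compare empirical standard deviations against fan-based targets, e.g. std = sqrt(2 / (fan_in + fan_out)) for Glorot. A minimal standalone sketch of that relation (plain NumPy; the shapes are arbitrary choices for illustration, not taken from the entry):

import numpy as np

fan_in, fan_out = 200, 100
# Glorot-uniform draws from [-limit, limit] with limit = sqrt(6 / (fan_in + fan_out))
limit = np.sqrt(6.0 / (fan_in + fan_out))
w = np.random.uniform(-limit, limit, size=(fan_in, fan_out))
# a uniform on [-limit, limit] has std = limit / sqrt(3) = sqrt(2 / (fan_in + fan_out))
print(w.std(), np.sqrt(2.0 / (fan_in + fan_out)))  # the two values should nearly match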
[ [ "numpy.concatenate", "numpy.average", "numpy.float64" ], [ "tensorflow.core.protobuf.config_pb2.RunMetadata", "numpy.expand_dims", "numpy.asarray", "tensorflow.core.protobuf.config_pb2.RunOptions", "numpy.all", "numpy.concatenate", "numpy.max", "numpy.alltrue", "numpy.mean", "numpy.exp", "numpy.random.randint", "numpy.allclose", "numpy.arange", "numpy.std", "numpy.repeat", "numpy.zeros", "numpy.random.choice", "numpy.min", "scipy.sparse.csr_matrix", "numpy.random.rand", "numpy.testing.assert_allclose", "numpy.array", "numpy.random.random", "numpy.array_equal", "numpy.int32", "numpy.ones", "numpy.prod", "numpy.vstack" ], [ "numpy.prod", "numpy.transpose" ], [ "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nikosavola/qiskit-metal
[ "04c9e5c0b5573699564244127aa58b447cdf6f66" ]
[ "qiskit_metal/qlibrary/tlines/anchored_path.py" ]
[ "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"Anchored path.\"\"\"\n\nimport numpy as np\n\nfrom collections import OrderedDict\nfrom qiskit_metal import Dict\nfrom qiskit_metal.qlibrary.core import QRoute, QRoutePoint\nfrom qiskit_metal.toolbox_metal import math_and_overrides as mao\nfrom qiskit_metal.toolbox_metal.exceptions import QiskitMetalDesignError\nfrom collections.abc import Mapping\nfrom shapely.ops import cascaded_union\nfrom shapely.geometry import CAP_STYLE\nimport geopandas as gpd\n\n\ndef intersecting(a: np.array, b: np.array, c: np.array, d: np.array) -> bool:\n \"\"\"Returns whether segment ab intersects or overlaps with segment cd, where\n a, b, c, and d are all coordinates.\n\n .. meta::\n Anchored Path\n\n Args:\n a (np.array): Coordinate\n b (np.array): Coordinate\n c (np.array): Coordinate\n d (np.array): Coordinate\n\n Returns:\n bool: True if intersecting, False otherwise\n \"\"\"\n\n x0_start, y0_start = a\n x0_end, y0_end = b\n x1_start, y1_start = c\n x1_end, y1_end = d\n if (x0_start == x0_end) and (x1_start == x1_end):\n # 2 vertical lines intersect only if they completely overlap at some point(s)\n if x0_end == x1_start:\n # Same x-intercept -> potential overlap, so check y coordinate\n # Distinct, non-overlapping segments if and only if min y coord of one is above max y coord of the other\n return not ((min(y0_start, y0_end) > max(y1_start, y1_end)) or\n (min(y1_start, y1_end) > max(y0_start, y0_end)))\n return False # Parallel lines with different x-intercepts don't overlap\n elif (x0_start == x0_end) or (x1_start == x1_end):\n # One segment is vertical, the other is not\n # Express non-vertical line in the form of y = mx + b and check y value\n if x1_start == x1_end:\n # Exchange names; the analysis below assumes that line 0 is the vertical one\n x0_start, x0_end, x1_start, x1_end = x1_start, x1_end, x0_start, x0_end\n y0_start, y0_end, y1_start, y1_end = y1_start, y1_end, y0_start, y0_end\n m = (y1_end - y1_start) / (x1_end - x1_start)\n b = (x1_end * y1_start - x1_start * y1_end) / (x1_end - x1_start)\n if min(x1_start, x1_end) <= x0_start <= max(x1_start, x1_end):\n if min(y0_start, y0_end) <= m * x0_start + b <= max(\n y0_start, y0_end):\n return True\n return False\n else:\n # Neither line is vertical; check slopes and y-intercepts\n b0 = (y0_start * x0_end - y0_end * x0_start) / (\n x0_end - x0_start) # y-intercept of line 0\n b1 = (y1_start * x1_end - y1_end * x1_start) / (\n x1_end - x1_start) # y-intercept of line 1\n if (x1_end - x1_start) * (y0_end - y0_start) == (x0_end - x0_start) * (\n y1_end - y1_start):\n # Lines have identical slopes\n if b0 == b1:\n # Same y-intercept -> potential overlap, so check x coordinate\n # Distinct, non-overlapping segments if and only if min x coord of one exceeds max x coord of the other\n return not ((min(x0_start, x0_end) > max(x1_start, x1_end)) or\n (min(x1_start, x1_end) > max(x0_start, x0_end)))\n return False # Parallel lines with different y-intercepts don't overlap\n else:\n # Lines not parallel so must intersect somewhere -> 
examine slopes m0 and m1\n            m0 = (y0_end - y0_start) / (x0_end - x0_start)  # slope of line 0\n            m1 = (y1_end - y1_start) / (x1_end - x1_start)  # slope of line 1\n            x_intersect = (b1 - b0) / (m0 - m1\n                                      )  # x coordinate of intersection point\n            if min(x0_start, x0_end) <= x_intersect <= max(x0_start, x0_end):\n                if min(x1_start, x1_end) <= x_intersect <= max(\n                        x1_start, x1_end):\n                    return True\n            return False\n\n\nclass RouteAnchors(QRoute):\n    \"\"\"Creates and connects a series of anchors through which the Route passes.\n\n    QRoute Default Options:\n        * pin_inputs: Dict\n            * start_pin: Dict -- Component and pin string pair. Define which pin to start from\n                * component: '' -- Name of component to start from, which has a pin\n                * pin: '' -- Name of pin used for pin_start\n            * end_pin=Dict -- Component and pin string pair. Define which pin to start from\n                * component: '' -- Name of component to end on, which has a pin\n                * pin: '' -- Name of pin used for pin_end\n        * fillet: '0'\n        * lead: Dict\n            * start_straight: '0mm' -- Lead-in, defined as the straight segment extension from start_pin.  Defaults to 0.1um.\n            * end_straight: '0mm' -- Lead-out, defined as the straight segment extension from end_pin.  Defaults to 0.1um.\n            * start_jogged_extension: '' -- Lead-in, jogged extension of lead-in. Described as list of tuples\n            * end_jogged_extension: '' -- Lead-out, jogged extension of lead-out. Described as list of tuples\n        * total_length: '7mm'\n        * trace_width: 'cpw_width' -- Defines the width of the line.  Defaults to 'cpw_width'.\n\n    Default Options:\n        * anchors: OrderedDict -- Intermediate anchors only; doesn't include endpoints\n        * advanced: Dict\n            * avoid_collision: 'false' -- true/false, defines if the route needs to avoid collisions.  Defaults to 'false'.\n    \"\"\"\n\n    component_metadata = Dict(short_name='cpw')\n    \"\"\"Component metadata\"\"\"\n\n    default_options = Dict(\n        anchors=OrderedDict(\n        ),  # Intermediate anchors only; doesn't include endpoints\n        # Example: {1: np.array([x1, y1]), 2: np.array([x2, y2])}\n        # startpin -> startpin + leadin -> anchors -> endpin + leadout -> endpin\n        advanced=Dict(avoid_collision='false'))\n    \"\"\"Default options\"\"\"\n\n    TOOLTIP = \"\"\"Creates and connects a series of anchors through which the Route passes.\"\"\"\n\n    from shapely.ops import cascaded_union\n    from matplotlib import pyplot as plt\n    import geopandas as gpd\n\n    from shapely.geometry import CAP_STYLE, JOIN_STYLE\n\n    def unobstructed_close_up(self, segment: list, component_name: str) -> bool:\n        \"\"\"Checks whether the given component's perimeter intersects or\n        overlaps a given segment.\n\n        Args:\n            segment (list): 2 vertices, in the form [np.array([x0, y0]), np.array([x1, y1])]\n            component_name (str): Alphanumeric component name\n\n        Returns:\n            bool: True if no obstacles\n        \"\"\"\n        # transform path to polygons\n        paths_converted = []\n        paths = self.design.components[component_name].qgeometry_table('path')\n        for _, row in paths.iterrows():\n            paths_converted.append(row['geometry'].buffer(\n                row['width'] / 2, cap_style=CAP_STYLE.flat))\n        # merge all the polygons\n        polygons = self.design.components[component_name].qgeometry_list('poly')\n        boundary = gpd.GeoSeries(cascaded_union(polygons + paths_converted))\n        boundary_coords = list(boundary.geometry.exterior[0].coords)\n        if any(\n                intersecting(segment[0], segment[1], boundary_coords[i],\n                             boundary_coords[i + 1])\n                for i in range(len(boundary_coords) - 1)):\n            # At least 1 intersection with the actual component contour; do not proceed!\n            return False\n        # All clear, no
intersections\n        return True\n\n    def unobstructed(self, segment: list) -> bool:\n        \"\"\"Check that no component's bounding box in self.design intersects or\n        overlaps a given segment.\n\n        Args:\n            segment (list): 2 vertices, in the form [np.array([x0, y0]), np.array([x1, y1])]\n\n        Returns:\n            bool: True if no obstacles\n        \"\"\"\n\n        # assumes rectangular bounding boxes\n        for component in self.design.components:\n            if component == self.name:\n                continue\n            xmin, ymin, xmax, ymax = self.design.components[\n                component].qgeometry_bounds()\n            # p, q, r, s are corner coordinates of each bounding box\n            p, q, r, s = [\n                np.array([xmin, ymin]),\n                np.array([xmin, ymax]),\n                np.array([xmax, ymin]),\n                np.array([xmax, ymax])\n            ]\n            if any(\n                    intersecting(segment[0], segment[1], k, l)\n                    for k, l in [(p, q), (p, r), (r, s), (q, s)]):\n                # At least 1 intersection with the component bounding box. Check the actual contour.\n                if not self.unobstructed_close_up(segment, component):\n                    # At least 1 intersection with the actual component contour; do not proceed!\n                    return False\n        # All clear, no intersections\n        return True\n\n    def connect_simple(self, start_pt: QRoutePoint,\n                       end_pt: QRoutePoint) -> np.ndarray:\n        \"\"\"Try connecting start and end with single or 2-segment/S-shaped CPWs\n        if possible.\n\n        Args:\n            start_pt (QRoutePoint): QRoutePoint of the start\n            end_pt (QRoutePoint): QRoutePoint of the end\n\n        Returns:\n            List of vertices of a CPW going from start to end\n\n        Raises:\n            QiskitMetalDesignError: If the connect_simple() has failed.\n        \"\"\"\n        avoid_collision = self.parse_options().advanced.avoid_collision\n\n        start_direction = start_pt.direction\n        start = start_pt.position\n        end_direction = end_pt.direction\n        end = end_pt.position\n\n        # end_direction originates strictly from endpoint + leadout (NOT intermediate stopping anchors)\n        self.assign_direction_to_anchor(start_pt, end_pt)\n        stop_direction = end_pt.direction\n\n        if (start[0] == end[0]) or (start[1] == end[1]):\n            # Matching x or y coordinates -> check if endpoints can be connected with a single segment\n            if mao.dot(start_direction, end - start) >= 0:\n                # Start direction and end - start for CPW must not be anti-aligned\n                if (end_direction is None) or (mao.dot(end - start,\n                                                       end_direction) <= 0):\n                    # If leadout + end has been reached, the single segment CPW must not be aligned with its direction\n                    return np.empty((0, 2), float)\n        else:\n            # If the endpoints don't share a common x or y value:\n            # designate them as 2 corners of an axis aligned rectangle\n            # and check if both start and end directions are aligned with\n            # the displacement vectors between start/end and\n            # either of the 2 remaining corners (\"perfect alignment\").\n            corner1 = np.array([start[0],\n                                end[1]])  # x coordinate matches with start\n            corner2 = np.array([end[0],\n                                start[1]])  # x coordinate matches with end\n            if avoid_collision:\n                # Check for collisions at the outset to avoid repeat work\n                startc1end = bool(\n                    self.unobstructed([start, corner1]) and\n                    self.unobstructed([corner1, end]))\n                startc2end = bool(\n                    self.unobstructed([start, corner2]) and\n                    self.unobstructed([corner2, end]))\n            else:\n                startc1end = startc2end = True\n            if (mao.dot(start_direction, corner1 - start) > 0) and startc1end:\n                # corner1 is \"in front of\" the start_pt\n                if (end_direction is None) or (mao.dot(end_direction,\n                                                       corner1 - end) >= 0):\n                    # corner1 is also \"in front of\" the end_pt\n                    return np.expand_dims(corner1, axis=0)\n            elif (mao.dot(start_direction, corner2 - start) > 0) and startc2end:\n                # corner2 is \"in
front of\" the start_pt\n if (end_direction is None) or (mao.dot(end_direction,\n corner2 - end) >= 0):\n # corner2 is also \"in front of\" the end_pt\n return np.expand_dims(corner2, axis=0)\n # In notation below, corners 3 and 4 correspond to\n # the ends of the segment bisecting the longer rectangle formed by start and end\n # while the segment formed by corners 5 and 6 bisect the shorter rectangle\n if stop_direction[\n 0]: # \"Wide\" rectangle -> vertical middle segment is more natural\n corner3 = np.array([(start[0] + end[0]) / 2, start[1]])\n corner4 = np.array([(start[0] + end[0]) / 2, end[1]])\n corner5 = np.array([start[0], (start[1] + end[1]) / 2])\n corner6 = np.array([end[0], (start[1] + end[1]) / 2])\n else: # \"Tall\" rectangle -> horizontal middle segment is more natural\n corner3 = np.array([start[0], (start[1] + end[1]) / 2])\n corner4 = np.array([end[0], (start[1] + end[1]) / 2])\n corner5 = np.array([(start[0] + end[0]) / 2, start[1]])\n corner6 = np.array([(start[0] + end[0]) / 2, end[1]])\n if avoid_collision:\n startc3c4end = bool(\n self.unobstructed([start, corner3]) and\n self.unobstructed([corner3, corner4]) and\n self.unobstructed([corner4, end]))\n startc5c6end = bool(\n self.unobstructed([start, corner5]) and\n self.unobstructed([corner5, corner6]) and\n self.unobstructed([corner6, end]))\n else:\n startc3c4end = startc5c6end = True\n if (mao.dot(start_direction, stop_direction) < 0) and (mao.dot(\n start_direction, corner3 - start) > 0) and startc3c4end:\n if (end_direction is None) or (mao.dot(end_direction,\n corner4 - end) > 0):\n # Perfectly aligned S-shaped CPW\n return np.vstack((corner3, corner4))\n # Relax constraints and check if imperfect 2-segment or S-segment works,\n # where \"imperfect\" means 1 or more dot products of directions\n # between successive segments is 0; otherwise return an empty list\n if (mao.dot(start_direction, corner1 - start) >= 0) and startc1end:\n if (end_direction is None) or (mao.dot(end_direction,\n corner1 - end) >= 0):\n return np.expand_dims(corner1, axis=0)\n if (mao.dot(start_direction, corner2 - start) >= 0) and startc2end:\n if (end_direction is None) or (mao.dot(end_direction,\n corner2 - end) >= 0):\n return np.expand_dims(corner2, axis=0)\n if (mao.dot(start_direction, corner3 - start) >=\n 0) and startc3c4end:\n if (end_direction is None) or (mao.dot(end_direction,\n corner4 - end) >= 0):\n return np.vstack((corner3, corner4))\n if (mao.dot(start_direction, corner5 - start) >=\n 0) and startc5c6end:\n if (end_direction is None) or (mao.dot(end_direction,\n corner6 - end) >= 0):\n return np.vstack((corner5, corner6))\n raise QiskitMetalDesignError(\n \"connect_simple() has failed. This might be due to one of two reasons. \"\n f\"1. Either one of the start point {start} or the end point {end} \"\n \"provided are inside the bounding box of another QComponent. \"\n \"Please move the point, or setup a \\\"lead\\\" to exit the QComponent area. \"\n \"2. none of the 4 routing possibilities of this algorithm \"\n \"(^|_, ^^|, __|, _|^) can complete. 
Please use Pathfinder instead\")\n\n def free_manhattan_length_anchors(self):\n \"\"\"Computes the free-flight manhattan distance between start_pt and\n end_pt passing through all of the given anchor points.\n\n Returns:\n float: Total length connecting all points in order\n \"\"\"\n anchors = self.parse_options().anchors\n reference = [self.head.get_tip().position]\n reference.extend(list(anchors.values()))\n reference.append(self.tail.get_tip().position)\n\n length = 0\n for i in range(1, len(reference)):\n length += abs(reference[i][0] -\n reference[i - 1][0]) + abs(reference[i][1] -\n reference[i - 1][1])\n return length\n\n def trim_pts(self):\n \"\"\"Crops the sequence of points to concatenate.\n\n For example, if a segment between two anchors has no points,\n then the segment is eliminated (only anchor points will do).\n Modified directly the self.intermediate_pts, thus nothing is\n returned.\n \"\"\"\n if isinstance(self.intermediate_pts, Mapping):\n keys_to_delete = set()\n for key, value in self.intermediate_pts.items():\n if value is None:\n keys_to_delete.add(key)\n try:\n # value is a list\n if not value:\n keys_to_delete.add(key)\n except ValueError:\n # value is a numpy\n if not value.size:\n keys_to_delete.add(key)\n for key in keys_to_delete:\n del self.intermediate_pts[key]\n\n def make(self):\n \"\"\"Generates path from start pin to end pin.\"\"\"\n p = self.parse_options()\n anchors = p.anchors\n\n # Set the CPW pins and add the points/directions to the lead-in/out arrays\n self.set_pin(\"start\")\n self.set_pin(\"end\")\n\n # Align the lead-in/out to the input options set from the user\n start_point = self.set_lead(\"start\")\n end_point = self.set_lead(\"end\")\n\n self.intermediate_pts = OrderedDict()\n for arc_num, coord in anchors.items():\n arc_pts = self.connect_simple(self.get_tip(), QRoutePoint(coord))\n if arc_pts is None:\n self.intermediate_pts[arc_num] = [coord]\n else:\n self.intermediate_pts[arc_num] = np.concatenate(\n [arc_pts, [coord]], axis=0)\n arc_pts = self.connect_simple(self.get_tip(), end_point)\n if arc_pts is not None:\n self.intermediate_pts[len(anchors)] = np.array(arc_pts)\n\n # concatenate all points, transforming the dictionary into a single numpy array\n self.trim_pts()\n self.intermediate_pts = np.concatenate(list(\n self.intermediate_pts.values()),\n axis=0)\n\n # Make points into elements\n self.make_elements(self.get_points())\n" ]
[ [ "numpy.expand_dims", "numpy.empty", "numpy.concatenate", "numpy.array", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LeoTafti/darts
[ "210605fafb730de564e3d723ab3919ed94da42b9", "210605fafb730de564e3d723ab3919ed94da42b9", "210605fafb730de564e3d723ab3919ed94da42b9", "210605fafb730de564e3d723ab3919ed94da42b9" ]
[ "darts/tests/test_missing_values.py", "darts/tests/test_transformer.py", "darts/models/regression_model.py", "examples/M4_competition/evaluate_groe_R.py" ]
[ "import unittest\nimport pandas as pd\nimport numpy as np\n\nfrom ..timeseries import TimeSeries\nfrom ..utils.missing_values import auto_fillna, na_ratio\n\n\nclass MissingValuesTestCase(unittest.TestCase):\n\n time = pd.date_range('20130101', '20130130')\n lin = [float(i) for i in range(len(time))]\n cub = [float(i - 4) ** 2 for i in range(len(time))]\n series1: TimeSeries = TimeSeries.from_times_and_values(time, np.array([2.0] * len(time)))\n series2: TimeSeries = TimeSeries.from_times_and_values(time, np.array(lin))\n series3: TimeSeries = TimeSeries.from_times_and_values(time, np.array([10] * 10 + lin[-20:]))\n series4: TimeSeries = TimeSeries.from_times_and_values(time, np.array(lin[:20] + [19] * 10))\n series5: TimeSeries = TimeSeries.from_times_and_values(time, np.array(cub))\n series6: TimeSeries = TimeSeries.from_times_and_values(time, [0] * 2 + cub[2:-2] + [-1] * 2)\n\n def test_fill_constant(self):\n seriesA: TimeSeries = TimeSeries.from_times_and_values(\n self.time,\n np.array([np.nan] * 5 + [2.0] * 5 + [np.nan] * 5 + [2.0] * 10 + [np.nan] * 5)\n )\n\n # Check that no changes are made if there are no missing values\n self.assertEqual(self.series1, auto_fillna(self.series1))\n\n # Check that a constant function is filled to a constant function\n self.assertEqual(self.series1, auto_fillna(seriesA))\n\n def test_linear(self):\n seriesB: TimeSeries = TimeSeries.from_times_and_values(self.time,\n np.array(self.lin[:10] + [np.nan] * 10 + self.lin[-10:]))\n\n # Check for linear interpolation part\n self.assertEqual(self.series2, auto_fillna(seriesB))\n\n def test_bfill(self):\n seriesC: TimeSeries = TimeSeries.from_times_and_values(self.time,\n np.array([np.nan] * 10 + self.lin[-20:]))\n\n # Check that auto-backfill works properly\n self.assertEqual(self.series3, auto_fillna(seriesC))\n\n def test_ffil(self):\n seriesD: TimeSeries = TimeSeries.from_times_and_values(self.time,\n np.array(self.lin[:20] + [np.nan] * 10))\n\n self.assertEqual(self.series4, auto_fillna(seriesD))\n\n def test_fill_quad(self):\n seriesE: TimeSeries = TimeSeries.from_times_and_values(self.time,\n np.array(self.cub[:10] + [np.nan] * 10 + self.cub[-10:]))\n self.assertEqual(self.series5, round(auto_fillna(seriesE, method='quadratic'), 7))\n\n def test_multivariate_fill(self):\n seriesA: TimeSeries = TimeSeries.from_times_and_values(\n self.time,\n np.array([np.nan] * 5 + [2.0] * 5 + [np.nan] * 5 + [2.0] * 10 + [np.nan] * 5)\n )\n seriesB: TimeSeries = TimeSeries.from_times_and_values(self.time,\n np.array(self.lin[:10] + [np.nan] * 10 + self.lin[-10:]))\n self.assertEqual(self.series1.stack(self.series2), auto_fillna(seriesA.stack(seriesB)))\n\n def test__na_ratio(self):\n seriesF = TimeSeries.from_times_and_values(self.time, list(range(27)) + [np.nan] * 3)\n\n # univariate case\n self.assertEqual(na_ratio(seriesF), 0.1)\n\n # multivariate case\n self.assertEqual(na_ratio(seriesF.stack(seriesF)), 0.1)\n", "import unittest\nimport logging\n\nimport numpy as np\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\n\nfrom ..preprocessing import ScalerWrapper\nfrom ..utils import timeseries_generation as tg\n\n\nclass TransformerTestCase(unittest.TestCase):\n __test__ = True\n series1 = tg.random_walk_timeseries(length=100) * 20 - 10.\n series2 = series1.stack(tg.random_walk_timeseries(length=100) * 20 - 100.)\n\n @classmethod\n def setUpClass(cls):\n logging.disable(logging.CRITICAL)\n\n def test_scaling(self):\n self.series3 = self.series1[:1]\n transformer1 = 
ScalerWrapper(MinMaxScaler(feature_range=(0, 2)))\n transformer2 = ScalerWrapper(StandardScaler())\n\n series1_tr1 = transformer1.fit_transform(self.series1)\n series1_tr2 = transformer2.fit_transform(self.series1)\n series3_tr2 = transformer2.transform(self.series3)\n\n transformer3 = ScalerWrapper(MinMaxScaler(feature_range=(0, 2)))\n transformer4 = ScalerWrapper(StandardScaler())\n\n series2_tr3 = transformer3.fit_transform(self.series2)\n series2_tr4 = transformer4.fit_transform(self.series2)\n\n # should comply with scaling constraints\n self.assertAlmostEqual(min(series1_tr1.values().flatten()), 0.)\n self.assertAlmostEqual(max(series1_tr1.values().flatten()), 2.)\n self.assertAlmostEqual(np.mean(series1_tr2.values().flatten()), 0.)\n self.assertAlmostEqual(np.std(series1_tr2.values().flatten()), 1.)\n\n self.assertAlmostEqual(min(series2_tr3.values().flatten()), 0.)\n self.assertAlmostEqual(max(series2_tr3.values().flatten()), 2.)\n self.assertAlmostEqual(np.mean(series2_tr4.values().flatten()), 0.)\n self.assertAlmostEqual(np.std(series2_tr4.values().flatten()), 1.)\n\n # test inverse transform\n series1_recovered = transformer2.inverse_transform(series1_tr2)\n series2_recovered = transformer3.inverse_transform(series2_tr3)\n series3_recovered = transformer2.inverse_transform(series3_tr2)\n np.testing.assert_almost_equal(series1_recovered.values().flatten(), self.series1.values().flatten())\n np.testing.assert_almost_equal(series2_recovered.values().flatten(), self.series2.values().flatten())\n self.assertEqual(series1_recovered.width, self.series1.width)\n self.assertEqual(series2_recovered.width, self.series2.width)\n self.assertEqual(series3_recovered, series1_recovered[:1])\n", "\"\"\"\nRegression Model Base Class\n---------------------------\n\nA regression model predicts values for a time series :math:`Y_t` as a function\nof :math:`N` \"features\" time series :math:`X^i_t`:\n\n.. math:: Y_t = f(X^1_t, ..., X^N_t),\n\nwhere :math:`t` denotes the time step. Here, the function :math:`f()` is not necessarily linear.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom abc import ABC, abstractmethod\nfrom typing import List, Iterable, Union, Any\n\nfrom ..timeseries import TimeSeries\nfrom ..logging import raise_if_not, get_logger, raise_log\nfrom ..utils import _build_tqdm_iterator, _with_sanity_checks, _get_timestamp_at_point, _backtest_general_checks\n\nlogger = get_logger(__name__)\n\n\n# TODO: Extend this to a \"DynamicRegressiveModel\" class, which acts on List[List[TimeSeries]].\n# TODO: The first List[] would contain time-sliding lists of time series, letting the model\n# TODO: be able to learn how to change weights over time. 
When len() of outer List[] is 0 it's a particular case\nclass RegressionModel(ABC):\n    @abstractmethod\n    def __init__(self):\n        \"\"\" Regression Model.\n\n        This is the base class for all regression models.\n        \"\"\"\n\n        # Stores training data information:\n        self.train_features: List[TimeSeries] = None\n        self.train_target: TimeSeries = None\n\n        # state\n        self._fit_called = False\n\n    @abstractmethod\n    def fit(self, train_features: List[TimeSeries], train_target: TimeSeries) -> None:\n        \"\"\" Fits/trains the model using the provided list of features time series and the target time series.\n\n        Parameters\n        ----------\n        train_features\n            A list of features time series, all of the same length as the target series\n        train_target\n            A target time series, of the same length as the features series\n        \"\"\"\n\n        raise_if_not(len(train_features) > 0, 'Need at least one feature series', logger)\n        raise_if_not(all([s.has_same_time_as(train_target) for s in train_features]),\n                     'All provided time series must have the same time index', logger)\n        self.train_features = train_features\n        self.train_target = train_target\n        self._fit_called = True\n\n    @abstractmethod\n    def predict(self, features: List[TimeSeries]) -> TimeSeries:\n        \"\"\" Predicts values of the target time series, given a list of features time series\n\n        Parameters\n        ----------\n        features\n            The list of features time series, of the same length\n\n        Returns\n        -------\n        TimeSeries\n            A series containing the predicted targets, of the same length as the features series\n        \"\"\"\n\n        if (not self._fit_called):\n            raise_log(Exception('fit() must be called before predict()'), logger)\n\n        length_ok = len(features) == len(self.train_features)\n        dimensions_ok = all(features[i].width == self.train_features[i].width for i in range(len(features)))\n        raise_if_not(length_ok and dimensions_ok,\n                     'The number and dimensionalities of all given features must correspond to those used for'\n                     ' training.', logger)\n\n    def _backtest_sanity_checks(self, *args: Any, **kwargs: Any) -> None:\n        \"\"\"Sanity checks for the backtest function\n\n        Parameters\n        ----------\n        args\n            The args parameter(s) provided to the backtest function.\n        kwargs\n            The kwargs parameter(s) provided to the backtest function.\n\n        Raises\n        ------\n        ValueError\n            when a check on the parameter does not pass.\n        \"\"\"\n\n        # parse args\n        feature_series = args[0]\n        target_series = args[1]\n\n        raise_if_not(all([s.has_same_time_as(target_series) for s in feature_series]), 'All provided time series must '\n                     'have the same time index', logger)\n\n        _backtest_general_checks(target_series, kwargs)\n\n    def _backtest_model_specific_sanity_checks(self, *args: Any, **kwargs: Any) -> None:\n        \"\"\"Method to be overridden in subclass for model specific sanity checks\"\"\"\n        pass\n\n    @_with_sanity_checks(\"_backtest_sanity_checks\", \"_backtest_model_specific_sanity_checks\")\n    def backtest(self,\n                 feature_series: Iterable[TimeSeries],\n                 target_series: TimeSeries,\n                 start: Union[pd.Timestamp, float, int] = 0.7,\n                 forecast_horizon: int = 1,\n                 stride: int = 1,\n                 trim_to_series: bool = True,\n                 verbose=False) -> TimeSeries:\n        \"\"\" A function for backtesting `RegressionModel`s.\n\n        This function computes the time series of historical predictions\n        that would have been obtained if the current model had been used to predict `target_series`\n        using the `feature_series`, with a certain time horizon.\n\n        To this end, it repeatedly builds a training set composed of both features and targets,\n        from `feature_series` and `target_series`,
respectively.\n        It trains the current model on the training set, emits a (point) prediction for a fixed\n        forecast horizon, and then moves the end of the training set forward by `stride`\n        time steps. The resulting predictions are then returned.\n\n        This always re-trains the model on the entire available history,\n        corresponding to an expanding window strategy.\n\n        Parameters\n        ----------\n        feature_series\n            A list of time series representing the features for the regression model (independent variables)\n        target_series\n            The univariate target time series for the regression model (dependent variable)\n        start\n            The first prediction time, at which a prediction is computed for a future time\n        forecast_horizon\n            The forecast horizon for the point predictions\n        stride\n            The number of time steps (the unit being the frequency of `series`) between two consecutive predictions.\n        trim_to_series\n            Whether the predicted series has the end trimmed to match the end of the main series\n        verbose\n            Whether to print progress\n\n        Returns\n        -------\n        TimeSeries\n            A time series containing the forecast values when successively applying\n            the current model with the specified forecast horizon.\n        \"\"\"\n        start = _get_timestamp_at_point(start, target_series)\n\n        # determine the last prediction time\n        if trim_to_series:\n            last_pred_time = target_series.time_index()[-forecast_horizon - stride]\n        else:\n            last_pred_time = target_series.time_index()[-stride - 1]\n\n        # build the prediction times in advance (to be able to use tqdm)\n        pred_times = [start]\n        while pred_times[-1] <= last_pred_time:\n            pred_times.append(pred_times[-1] + target_series.freq() * stride)\n\n        # what we'll return\n        values = []\n        times = []\n\n        iterator = _build_tqdm_iterator(pred_times, verbose)\n\n        for pred_time in iterator:\n            # build train/val series\n            train_features = [s.drop_after(pred_time) for s in feature_series]\n            train_target = target_series.drop_after(pred_time)\n            val_features = [s.slice_n_points_after(pred_time, forecast_horizon) for s in feature_series]\n\n            self.fit(train_features, train_target)\n            pred = self.predict(val_features)\n            values.append(pred.values()[-1])  # store the N-th point\n            times.append(pred.end_time())  # store the N-th timestamp\n\n        return TimeSeries.from_times_and_values(pd.DatetimeIndex(times), np.array(values))\n\n    def residuals(self) -> TimeSeries:\n        \"\"\" Computes the time series of residuals of this model on the training time series\n\n        The residuals are computed as\n\n        ..
math:: z_t := y_t - \\\\hat{y}_t,\n\n where :math:`y_t` is the actual target time series over the training set,\n and :math:`\\\\hat{y}_t` is the time series of predicted targets, over the training set.\n\n Returns\n -------\n TimeSeries\n The time series containing the residuals\n \"\"\"\n\n if (not self._fit_called):\n raise_log(Exception('fit() must be called before predict()'), logger)\n\n train_pred = self.predict(self.train_features)\n return self.train_target - train_pred\n", "\"\"\"Reproducing 6th place winning model from M4 competition\n Generalised Rolling Origin Evaluation (GROE)\n\"\"\"\n\nfrom darts import TimeSeries\nfrom darts.models import NaiveSeasonal\nfrom darts.models.forecasting_model import ForecastingModel\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm_notebook as tqdm\nimport pickle\n\nimport rpy2.robjects as robjects\nfrom rpy2.rinterface_lib.embedded import RRuntimeError\nfrom rpy2.robjects import pandas2ri\n\nfrom M4_metrics import owa_m4, mase_m4, smape_m4\n\n\nrstring = \"\"\"\n function(input, fh, fq){\n library(forecast)\n input <- ts(input, frequency=fq)\n SeasonalityTest <- function(input, ppy){\n #Used to determine whether a time series is seasonal\n tcrit <- 1.645\n if (length(input)<3*ppy){\n test_seasonal <- FALSE\n }else{\n xacf <- acf(input, plot = FALSE)$acf[-1, 1, 1]\n clim <- tcrit/sqrt(length(input)) * sqrt(cumsum(c(1, 2 * xacf^2)))\n test_seasonal <- ( abs(xacf[ppy]) > clim[ppy] )\n\n if (is.na(test_seasonal)==TRUE){ test_seasonal <- FALSE }\n }\n\n return(test_seasonal)\n }\n ppy <- frequency(input) ; ST <- F\n if (ppy>1){ ST <- SeasonalityTest(input,ppy) }\n if (ST==T){\n Dec <- decompose(input,type=\"multiplicative\")\n des_input <- input/Dec$seasonal\n SIout <- head(rep(Dec$seasonal[(length(Dec$seasonal)-ppy+1):length(Dec$seasonal)], fh), fh)\n }else{\n des_input <- input ; SIout <- rep(1, fh)\n }\n list(des_input, SIout)\n }\n\"\"\"\n\n# Test if a seasonality of period m exists, and extract it\ntest_seasonality = robjects.r(rstring)\n# Use example\n# des_input, seasonOut = test_seasonality(train.values(), len(test), m)\n\nrstring = \"\"\"\n function(input, fh, fq){\n library(forecTheta)\n input <- ts(input, frequency=fq)\n out_otm <- otm(input, fh, level=c(95,95))\n out_otm$mean\n }\n\"\"\"\n# OTM model that returns a forecast of horizon fh\nrOTM = robjects.r(rstring)\n# Use example\n# outOTM = rOTM(train.values(), len(test), m)\n# forecast_otm = TimeSeries.from_times_and_values(test.time_index(), outOTM)\n\nrstring = \"\"\"\n function(input, fh, fq){\n library(forecTheta)\n input <- ts(input, frequency=fq)\n out_dotm <- dotm(input, fh, level=c(95,95))\n out_dotm$mean\n }\n\"\"\"\n# DOTM model that returns a forecast of horizon fh\nrDOTM = robjects.r(rstring)\n# Use example\n# outDOTM = rDOTM(train.values(), len(test), m)\n# forecast_dotm = TimeSeries.from_times_and_values(test.time_index(), outDOTM)\n\nrstring = \"\"\"\n function(input, fq){\n library(forecast)\n input <- ts(input, frequency=fq)\n out_ets <- ets(input)\n paste0(out_ets$components[1:3], collapse='')\n }\n\"\"\"\n# Train an ETS model and return its parameters\ngetETScomponent = robjects.r(rstring)\n\nrstring = \"\"\"\n function(input, fh, fq, model){\n library(forecast)\n input <- ts(input, frequency=fq)\n out_ets <- forecast(ets(input, model=model),h=fh, level=0)\n out_ets$mean\n }\n\"\"\"\n# forecast an ETS model given its parameters\nrETS = robjects.r(rstring)\n# Use example\n# modelETS = getETScomponent(train.values(), m)\n# out_ets = 
rETS(train.values(), len(test), m, modelETS)\n# forecast_ets = TimeSeries.from_times_and_values(test.time_index(), out_ets)\n\nrstring = \"\"\"\n function(input, fq){\n library(forecast)\n input <- ts(input, frequency=fq)\n out_arima <- auto.arima(input)\n out_arima\n }\n\"\"\"\n# Train an AutoARIMA model\ngetARIMAcomponent = robjects.r(rstring)\n\nrstring = \"\"\"\n function(input, fh, fq, model){\n library(forecast)\n input <- ts(input, frequency=fq)\n out_arima <- forecast(Arima(y=input, model=model), h=fh, level=0)\n out_arima$mean\n }\n\"\"\"\n# forecast a given ARIMA model\nrARIMA = robjects.r(rstring)\n# Use example\n# arima_model = getARIMAcomponent(train.values(), m)\n# out_arima = rARIMA(train.values(), len(test), m, arima_model)\n# forecast_arima = TimeSeries.from_times_and_values(test.time_index(), out_arima)\n\nrstring = \"\"\"\n function(input, fh, fq){\n library(forecast)\n input <- ts(input, frequency=fq)\n SeasonalityTest <- function(input, ppy){\n #Used to determine whether a time series is seasonal\n tcrit <- 1.645\n if (length(input)<3*ppy){\n test_seasonal <- FALSE\n }else{\n xacf <- acf(input, plot = FALSE)$acf[-1, 1, 1]\n clim <- tcrit/sqrt(length(input)) * sqrt(cumsum(c(1, 2 * xacf^2)))\n test_seasonal <- ( abs(xacf[ppy]) > clim[ppy] )\n\n if (is.na(test_seasonal)==TRUE){ test_seasonal <- FALSE }\n }\n\n return(test_seasonal)\n }\n ppy <- frequency(input) ; ST <- F\n if (ppy>1){ ST <- SeasonalityTest(input,ppy) }\n if (ST==T){\n Dec <- decompose(input,type=\"multiplicative\")\n des_input <- input/Dec$seasonal\n SIout <- head(rep(Dec$seasonal[(length(Dec$seasonal)-ppy+1):length(Dec$seasonal)], fh), fh)\n }else{\n des_input <- input ; SIout <- rep(1, fh)\n }\n naive(des_input, h=fh)$mean*SIout\n }\n\"\"\"\n# Apply a naive model on deseasonalized time series, and reseason the forecast (naive2 model)\nrNaive2 = robjects.r(rstring)\n# Use example\n# out_naive2 = rNaive2(train.values(), len(test), m)\n# forecast_naive2 = TimeSeries.from_times_and_values(test.time_index(), out_naive2)\n\n\ndef groe_owa(ts: TimeSeries, model: ForecastingModel, fq: int, n1: int, m: int, p: int) -> float:\n \"\"\"\n Implementation of Generalized Rolling Origin Evaluation using OWA score.\n\n The concept is to cross-validate a model on a time series with rolling origin, using OWA score from M4 competition.\n\n Parameters\n -----------\n ts\n The time series object to use to cross-validate\n model\n The Darts model to evaluate\n fq\n Period of the seasonality of the time series\n n1\n First origin to use for the cross-validation\n m\n Stride used for rolling the origin\n p\n number of stride to operate\n Returns\n -------\n Float\n sum of OWA score for all different origins\n If there is an error with one of the origin, return 0.\n \"\"\"\n # todo: Implement generalized version from R\n n = len(ts)\n errors = []\n for i in range(p):\n # if origin is further than end timestamp, end function\n if n1 + i * m >= n:\n break\n ni = n1 + i * m\n npred = n - ni\n train = ts[:ni]\n test = ts[ni:]\n\n forecast_naive2 = rNaive2(train.values(), npred, fq)\n forecast_naive2 = TimeSeries.from_times_and_values(test.time_index(), forecast_naive2)\n try:\n error_ase_n2 = mase_m4(train, test, forecast_naive2)\n error_sape_n2 = smape_m4(test, forecast_naive2)\n except ValueError:\n errors.append(0)\n continue\n try:\n model.fit(train)\n forecast = model.predict(npred)\n except RRuntimeError:\n errors.append(0)\n continue\n try:\n error_ase = mase_m4(train, test, forecast)\n error_sape = smape_m4(test, forecast)\n 
OWA = 0.5 * (error_sape / error_sape_n2) + 0.5 * (error_ase / error_ase_n2)\n errors.append(np.sum(OWA))\n except ValueError:\n errors.append(0)\n errors = np.sum(errors)\n return errors\n\n\nclass RModel(ForecastingModel):\n \"\"\"\n Wrapper around R function that takes a time series and return a forecast\n \"\"\"\n def __init__(self, rmodel, m, **info):\n super().__init__()\n self.rmodel = rmodel\n self.m = m\n self.info = info\n self.values = None\n\n def fit(self, ts):\n super().fit(ts)\n self.values = ts.values()\n\n def predict(self, n):\n super().predict(n)\n out = self.rmodel(self.values, n, self.m, **self.info)\n return self._build_forecast_series(out)\n\n\ndef fallback(mase_all, smape_all, train, test, m):\n naive = NaiveSeasonal(K=m)\n naive.fit(train)\n forecast = naive.predict(len(test))\n mase_all.append(np.vstack([\n mase_m4(train, test, forecast, m=m),\n ]))\n smape_all.append(np.vstack([\n smape_m4(test, forecast),\n ]))\n\n\nif __name__ == \"__main__\":\n pandas2ri.activate()\n\n data_categories = ['Macro', 'Micro', 'Demographic', 'Industry', 'Finance', 'Other']\n data_freq = ['Yearly', 'Quarterly', 'Monthly', 'Weekly', 'Daily', 'Hourly']\n info_dataset = pd.read_csv('dataset/m4/M4-info.csv', delimiter=',').set_index('M4id')\n\n for freq in data_freq:\n ts_train = pickle.load(open(\"dataset/train_\" + freq + \".pkl\", \"rb\"))\n ts_test = pickle.load(open(\"dataset/test_\" + freq + \".pkl\", \"rb\"))\n mase_all = []\n smape_all = []\n m = info_dataset.Frequency[freq[0] + '1']\n for train, test in tqdm(zip(ts_train, ts_test)):\n model_arima = getARIMAcomponent(train.values(), m)\n model_ets = getETScomponent(train.values(), m)\n otm_model = RModel(rOTM, m)\n dotm_model = RModel(rDOTM, m)\n arima_model = RModel(rARIMA, m, model=model_arima)\n ets_model = RModel(rETS, m, model=model_ets)\n models = [otm_model, dotm_model, arima_model, ets_model]\n model_predictions = []\n for model in models:\n try:\n model.fit(train)\n model_predictions.append(model.predict(len(test)))\n except RRuntimeError:\n fallback(mase_all, smape_all, train, test, m)\n continue\n criterion = [\n groe_owa(train, otm_model, m, max(5, len(train) - len(test)),\n int(np.floor(len(test) / 6)), 6),\n groe_owa(train, dotm_model, m, max(5, len(train) - len(test)),\n int(np.floor(len(test) / 6)), 6),\n groe_owa(train, arima_model, m, max(5, len(train) - len(test)),\n int(np.floor(len(test) / 6)), 6),\n groe_owa(train, ets_model, m, max(5, len(train) - len(test)),\n int(np.floor(len(test) / 6)), 6)]\n\n if not np.all(np.array(criterion) > 0):\n fallback(mase_all, smape_all, train, test, m)\n continue\n\n Score = 1 / np.array(criterion)\n pesos = Score / Score.sum()\n\n groe_ensemble = 0\n for prediction, weight in zip(model_predictions, pesos):\n groe_ensemble = prediction * weight + groe_ensemble\n if (groe_ensemble.univariate_values() < 0).any():\n indices = test.time_index()[groe_ensemble.univariate_values() < 0]\n groe_ensemble = groe_ensemble.update(indices, np.zeros(len(indices)))\n\n mase_all.append(np.vstack([\n mase_m4(train, test, groe_ensemble, m=m),\n ]))\n smape_all.append(np.vstack([\n smape_m4(test, groe_ensemble),\n ]))\n print(\"MASE GROE: {:.3f}\".format(np.nanmean(np.stack(mase_all), axis=(0, 2))))\n print(\"sMAPE GROE: {:.3f}\".format(np.nanmean(np.stack(smape_all), axis=(0, 2))))\n print(\"OWA GROE: {:.3f}\".format(owa_m4(freq,\n np.nanmean(np.stack(smape_all), axis=(0, 2)),\n np.nanmean(np.stack(mase_all), axis=(0, 2)))))\n pickle.dump(mase_all, open(\"groeR_mase_\" + freq + \".pkl\", 
\"wb\"))\n pickle.dump(smape_all, open(\"groeR_smape_\" + freq + \".pkl\", \"wb\"))\n" ]
[ [ "numpy.array", "pandas.date_range" ], [ "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.MinMaxScaler" ], [ "numpy.array", "pandas.DatetimeIndex" ], [ "numpy.array", "pandas.read_csv", "numpy.sum", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
susan120433/distribution-is-all-you-need
[ "d91510d07384ade86f271e48b1784039f15e0d48" ]
[ "gaussian.py" ]
[ "\"\"\"\n Code by Tae-Hwan Hung(@graykode)\n https://en.wikipedia.org/wiki/Normal_distribution\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef gaussian(x, n):\n u = x.mean()\n s = x.std()\n\n # divide [x.min(), x.max()] by n\n x = np.linspace(x.min(), x.max(), n)\n\n a = ((x - u) ** 2) / (2 * (s ** 2))\n y = 1 / (s * np.sqrt(2 * np.pi)) * np.exp(-a)\n\n return x, y, x.mean(), x.std()\n\nx = np.arange(-100, 100) # define range of x\nx, y, u, s = gaussian(x, 10000)\n\nplt.plot(x, y, label=r'$\\mu=%.2f,\\ \\sigma=%.2f$' % (u, s))\nplt.legend()\nplt.savefig('graph/gaussian.png')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.arange", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.exp", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VTXNN/E2E
[ "b877d1bf7fdb94f885804ac42f41cc9ecc3102f5" ]
[ "Ops/test_ops.py" ]
[ "import tensorflow as tf\nimport os\nimport math\nimport time\nimport numpy\nimport unittest\n\ntf.compat.v1.disable_eager_execution()\n\nfrom sklearn.neighbors import KernelDensity\n\nfrom kde_histogram import KDEHistogram\nfrom histogram_max import *\n\n\nsession = None\ndef get_session():\n global session\n if not session:\n session = tf.compat.v1.Session()\n return session\n\n\ndef makeModel(nelem,nbins,start,end,kernel,bandwidth):\n class TestModel():\n def __init__(self):\n self.nelem = nelem\n self.nbins = nbins\n self.start = start\n self.end = end\n self.kernel = kernel\n self.bandwidth = bandwidth\n\n self.values = tf.keras.layers.Input(shape=(self.nelem,))\n self.weights = tf.keras.layers.Input(shape=(self.nelem,))\n self.factors = tf.keras.layers.Input(shape=(self.nbins,))\n\n self.hist = KDEHistogram(\n nbins=self.nbins,\n start=self.start,\n end=self.end,\n kernel=\"flat\",\n bandwidth_hist=self.bandwidth,\n bandwidth_grad=self.bandwidth,\n add_overflow = False\n )([self.values,self.weights])\n\n self.model = tf.keras.Model(inputs=[self.values,self.weights],outputs=[self.hist])\n\n score = tf.keras.layers.Lambda(lambda x: tf.multiply(x[0],x[1]))([self.hist,self.factors])\n self.score = tf.keras.layers.Lambda(lambda x: tf.reduce_sum(x))(score)\n\n self.model = tf.keras.Model(inputs=[self.values,self.weights],outputs=[self.hist])\n self.model.compile(loss='mse', optimizer='sgd') #dummy\n\n self.gradients = tf.gradients(self.score,[self.values,self.weights])\n \n def getHist(self,valuesArray,weightsArray):\n return self.model.predict_on_batch([valuesArray,weightsArray])\n \n def getScore(self,valuesArray,weightsArray,factorsArray):\n sess = get_session()\n scoreArray = sess.run(self.score, feed_dict = {\n self.values: valuesArray,\n self.weights: weightsArray,\n self.factors: factorsArray\n })\n return scoreArray\n def getGrad(self,valuesArray,weightsArray,factorsArray):\n sess = get_session()\n gradientsList = sess.run(self.gradients, feed_dict = {\n self.values: valuesArray,\n self.weights: weightsArray,\n self.factors: factorsArray\n })\n return gradientsList\n \n return TestModel()\n\nclass KDETest(unittest.TestCase):\n\n def testHist(self):\n for nelem in [1,23]:\n for nbins in [1,2,17]:\n for start in [-10,0,3]:\n for d in [1,11]:\n #nelem = 10\n #nbins = 2 \n #start = -10\n end = start+d\n kernel='flat'\n bandwidth = 1e-12\n \n testModel = makeModel(nelem,nbins,start,end,kernel,bandwidth)\n\n for i in range(0,5):\n valuesArray = numpy.zeros((1,nelem))\n weightsArray = numpy.zeros((1,nelem))\n factorsArray = numpy.zeros((1,nbins))\n \n for j in range(nelem):\n valuesArray[0,j] = i*j+j*0.2-i*0.3+i*i+0.01\n weightsArray[0,j] = i*i-10*j+i*j*j-0.25*i-2\n for j in range(nbins):\n factorsArray[0,j] = i*i*j-j*0.5+i*i*0.07-3\n \n histArray = testModel.getHist(valuesArray,weightsArray)[0]\n histArrayRef = numpy.histogram(\n valuesArray[0,:], \n bins=nbins, \n range=(start,end),\n weights=weightsArray[0,:]\n )\n for j in range(nbins):\n self.assertEqual(histArray[j],histArrayRef[0][j])\n \n \n def testGrad(self):\n for nelem in [1,11]:\n for nbins in [1,17]:\n for start in [-10,0,3]:\n for d in [1,11]:\n for bandwidth in [1e-12,0.1,2]:\n #nelem = 10\n #nbins = 2 \n #start = -10\n end = start+d\n kernel='flat'\n \n testModel = makeModel(nelem,nbins,start,end,kernel,bandwidth)\n \n sess = get_session()\n\n for i in range(3):\n valuesArray = numpy.zeros((1,nelem))\n weightsArray = numpy.zeros((1,nelem))\n factorsArray = numpy.zeros((1,nbins))\n \n for j in range(nelem):\n 
valuesArray[0,j] = i*j+j*0.2-i*0.3+i*i+0.01\n weightsArray[0,j] = i*i-10*j+i*j*j-0.25*i-2\n for j in range(nbins):\n factorsArray[0,j] = i*i*j-j*0.5+i*i*0.07-3\n \n gradientsList = testModel.getGrad(\n valuesArray,\n weightsArray,\n factorsArray\n )\n \n \n for j in range(nelem):\n hV = 1e-2*(end-start)/nbins\n hW = math.fabs(weightsArray[0,j]*1e-2)+1e-6\n diff = numpy.zeros(valuesArray.shape)\n diff[0,j]=1.\n scoreValueDiff = (testModel.getScore(\n valuesArray+diff*hV,\n weightsArray,\n factorsArray\n ) - testModel.getScore(\n valuesArray-diff*hV,\n weightsArray,\n factorsArray\n ))/(2*hV)\n scoreWeightDiff = (testModel.getScore(\n valuesArray,\n weightsArray+diff*hW,\n factorsArray\n ) - testModel.getScore(\n valuesArray,\n weightsArray-diff*hW,\n factorsArray\n ))/(2*hW)\n '''\n if bandwidth>hV:\n print (\n j,\n gradientsList[0][0,j],\n scoreValueDiff,\n gradientsList[0][0,j]-scoreValueDiff,\n hV\n )\n \n self.assertTrue(\n math.fabs(gradientsList[0][0,j]-scoreValueDiff)<(20*hV)\n )\n '''\n self.assertTrue(\n math.fabs(gradientsList[1][0,j]-scoreWeightDiff)<(2*hW)\n )\n \n \nclass HistogramMaxSampleTest(unittest.TestCase):\n def testHistSingle(self):\n sess = get_session()\n for n in range(2,200,10):\n hists = tf.compat.v1.placeholder(tf.float32, shape=(1, n,1))\n histMax = histogram_max_sample_module.histogram_max_sample(hists)\n for i in range(hists.shape[1]):\n val = numpy.zeros(hists.shape)\n val[0,i,0] = 1\n self.assertEqual(sess.run(histMax,feed_dict={\n hists:val\n })[0,0],i)\n \n \n def testHistSample(self):\n sess = get_session()\n hists = tf.compat.v1.placeholder(tf.float32, shape=(100, 200,1))\n histMax = histogram_max_sample_module.histogram_max_sample(hists)\n \n val = numpy.zeros(hists.shape)\n \n for b in range(hists.shape[0]):\n for n in range(5):\n i = int(numpy.random.uniform(0,int(hists.shape[1])))\n val[b,i,0] = numpy.random.uniform(0.1,0.9)\n val/=numpy.sum(val,axis=1,keepdims=True)\n \n \n result = numpy.zeros(hists.shape)\n \n for t in range(10000):\n sampled = sess.run(histMax,feed_dict={hists:val})\n for b in range(hists.shape[0]):\n result[b,int(sampled[b,0]),0] += 1.\n \n result/=numpy.sum(result,axis=1,keepdims=True)\n \n p = 0\n f = 0 \n for b in range(hists.shape[0]):\n for i in range(hists.shape[1]):\n if val[b,i,0]>0.01:\n if math.fabs(val[b,i,0]-result[b,i,0])/val[b,i,0]<0.1:\n p += 1\n else:\n f += 1\n #require >90% to pass\n self.assertTrue(f<0.1*p)\n \n \n\nif __name__ == '__main__':\n test_suite = unittest.TestSuite()\n test_suite.addTest(KDETest('testHist'))\n test_suite.addTest(KDETest('testGrad'))\n test_suite.addTest(HistogramMaxSampleTest('testHistSingle'))\n test_suite.addTest(HistogramMaxSampleTest('testHistSample'))\n unittest.runner.TextTestRunner(verbosity=2).run(test_suite)\n \n \n" ]
[ [ "tensorflow.multiply", "numpy.histogram", "tensorflow.reduce_sum", "tensorflow.gradients", "tensorflow.keras.Model", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.disable_eager_execution", "numpy.random.uniform", "numpy.zeros", "numpy.sum", "tensorflow.keras.layers.Input" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
wenh06/colour
[ "445fdad2711ae39c95b4375166905568d24a95f4", "445fdad2711ae39c95b4375166905568d24a95f4" ]
[ "colour/plotting/characterisation.py", "colour/colorimetry/generation.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCharacterisation Plotting\n=========================\n\nDefines the characterisation plotting objects:\n\n- :func:`colour.plotting.plot_single_colour_checker`\n- :func:`colour.plotting.plot_multi_colour_checkers`\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom colour.models import xyY_to_XYZ\nfrom colour.plotting import (\n CONSTANTS_COLOUR_STYLE, ColourSwatch, XYZ_to_plotting_colourspace, artist,\n filter_colour_checkers, plot_multi_colour_swatches, override_style, render)\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = ['plot_single_colour_checker', 'plot_multi_colour_checkers']\n\n\n@override_style(\n **{\n 'axes.grid': False,\n 'xtick.bottom': False,\n 'ytick.left': False,\n 'xtick.labelbottom': False,\n 'ytick.labelleft': False,\n })\ndef plot_single_colour_checker(\n colour_checker='ColorChecker24 - After November 2014', **kwargs):\n \"\"\"\n Plots given colour checker.\n\n Parameters\n ----------\n colour_checker : unicode or ColourChecker, optional\n Color checker to plot. ``colour_checker`` can be of any type or form\n supported by the\n :func:`colour.plotting.filter_colour_checkers` definition.\n\n Other Parameters\n ----------------\n \\\\**kwargs : dict, optional\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.plot_multi_colour_swatches`,\n :func:`colour.plotting.render`},\n Please refer to the documentation of the previously listed definitions.\n\n Returns\n -------\n tuple\n Current figure and axes.\n\n Examples\n --------\n >>> plot_single_colour_checker('ColorChecker 2005') # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, <...AxesSubplot...>)\n\n .. image:: ../_static/Plotting_Plot_Single_Colour_Checker.png\n :align: center\n :alt: plot_single_colour_checker\n \"\"\"\n\n return plot_multi_colour_checkers([colour_checker], **kwargs)\n\n\n@override_style(\n **{\n 'axes.grid': False,\n 'xtick.bottom': False,\n 'ytick.left': False,\n 'xtick.labelbottom': False,\n 'ytick.labelleft': False,\n })\ndef plot_multi_colour_checkers(colour_checkers, **kwargs):\n \"\"\"\n Plots and compares given colour checkers.\n\n Parameters\n ----------\n colour_checkers : unicode or ColourChecker or array_like\n Color checker to plot, count must be less than or equal to 2.\n ``colour_checkers`` elements can be of any type or form supported by\n the :func:`colour.plotting.filter_colour_checkers` definition.\n\n Other Parameters\n ----------------\n \\\\**kwargs : dict, optional\n {:func:`colour.plotting.artist`,\n :func:`colour.plotting.plot_multi_colour_swatches`,\n :func:`colour.plotting.render`},\n Please refer to the documentation of the previously listed definitions.\n\n Returns\n -------\n tuple\n Current figure and axes.\n\n Examples\n --------\n >>> plot_multi_colour_checkers(['ColorChecker 1976', 'ColorChecker 2005'])\n ... # doctest: +ELLIPSIS\n (<Figure size ... with 1 Axes>, <...AxesSubplot...>)\n\n .. 
image:: ../_static/Plotting_Plot_Multi_Colour_Checkers.png\n :align: center\n :alt: plot_multi_colour_checkers\n \"\"\"\n\n assert len(colour_checkers) <= 2, (\n 'Only two colour checkers can be compared at a time!')\n\n colour_checkers = filter_colour_checkers(colour_checkers).values()\n\n _figure, axes = artist(**kwargs)\n\n compare_swatches = len(colour_checkers) == 2\n\n colour_swatches = []\n colour_checker_names = []\n for colour_checker in colour_checkers:\n colour_checker_names.append(colour_checker.name)\n for label, xyY in colour_checker.data.items():\n XYZ = xyY_to_XYZ(xyY)\n RGB = XYZ_to_plotting_colourspace(XYZ, colour_checker.illuminant)\n colour_swatches.append(\n ColourSwatch(label.title(), np.clip(np.ravel(RGB), 0, 1)))\n\n if compare_swatches:\n colour_swatches = [\n swatch\n for pairs in zip(colour_swatches[0:len(colour_swatches) // 2],\n colour_swatches[len(colour_swatches) // 2:])\n for swatch in pairs\n ]\n\n background_colour = '0.1'\n width = height = 1.0\n spacing = 0.25\n columns = 6\n\n settings = {\n 'axes': axes,\n 'width': width,\n 'height': height,\n 'spacing': spacing,\n 'columns': columns,\n 'direction': '-y',\n 'text_kwargs': {\n 'size': 8\n },\n 'background_colour': background_colour,\n 'compare_swatches': 'Stacked' if compare_swatches else None,\n }\n settings.update(kwargs)\n settings['standalone'] = False\n\n plot_multi_colour_swatches(colour_swatches, **settings)\n\n axes.text(\n 0.5,\n 0.005,\n '{0} - {1} - Colour Rendition Chart'.format(\n ', '.join(colour_checker_names),\n CONSTANTS_COLOUR_STYLE.colour.colourspace.name),\n transform=axes.transAxes,\n color=CONSTANTS_COLOUR_STYLE.colour.bright,\n ha='center',\n va='bottom')\n\n settings.update({\n 'axes': axes,\n 'standalone': True,\n 'title': ', '.join(colour_checker_names),\n })\n\n return render(**settings)\n", "# -*- coding: utf-8 -*-\n\"\"\"\nSpectral Generation\n===================\n\nDefines various objects performing spectral generation:\n\n- :func:`colour.sd_constant`\n- :func:`colour.sd_zeros`\n- :func:`colour.sd_ones`\n- :func:`colour.msds_constant`\n- :func:`colour.msds_zeros`\n- :func:`colour.msds_ones`\n- :func:`colour.colorimetry.sd_gaussian_normal`\n- :func:`colour.colorimetry.sd_gaussian_fwhm`\n- :attr:`colour.SD_GAUSSIAN_METHODS`\n- :func:`colour.sd_gaussian`\n- :func:`colour.colorimetry.sd_single_led_Ohno2005`\n- :attr:`colour.SD_SINGLE_LED_METHODS`\n- :func:`colour.sd_single_led`\n- :func:`colour.colorimetry.sd_multi_leds_Ohno2005`\n- :attr:`colour.SD_MULTI_LEDS_METHODS`\n- :func:`colour.sd_multi_leds`\n\nReferences\n----------\n- :cite:`Ohno2005` : Ohno, Yoshi. (2005). Spectral design considerations for\n white LED color rendering. Optical Engineering, 44(11), 111302.\n doi:10.1117/1.2130694\n- :cite:`Ohno2008a` : Ohno, Yoshiro, & Davis, W. (2008). 
NIST CQS simulation\n (Version 7.4) [Computer software].\n https://drive.google.com/file/d/1PsuU6QjUJjCX6tQyCud6ul2Tbs8rYWW9/view?\\\nusp=sharing\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\n\nfrom colour.constants import DEFAULT_FLOAT_DTYPE\nfrom colour.colorimetry import (\n SPECTRAL_SHAPE_DEFAULT, MultiSpectralDistributions, SpectralDistribution)\nfrom colour.utilities import CaseInsensitiveMapping, as_float_array, full, ones\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'sd_constant', 'sd_zeros', 'sd_ones', 'msds_constant', 'msds_zeros',\n 'msds_ones', 'sd_gaussian_normal', 'sd_gaussian_fwhm',\n 'SD_GAUSSIAN_METHODS', 'sd_gaussian', 'sd_single_led_Ohno2005',\n 'SD_SINGLE_LED_METHODS', 'sd_single_led', 'sd_multi_leds_Ohno2005',\n 'SD_MULTI_LEDS_METHODS', 'sd_multi_leds'\n]\n\n\ndef sd_constant(k, shape=SPECTRAL_SHAPE_DEFAULT, dtype=None):\n \"\"\"\n Returns a spectral distribution of given spectral shape filled with\n constant :math:`k` values.\n\n Parameters\n ----------\n k : numeric\n Constant :math:`k` to fill the spectral distribution with.\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n dtype : type\n Data type used for the spectral distribution.\n\n Returns\n -------\n SpectralDistribution\n Constant :math:`k` filled spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n Examples\n --------\n >>> sd = sd_constant(100)\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[400]\n 100.0\n \"\"\"\n\n if dtype is None:\n dtype = DEFAULT_FLOAT_DTYPE\n\n wavelengths = shape.range(dtype)\n values = full(len(wavelengths), k, dtype)\n\n name = '{0} Constant'.format(k)\n return SpectralDistribution(values, wavelengths, name=name, dtype=dtype)\n\n\ndef sd_zeros(shape=SPECTRAL_SHAPE_DEFAULT):\n \"\"\"\n Returns a spectral distribution of given spectral shape filled with zeros.\n\n Parameters\n ----------\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n\n Returns\n -------\n SpectralDistribution\n Zeros filled spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n Examples\n --------\n >>> sd = sd_zeros()\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[400]\n 0.0\n \"\"\"\n\n return sd_constant(0, shape)\n\n\ndef sd_ones(shape=SPECTRAL_SHAPE_DEFAULT):\n \"\"\"\n Returns a spectral distribution of given spectral shape filled with ones.\n\n Parameters\n ----------\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n\n Returns\n -------\n SpectralDistribution\n Ones filled spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n Examples\n --------\n >>> sd = sd_ones()\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[400]\n 1.0\n \"\"\"\n\n return sd_constant(1, shape)\n\n\ndef msds_constant(k, labels, shape=SPECTRAL_SHAPE_DEFAULT, dtype=None):\n \"\"\"\n Returns the multi-spectral distributions with given labels and given\n 
spectral shape filled with constant :math:`k` values.\n\n    Parameters\n    ----------\n    k : numeric\n        Constant :math:`k` to fill the multi-spectral distributions with.\n    labels : array_like\n        Names to use for the :class:`colour.SpectralDistribution` class\n        instances.\n    shape : SpectralShape, optional\n        Spectral shape used to create the multi-spectral distributions.\n    dtype : type\n        Data type used for the multi-spectral distributions.\n\n    Returns\n    -------\n    MultiSpectralDistributions\n        Constant :math:`k` filled multi-spectral distributions.\n\n    Notes\n    -----\n    -   By default, the multi-spectral distributions will use the shape given\n        by :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n    Examples\n    --------\n    >>> msds = msds_constant(100, labels=['a', 'b', 'c'])\n    >>> msds.shape\n    SpectralShape(360.0, 780.0, 1.0)\n    >>> msds[400]\n    array([ 100.,  100.,  100.])\n    >>> msds.labels  # doctest: +SKIP\n    ['a', 'b', 'c']\n    \"\"\"\n\n    if dtype is None:\n        dtype = DEFAULT_FLOAT_DTYPE\n\n    wavelengths = shape.range(dtype)\n    values = full([len(wavelengths), len(labels)], k, dtype)\n\n    name = '{0} Constant'.format(k)\n    return MultiSpectralDistributions(\n        values, wavelengths, name=name, labels=labels, dtype=dtype)\n\n\ndef msds_zeros(labels, shape=SPECTRAL_SHAPE_DEFAULT):\n    \"\"\"\n    Returns the multi-spectral distributions with given labels and given\n    spectral shape filled with zeros.\n\n    Parameters\n    ----------\n    labels : array_like\n        Names to use for the :class:`colour.SpectralDistribution` class\n        instances.\n    shape : SpectralShape, optional\n        Spectral shape used to create the multi-spectral distributions.\n\n    Returns\n    -------\n    MultiSpectralDistributions\n        Zeros filled multi-spectral distributions.\n\n    Notes\n    -----\n    -   By default, the multi-spectral distributions will use the shape given\n        by :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n    Examples\n    --------\n    >>> msds = msds_zeros(labels=['a', 'b', 'c'])\n    >>> msds.shape\n    SpectralShape(360.0, 780.0, 1.0)\n    >>> msds[400]\n    array([ 0.,  0.,  0.])\n    >>> msds.labels  # doctest: +SKIP\n    ['a', 'b', 'c']\n    \"\"\"\n\n    return msds_constant(0, labels, shape)\n\n\ndef msds_ones(labels, shape=SPECTRAL_SHAPE_DEFAULT):\n    \"\"\"\n    Returns the multi-spectral distributions with given labels and given\n    spectral shape filled with ones.\n\n    Parameters\n    ----------\n    labels : array_like\n        Names to use for the :class:`colour.SpectralDistribution` class\n        instances.\n    shape : SpectralShape, optional\n        Spectral shape used to create the multi-spectral distributions.\n\n    Returns\n    -------\n    MultiSpectralDistributions\n        Ones filled multi-spectral distributions.\n\n    Notes\n    -----\n    -   By default, the multi-spectral distributions will use the shape given\n        by :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n    Examples\n    --------\n    >>> msds = msds_ones(labels=['a', 'b', 'c'])\n    >>> msds.shape\n    SpectralShape(360.0, 780.0, 1.0)\n    >>> msds[400]\n    array([ 1.,  1.,  1.])\n    >>> msds.labels  # doctest: +SKIP\n    ['a', 'b', 'c']\n    \"\"\"\n\n    return msds_constant(1, labels, shape)\n\n\ndef sd_gaussian_normal(mu, sigma, shape=SPECTRAL_SHAPE_DEFAULT):\n    \"\"\"\n    Returns a gaussian spectral distribution of given spectral shape at\n    given mean wavelength :math:`\\\\mu` and standard deviation :math:`sigma`.\n\n    Parameters\n    ----------\n    mu : numeric\n        Mean wavelength :math:`\\\\mu` the gaussian spectral distribution will\n        peak at.\n    sigma : numeric\n        Standard deviation :math:`sigma` of the gaussian spectral distribution.\n    shape : SpectralShape, optional\n        Spectral shape used to create the 
spectral distribution.\n\n Returns\n -------\n SpectralDistribution\n Gaussian spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n Examples\n --------\n >>> sd = sd_gaussian_normal(555, 25)\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[555] # doctest: +ELLIPSIS\n 1.0000000...\n >>> sd[530] # doctest: +ELLIPSIS\n 0.6065306...\n \"\"\"\n\n wavelengths = shape.range()\n\n values = np.exp(-(wavelengths - mu) ** 2 / (2 * sigma ** 2.))\n\n name = '{0}nm - {1} Sigma - Gaussian'.format(mu, sigma)\n\n return SpectralDistribution(values, wavelengths, name=name)\n\n\ndef sd_gaussian_fwhm(peak_wavelength, fwhm, shape=SPECTRAL_SHAPE_DEFAULT):\n \"\"\"\n Returns a gaussian spectral distribution of given spectral shape at given\n peak wavelength and full width at half maximum.\n\n Parameters\n ----------\n peak_wavelength : numeric\n Wavelength the gaussian spectral distribution will peak at.\n fwhm : numeric\n Full width at half maximum, i.e. width of the gaussian spectral\n distribution measured between those points on the *y* axis which are\n half the maximum amplitude.\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n\n Returns\n -------\n SpectralDistribution\n Gaussian spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n Examples\n --------\n >>> sd = sd_gaussian_fwhm(555, 25)\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[555]\n 1.0\n >>> sd[530] # doctest: +ELLIPSIS\n 0.3678794...\n \"\"\"\n\n wavelengths = shape.range()\n\n values = np.exp(-((wavelengths - peak_wavelength) / fwhm) ** 2)\n\n name = '{0}nm - {1} FWHM - Gaussian'.format(peak_wavelength, fwhm)\n\n return SpectralDistribution(values, wavelengths, name=name)\n\n\nSD_GAUSSIAN_METHODS = CaseInsensitiveMapping({\n 'Normal': sd_gaussian_normal,\n 'FWHM': sd_gaussian_fwhm\n})\nSD_GAUSSIAN_METHODS.__doc__ = \"\"\"\nSupported gaussian spectral distribution computation methods.\n\nSD_GAUSSIAN_METHODS : CaseInsensitiveMapping\n **{'Normal', 'FWHM'}**\n\"\"\"\n\n\ndef sd_gaussian(mu_peak_wavelength,\n sigma_fwhm,\n shape=SPECTRAL_SHAPE_DEFAULT,\n method='Normal'):\n \"\"\"\n Returns a gaussian spectral distribution of given spectral shape using\n given method.\n\n Parameters\n ----------\n mu_peak_wavelength : numeric\n Mean wavelength :math:`\\\\mu` the gaussian spectral distribution will\n peak at.\n sigma_fwhm : numeric\n Standard deviation :math:`sigma` of the gaussian spectral distribution\n or Full width at half maximum, i.e. 
width of the gaussian spectral\n distribution measured between those points on the *y* axis which are\n half the maximum amplitude.\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n method : unicode, optional\n **{'Normal', 'FWHM'}**,\n Computation method.\n\n Returns\n -------\n SpectralDistribution\n Gaussian spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n Examples\n --------\n >>> sd = sd_gaussian(555, 25)\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[555] # doctest: +ELLIPSIS\n 1.0000000...\n >>> sd[530] # doctest: +ELLIPSIS\n 0.6065306...\n >>> sd = sd_gaussian(555, 25, method='FWHM')\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[555]\n 1.0\n >>> sd[530] # doctest: +ELLIPSIS\n 0.3678794...\n \"\"\"\n\n return SD_GAUSSIAN_METHODS[method](mu_peak_wavelength, sigma_fwhm, shape)\n\n\ndef sd_single_led_Ohno2005(peak_wavelength, fwhm,\n shape=SPECTRAL_SHAPE_DEFAULT):\n \"\"\"\n Returns a single *LED* spectral distribution of given spectral shape at\n given peak wavelength and full width at half maximum according to\n *Ohno (2005)* method.\n\n Parameters\n ----------\n peak_wavelength : numeric\n Wavelength the single *LED* spectral distribution will peak at.\n fwhm : numeric\n Full width at half maximum, i.e. width of the underlying gaussian\n spectral distribution measured between those points on the *y* axis\n which are half the maximum amplitude.\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n\n Returns\n -------\n SpectralDistribution\n Single *LED* spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n References\n ----------\n :cite:`Ohno2005`, :cite:`Ohno2008a`\n\n Examples\n --------\n >>> sd = sd_single_led_Ohno2005(555, 25)\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[555] # doctest: +ELLIPSIS\n 1.0000000...\n \"\"\"\n\n sd = sd_gaussian_fwhm(peak_wavelength, fwhm, shape)\n\n sd.values = (sd.values + 2 * sd.values ** 5) / 3\n\n sd.name = '{0}nm - {1} FWHM LED - Ohno (2005)'.format(\n peak_wavelength, fwhm)\n\n return sd\n\n\nSD_SINGLE_LED_METHODS = CaseInsensitiveMapping({\n 'Ohno 2005': sd_single_led_Ohno2005,\n})\nSD_SINGLE_LED_METHODS.__doc__ = \"\"\"\nSupported single *LED* spectral distribution computation methods.\n\nSD_SINGLE_LED_METHODS : CaseInsensitiveMapping\n **{'Ohno 2005'}**\n\"\"\"\n\n\ndef sd_single_led(peak_wavelength,\n fwhm,\n shape=SPECTRAL_SHAPE_DEFAULT,\n method='Ohno 2005'):\n \"\"\"\n Returns a single *LED* spectral distribution of given spectral shape at\n given peak wavelength and full width at half maximum according to given\n method.\n\n Parameters\n ----------\n peak_wavelength : numeric\n Wavelength the single *LED* spectral distribution will peak at.\n fwhm : numeric\n Full width at half maximum, i.e. 
width of the underlying gaussian\n spectral distribution measured between those points on the *y*\n axis which are half the maximum amplitude.\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n method : unicode, optional\n **{'Ohno 2005'}**,\n Computation method.\n\n Returns\n -------\n SpectralDistribution\n Single *LED* spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n References\n ----------\n :cite:`Ohno2005`, :cite:`Ohno2008a`\n\n Examples\n --------\n >>> sd = sd_single_led(555, 25)\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[555] # doctest: +ELLIPSIS\n 1.0000000...\n \"\"\"\n\n return SD_SINGLE_LED_METHODS[method](peak_wavelength, fwhm, shape)\n\n\ndef sd_multi_leds_Ohno2005(peak_wavelengths,\n fwhm,\n peak_power_ratios=None,\n shape=SPECTRAL_SHAPE_DEFAULT):\n \"\"\"\n Returns a multi *LED* spectral distribution of given spectral shape at\n given peak wavelengths and full widths at half maximum according to\n *Ohno (2005)* method.\n\n The multi *LED* spectral distribution is generated using many single\n *LED* spectral distributions generated with\n :func:`colour.sd_single_led_Ohno2005` definition.\n\n Parameters\n ----------\n peak_wavelengths : array_like\n Wavelengths the multi *LED* spectral distribution will peak at, i.e.\n the peaks for each generated single *LED* spectral distributions.\n fwhm : array_like\n Full widths at half maximum, i.e. widths of the underlying gaussian\n spectral distributions measured between those points on the *y* axis\n which are half the maximum amplitude.\n peak_power_ratios : array_like, optional\n Peak power ratios for each generated single *LED* spectral\n distributions.\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n\n Returns\n -------\n SpectralDistribution\n Multi *LED* spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n References\n ----------\n :cite:`Ohno2005`, :cite:`Ohno2008a`\n\n Examples\n --------\n >>> sd = sd_multi_leds_Ohno2005(\n ... np.array([457, 530, 615]),\n ... np.array([20, 30, 20]),\n ... np.array([0.731, 1.000, 1.660]),\n ... 
)\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[500] # doctest: +ELLIPSIS\n 0.1295132...\n \"\"\"\n\n peak_wavelengths = as_float_array(peak_wavelengths)\n fwhm = np.resize(fwhm, peak_wavelengths.shape)\n if peak_power_ratios is None:\n peak_power_ratios = ones(peak_wavelengths.shape)\n else:\n peak_power_ratios = np.resize(peak_power_ratios,\n peak_wavelengths.shape)\n\n sd = sd_zeros(shape)\n\n for (peak_wavelength, fwhm_s, peak_power_ratio) in zip(\n peak_wavelengths, fwhm, peak_power_ratios):\n sd += sd_single_led_Ohno2005(peak_wavelength,\n fwhm_s) * peak_power_ratio\n\n def _format_array(a):\n \"\"\"\n Formats given array :math:`a`.\n\n Parameters\n ----------\n a : array_like\n Array to format\n\n Returns\n -------\n unicode\n Formatted array :math:`a`.\n \"\"\"\n\n return ', '.join([str(e) for e in a])\n\n sd.name = (\n '{0}nm - {1}FWHM - {2} Peak Power Ratios - LED - Ohno (2005)'.format(\n _format_array(peak_wavelengths),\n _format_array(fwhm),\n _format_array(peak_power_ratios),\n ))\n\n return sd\n\n\nSD_MULTI_LEDS_METHODS = CaseInsensitiveMapping({\n 'Ohno 2005': sd_multi_leds_Ohno2005,\n})\nSD_MULTI_LEDS_METHODS.__doc__ = \"\"\"\nSupported multi *LED* spectral distribution computation methods.\n\nSD_MULTI_LEDS_METHODS : CaseInsensitiveMapping\n **{'Ohno 2005'}**\n\"\"\"\n\n\ndef sd_multi_leds(peak_wavelengths,\n fwhm,\n peak_power_ratios=None,\n shape=SPECTRAL_SHAPE_DEFAULT,\n method='Ohno 2005'):\n \"\"\"\n Returns a multi *LED* spectral distribution of given spectral shape at\n given peak wavelengths and full widths at half maximum according to given\n method.\n\n Parameters\n ----------\n peak_wavelengths : array_like\n Wavelengths the multi *LED* spectral distribution will peak at, i.e.\n the peaks for each generated single *LED* spectral distributions.\n fwhm : array_like\n Full widths at half maximum, i.e. widths of the underlying gaussian\n spectral distributions measured between those points on the *y* axis\n which are half the maximum amplitude.\n peak_power_ratios : array_like, optional\n Peak power ratios for each generated single *LED* spectral\n distributions.\n shape : SpectralShape, optional\n Spectral shape used to create the spectral distribution.\n method : unicode, optional\n **{'Ohno 2005'}**,\n Computation method.\n\n Returns\n -------\n SpectralDistribution\n Multi *LED* spectral distribution.\n\n Notes\n -----\n - By default, the spectral distribution will use the shape given by\n :attr:`colour.SPECTRAL_SHAPE_DEFAULT` attribute.\n\n References\n ----------\n :cite:`Ohno2005`, :cite:`Ohno2008a`\n\n Examples\n --------\n >>> sd = sd_multi_leds(\n ... np.array([457, 530, 615]),\n ... np.array([20, 30, 20]),\n ... np.array([0.731, 1.000, 1.660]),\n ... )\n >>> sd.shape\n SpectralShape(360.0, 780.0, 1.0)\n >>> sd[500] # doctest: +ELLIPSIS\n 0.1295132...\n \"\"\"\n\n return SD_MULTI_LEDS_METHODS[method](peak_wavelengths, fwhm,\n peak_power_ratios, shape)\n" ]
[ [ "numpy.ravel" ], [ "numpy.exp", "numpy.resize" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dglowienka/Small_DD_modules
[ "5714a40163b93a50dba2063d4422a0a1aeb7b7f3" ]
[ "Spatial/spatial_graph_calc.py" ]
[ "import matplotlib.pyplot as plt;\nimport numpy as np;\nimport math\n\ndata = np.loadtxt('spatial.txt', skiprows=1);\n\nx = data[:,0];\nV = data[:,1];\nE = data[:,2];\nn = data[:,3];\np = data[:,4];\ns = data[:,5];\nNA = data[:,6];\nNC = data[:,7];\nJn = data[:,8];\nJp = data[:,9];\nJ_NA = data[:,10];\nJ_NC = data[:,11];\nJ_disp = data[:,12];\n\nq = 1.602E-19;\nk_B = 1.38E-23;\nT = 293;\nC_n = 1E-14;\nC_p = 1E-14;\nN_t = 1E21;\nN_c = 1E27;\nN_v = 1E27;\nE_c = 3.9*q;\nE_v = -5.4*q;\nE_t = 0.15*q;\nn1 = N_c*math.exp((E_t-E_c) / (k_B*T));\np1 = N_v*math.exp((E_v - E_t) / (k_B*T));\nk1 = ((C_n*C_p*N_t) / (C_n*(n+n1) + C_p*(p+p1))) * n*p;\ntest = n*p/(n+p);\n\nplt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k');\n# k1\nplt.plot(x, k1)\nplt.yscale('log')\nplt.title('k1')\nplt.grid(True)" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.yscale", "matplotlib.pyplot.plot", "matplotlib.pyplot.grid", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anArkitek/TriNet_WACV2021
[ "760faad04aa697dfba752ec46661fe938665f23b" ]
[ "lib/FSANET_model.py" ]
[ "import sys\nimport logging\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom keras.models import Model\nfrom keras.applications.resnet50 import ResNet50\n\nfrom keras.layers import Input\nfrom keras.layers import Dense\nfrom keras.layers import Conv2D\nfrom keras.layers import Layer\nfrom keras.layers import Reshape\nfrom keras.layers import Multiply\nfrom keras.layers import Flatten\nfrom keras.layers import Activation\nfrom keras.layers import Concatenate\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import SeparableConv2D\nfrom keras.layers import AveragePooling2D\nfrom keras.layers import BatchNormalization\n\n\nfrom keras import backend as K\n\nfrom .capsulelayers import CapsuleLayer\nfrom .capsulelayers import MatMulLayer\n\nfrom .utils import register_keras_custom_object\n\nsys.setrecursionlimit(2 ** 20)\nnp.random.seed(2 ** 10)\n\n# Custom layers\n# Note - Usage of Lambda layers prevent the convertion\n# and the optimizations by the underlying math engine (tensorflow in this case)\n\n@register_keras_custom_object\nclass SSRLayer(Layer):\n def __init__(self, s1, s2, s3, lambda_d, **kwargs):\n super(SSRLayer, self).__init__(**kwargs)\n self.s1 = s1\n self.s2 = s2\n self.s3 = s3\n self.lambda_d = lambda_d\n self.trainable = False\n\n def call(self, inputs):\n #inputs shape: (?,3,39)\n x = inputs\n a = x[:, :, 0] * 0\n b = x[:, :, 0] * 0\n c = x[:, :, 0] * 0\n \n s1 = 3\n s2 = 9\n s3 = 27\n \n di = s1 // 2 \n dj = s2 // 2 \n dk = s3 // 2\n \n V = 1\n\n #s1 = 3\n # i = 0, 1, 2 ~> i-di = -1, 0, 1\n for i in range(0, s1):\n a = a + (i - di) * x[:, :, i]\n a = a / (s1//2)\n \n #s2 = 9\n # j - dj ~> [-4, 4]\n for j in range(0, s2):\n b = b + (j - dj) * x[:, :, j+3]\n b = b / (s2//2)\n \n \n #s3 = 27\n for k in range(0, s3):\n c = c + (k - dk) * x[:, :, k+12]\n c = c / (s3//2)\n \n pred = (a+b+c) / 3\n\n\n return pred\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], 3)\n\n def get_config(self):\n config = {\n 's1': self.s1,\n 's2': self.s2,\n 's3': self.s3,\n 'lambda_d': self.lambda_d\n }\n base_config = super(SSRLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n\n@register_keras_custom_object\nclass FeatSliceLayer(Layer):\n def __init__(self, start_index, end_index, **kwargs):\n super(FeatSliceLayer, self).__init__(**kwargs)\n self.start_index = start_index\n self.end_index = end_index\n self.trainable = False\n\n def call(self, inputs): \n return inputs[:,self.start_index:self.end_index]\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.end_index - self.start_index)\n\n def get_config(self):\n config = {\n 'start_index': self.start_index,\n 'end_index': self.end_index\n }\n base_config = super(FeatSliceLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n@register_keras_custom_object\nclass MomentsLayer(Layer):\n def __init__(self, **kwargs):\n super(MomentsLayer,self).__init__(**kwargs)\n self.trainable = False\n\n def call(self, inputs): \n _, var = tf.nn.moments(inputs,axes=-1)\n #var : (batch_size, feature_map_width, feature_map_height)\n return var\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], input_shape[-1])\n\n@register_keras_custom_object\nclass MatrixMultiplyLayer(Layer):\n def __init__(self, **kwargs):\n super(MatrixMultiplyLayer,self).__init__(**kwargs)\n self.trainable = False\n\n def call(self, inputs): \n x1, x2 = inputs\n # TODO: add some asserts on the inputs\n # it is expected the 
shape of inputs are \n # arranged to be able to perform the matrix multiplication\n return tf.matmul(x1,x2)\n\n def compute_output_shape(self, input_shapes): \n return (input_shapes[0][0],input_shapes[0][1], input_shapes[1][-1])\n\n@register_keras_custom_object\nclass MatrixNormLayer(Layer):\n def __init__(self, tile_count, **kwargs):\n super(MatrixNormLayer,self).__init__(**kwargs)\n self.trainable = False\n self.tile_count = tile_count\n\n def call(self, input): \n sum = K.sum(input,axis=-1,keepdims=True) \n tiled = K.tile(sum,(1,1,self.tile_count)) \n return tiled\n\n def compute_output_shape(self, input_shape): \n return (input_shape[0], input_shape[1], self.tile_count)\n\n def get_config(self):\n config = {\n 'tile_count': self.tile_count\n }\n base_config = super(MatrixNormLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n@register_keras_custom_object\nclass PrimCapsLayer(Layer):\n def __init__(self, **kwargs):\n super(PrimCapsLayer,self).__init__(**kwargs)\n self.trainable = False \n\n def call(self, inputs): \n x1, x2, norm = inputs\n return tf.matmul(x1,x2) / norm\n\n def compute_output_shape(self, input_shapes): \n return input_shapes[-1]\n\n@register_keras_custom_object\nclass AggregatedFeatureExtractionLayer(Layer):\n def __init__(self, num_capsule, **kwargs):\n super(AggregatedFeatureExtractionLayer,self).__init__(**kwargs)\n self.trainable = False\n self.num_capsule = num_capsule\n\n def call(self, input): \n s1_a = 0\n s1_b = self.num_capsule//3\n # input[:, 0: 1, :]\n feat_s1_div = input[:,s1_a:s1_b,:]\n s2_a = self.num_capsule//3\n s2_b = 2*self.num_capsule//3\n # input[:, 1: 2, :]\n feat_s2_div = input[:,s2_a:s2_b,:]\n s3_a = 2*self.num_capsule//3\n s3_b = self.num_capsule\n # input[:, 2: 3, :]\n feat_s3_div = input[:,s3_a:s3_b,:]\n\n return [feat_s1_div, feat_s2_div, feat_s3_div]\n\n def compute_output_shape(self, input_shape): \n last_dim = input_shape[-1]\n partition = self.num_capsule//3\n return [(input_shape[0], partition, last_dim), (input_shape[0], partition, last_dim), (input_shape[0], partition, last_dim)]\n\n def get_config(self):\n config = {\n 'num_capsule': self.num_capsule\n }\n base_config = super(AggregatedFeatureExtractionLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass BaseFSANet(object):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n '''\n Args:\n image_size : 64;\n num_classes : 3; roll, pitch, yaw\n stage_num : [3, 3, 3]; # of bins in each stage\n lambda_d : 1.0; Control Delta\n S_set : []; Parameters of Capsules\n '''\n \n self._channel_axis = 3 if K.image_data_format() == 'channels_last' else 1\n\n if self._channel_axis == 1:\n logging.debug(\"image_dim_ordering = 'th'\")\n self._input_shape = (3, image_size, image_size)\n else:\n logging.debug(\"image_dim_ordering = 'tf'\")\n self._input_shape = (image_size, image_size, 3)\n\n\n self.num_classes = num_classes\n self.stage_num = stage_num\n self.lambda_d = lambda_d\n\n ''''\n num_capsule = 3\n dim_capsule = 16\n routings = 2\n\n num_primcaps = 7*3 or 8*8*3\n m_dim = 5\n '''\n self.num_capsule = S_set[0]\n self.dim_capsule = S_set[1]\n self.routings = S_set[2]\n\n self.num_primcaps = S_set[3]\n self.m_dim = S_set[4]\n\n # ? F_shape = 16\n self.F_shape = int(self.num_capsule / 3) * self.dim_capsule\n # ? 
map_xy_size = 8\n self.map_xy_size = int(8 * image_size / 64)\n\n # is_fc_model\n self.is_fc_model = False\n self.is_noS_model = False\n self.is_varS_model = False\n\n def ssr_build_resnet(self, input_size):\n resnet = ResNet50(include_top=False, weights=None, input_tensor=None, input_shape=input_size, pooling=None, classes=1000)\n\n model = Model(inputs=resnet.input, outputs=[AveragePooling2D((2,2))(Conv2D(64,(1,1))(resnet.get_layer('activation_10').output)),\n Conv2D(64,(1,1))(resnet.get_layer('activation_16').output),\n Conv2D(64,(1,1))(resnet.get_layer('activation_22').output)], name='ssr_backbone')\n\n return model\n\n\n def _convBlock(self, x, num_filters, activation, kernel_size=(3,3)):\n x = SeparableConv2D(num_filters,kernel_size,padding='same')(x)\n x = BatchNormalization(axis=-1)(x)\n x = Activation(activation)(x)\n return x\n\n def ssr_F_model_build(self, feat_dim, name_F, vec_order):\n input_s1_pre = Input((feat_dim,))\n input_s2_pre = Input((feat_dim,))\n input_s3_pre = Input((feat_dim,))\n\n def _process_input(stage_index, stage_num, num_classes, input_s_pre):\n # input_s_pre : (None, 16)\n bins_num = stage_num ** stage_index\n units_num = 3 * bins_num\n assert units_num in [9, 27, 81]\n prob_all_bins = Reshape((3, bins_num))(Dense(units=units_num, \n activation='sigmoid',\n name='all_bins_{}'.format(stage_index))(input_s_pre))\n \n # delta_s : (None, 3)\n # local_s : (None, 3)\n # pred_s : (None, 3, 3)\n # return delta_s, local_s, pred_s\n\n return prob_all_bins\n ###########################################################################################\n\n # delta_s1 : (None, 3)\n # local_s1 : (None, 3)\n # pred_s1 : (None, 3, 3)\n\n # prob_s1: [None, 3, 3]\n # prob_s2: [None, 3, 9]\n # prob_s3: [None, 3, 27]\n prob_s1 = _process_input(1, self.stage_num[0], self.num_classes, input_s1_pre)\n prob_s2 = _process_input(2, self.stage_num[1], self.num_classes, input_s2_pre)\n prob_s3 = _process_input(3, self.stage_num[2], self.num_classes, input_s3_pre)\n\n # prob_merge: (None, 3, 39)\n prob_merge = Concatenate(axis=-1)([prob_s1, prob_s2, prob_s3])\n\n # return Model(inputs=[input_s1_pre,input_s2_pre,input_s3_pre],outputs=[pred_s1,pred_s2,pred_s3,delta_s1,delta_s2,delta_s3,local_s1,local_s2,local_s3], name=name_F + f'_{vec_order}')\n return Model(inputs=[input_s1_pre, input_s2_pre, input_s3_pre],\n outputs=prob_merge,\n name=name_F + '_{}'.format(vec_order))\n\n\n def ssr_FC_model_build(self, feat_dim, name_F):\n input_s1_pre = Input((feat_dim,))\n input_s2_pre = Input((feat_dim,))\n input_s3_pre = Input((feat_dim,))\n\n def _process_input(stage_index, stage_num, num_classes, input_s_pre):\n feat_delta_s = Dense(2 * num_classes, activation='tanh')(input_s_pre)\n delta_s = Dense(num_classes, activation='tanh', name='delta_s{}'.format(stage_index))(feat_delta_s)\n\n feat_local_s = Dense(2 * num_classes, activation='tanh')(input_s_pre)\n local_s = Dense(units=num_classes, activation='tanh', name='local_delta_stage{}'.format(stage_index))(feat_local_s)\n\n feat_pred_s = Dense(stage_num * num_classes,activation='relu')(input_s_pre) \n pred_s = Reshape((num_classes,stage_num))(feat_pred_s)\n\n return delta_s, local_s, pred_s\n\n delta_s1, local_s1, pred_s1 = _process_input(1, self.stage_num[0], self.num_classes, input_s1_pre)\n delta_s2, local_s2, pred_s2 = _process_input(2, self.stage_num[1], self.num_classes, input_s2_pre)\n delta_s3, local_s3, pred_s3 = _process_input(3, self.stage_num[2], self.num_classes, input_s3_pre) \n \n return 
Model(inputs=[input_s1_pre,input_s2_pre,input_s3_pre],outputs=[pred_s1,pred_s2,pred_s3,delta_s1,delta_s2,delta_s3,local_s1,local_s2,local_s3], name=name_F)\n\n\n    def ssr_feat_S_model_build(self, m_dim):\n\n        input_preS = Input((self.map_xy_size,self.map_xy_size,64))\n\n        # is_varS_model computes the variance\n        if self.is_varS_model:\n            feat_preS = MomentsLayer()(input_preS)\n        else:\n            feat_preS = Conv2D(1,(1,1),padding='same',activation='sigmoid')(input_preS)\n\n        feat_preS = Reshape((-1,))(feat_preS)\n\n        SR_matrix = Dense(m_dim*(self.map_xy_size*self.map_xy_size*3),activation='sigmoid')(feat_preS)\n        SR_matrix = Reshape((m_dim,(self.map_xy_size*self.map_xy_size*3)))(SR_matrix)\n\n        return Model(inputs=input_preS,outputs=[SR_matrix,feat_preS],name='feat_S_model')\n\n    def ssr_S_model_build(self, num_primcaps, m_dim, vec_order):\n        # Input: (8, 8, 64)\n        # s1: means stage 1?\n        input_s1_preS = Input((self.map_xy_size,self.map_xy_size,64))\n        input_s2_preS = Input((self.map_xy_size,self.map_xy_size,64))\n        input_s3_preS = Input((self.map_xy_size,self.map_xy_size,64))\n\n        # There are two choices here:\n        # use is_varS_model to decide whether to compute the variance\n        feat_S_model = self.ssr_feat_S_model_build(m_dim)\n\n        SR_matrix_s1,feat_s1_preS = feat_S_model(input_s1_preS)\n        SR_matrix_s2,feat_s2_preS = feat_S_model(input_s2_preS)\n        SR_matrix_s3,feat_s3_preS = feat_S_model(input_s3_preS)\n\n        # by default, axis=-1\n        # keep the size of the feature map the same, concatenate the channels\n        feat_pre_concat = Concatenate()([feat_s1_preS,feat_s2_preS,feat_s3_preS])\n\n        # int(num_primcaps / 3) == 7 or 8*8\n        # m_dim == 5\n        SL_matrix = Dense(int(num_primcaps / 3) * m_dim,activation='sigmoid')(feat_pre_concat)\n        SL_matrix = Reshape((int(num_primcaps/3),m_dim))(SL_matrix)\n\n        S_matrix_s1 = MatrixMultiplyLayer(name=\"S_matrix_s1\")([SL_matrix,SR_matrix_s1])\n        S_matrix_s2 = MatrixMultiplyLayer(name='S_matrix_s2')([SL_matrix,SR_matrix_s2])\n        S_matrix_s3 = MatrixMultiplyLayer(name='S_matrix_s3')([SL_matrix,SR_matrix_s3])\n\n        # Very important!!! Without this training won't converge.\n        # norm_S_s1 = Lambda(lambda x: K.tile(K.sum(x,axis=-1,keepdims=True),(1,1,64)))(S_matrix_s1)\n        norm_S_s1 = MatrixNormLayer(tile_count=64)(S_matrix_s1)\n        norm_S_s2 = MatrixNormLayer(tile_count=64)(S_matrix_s2)\n        norm_S_s3 = MatrixNormLayer(tile_count=64)(S_matrix_s3)\n\n        # map_xy_size == 8\n        # feat_sk_pre : (8, 8, 64)\n        feat_s1_pre = Reshape((self.map_xy_size*self.map_xy_size,64))(input_s1_preS)\n        feat_s2_pre = Reshape((self.map_xy_size*self.map_xy_size,64))(input_s2_preS)\n        feat_s3_pre = Reshape((self.map_xy_size*self.map_xy_size,64))(input_s3_preS)\n\n        # feat_pre_concat : (8, 24, 64)\n        feat_pre_concat = Concatenate(axis=1)([feat_s1_pre, feat_s2_pre, feat_s3_pre])\n\n        # Warning: don't use keras's 'K.dot'. 
It is very weird when high dimension is used.\n # https://github.com/keras-team/keras/issues/9779\n # Make sure 'tf.matmul' is used\n # primcaps = Lambda(lambda x: tf.matmul(x[0],x[1])/x[2])([S_matrix,feat_pre_concat, norm_S])\n primcaps_s1 = PrimCapsLayer()([S_matrix_s1,feat_pre_concat, norm_S_s1])\n primcaps_s2 = PrimCapsLayer()([S_matrix_s2,feat_pre_concat, norm_S_s2])\n primcaps_s3 = PrimCapsLayer()([S_matrix_s3,feat_pre_concat, norm_S_s3]) \n \n primcaps = Concatenate(axis=1)([primcaps_s1,primcaps_s2,primcaps_s3])\n\n return Model(inputs=[input_s1_preS, input_s2_preS, input_s3_preS],outputs=primcaps, name='ssr_S_model_{}'.format(vec_order))\n\n def ssr_noS_model_build(self, vec_order, **kwargs): \n\n input_s1_preS = Input((self.map_xy_size,self.map_xy_size,64))\n input_s2_preS = Input((self.map_xy_size,self.map_xy_size,64))\n input_s3_preS = Input((self.map_xy_size,self.map_xy_size,64))\n\n primcaps_s1 = Reshape((self.map_xy_size*self.map_xy_size,64))(input_s1_preS)\n primcaps_s2 = Reshape((self.map_xy_size*self.map_xy_size,64))(input_s2_preS)\n primcaps_s3 = Reshape((self.map_xy_size*self.map_xy_size,64))(input_s3_preS)\n\n primcaps = Concatenate(axis=1)([primcaps_s1,primcaps_s2,primcaps_s3])\n print(vec_order)\n return Model(inputs=[input_s1_preS, input_s2_preS, input_s3_preS],outputs=primcaps, name='ssr_S_model_{}'.format(vec_order))\n\n def __call__(self):\n logging.debug(\"Creating model...\")\n img_inputs = Input(self._input_shape) \n\n # Build various models\n # Two-stream structure for extracting the features.\n ssr_G_model = self.ssr_build_resnet(self._input_shape)\n \n if self.is_noS_model:\n ssr_S_model_0 = self.ssr_noS_model_build(vec_order=0)\n ssr_S_model_1 = self.ssr_noS_model_build(vec_order=1)\n ssr_S_model_2 = self.ssr_noS_model_build(vec_order=2)\n\n else:\n ssr_S_model_0 = self.ssr_S_model_build(num_primcaps=self.num_primcaps,m_dim=self.m_dim, vec_order=0)\n ssr_S_model_1 = self.ssr_S_model_build(num_primcaps=self.num_primcaps,m_dim=self.m_dim, vec_order=1)\n ssr_S_model_2 = self.ssr_S_model_build(num_primcaps=self.num_primcaps,m_dim=self.m_dim, vec_order=2)\n\n ssr_aggregation_model_0 = self.ssr_aggregation_model_build((self.num_primcaps,64), vec_order=0)\n ssr_aggregation_model_1 = self.ssr_aggregation_model_build((self.num_primcaps,64), vec_order=1)\n ssr_aggregation_model_2 = self.ssr_aggregation_model_build((self.num_primcaps,64), vec_order=2)\n\n if self.is_fc_model:\n ssr_F_Cap_model = self.ssr_FC_model_build(self.F_shape,'ssr_FC_Cap_model')\n else:\n ssr_F_Cap_model_0 = self.ssr_F_model_build(self.F_shape,'ssr_NoFC_Cap_model', vec_order=0)\n ssr_F_Cap_model_1 = self.ssr_F_model_build(self.F_shape,'ssr_NoFC_Cap_model', vec_order=1)\n ssr_F_Cap_model_2 = self.ssr_F_model_build(self.F_shape,'ssr_NoFC_Cap_model', vec_order=2)\n\n # Wire them up\n # ssr_G_list: [(batch_size, 8, 8, 64), (batch_size, 8, 8, 64), (batch_size, 8, 8, 64)]\n # Two-stream structure for extracting the features.\n\n ssr_G_list = ssr_G_model(img_inputs)\n\n # ssr_primcaps: (batch_size, 21, 64)\n # Generating fine-grained structure mapping from different scoring functions.\n # Apply the mapping on to the features and generate primary capsules.\n ssr_primcaps_0 = ssr_S_model_0(ssr_G_list)\n ssr_primcaps_1 = ssr_S_model_1(ssr_G_list)\n ssr_primcaps_2 = ssr_S_model_2(ssr_G_list)\n \n # ssr_Cap_list: [(None, None), (None, None), (None, None)]\n # Feed the primary capsules into capsule layer and output the final aggregated capsule features. 
And divide them into 3 stages.\n \n \n ssr_Cap_list_0 = ssr_aggregation_model_0(ssr_primcaps_0)\n ssr_Cap_list_1 = ssr_aggregation_model_1(ssr_primcaps_1)\n ssr_Cap_list_2 = ssr_aggregation_model_2(ssr_primcaps_2)\n\n print('*'*50)\n print('ssr_Cap_list_0[0]: ', ssr_Cap_list_0[0].shape)\n print('*'*50)\n\n # ssr_F_Cap_list: [(batch_size, 3, 3), (batch_size, 3, 3), (batch_size, 3, 3), ~> p\n # (batch_size, 3), (batch_size, 3), (batch_size, 3), ~> delta\n # (batch_size, 3), (batch_size, 3), (batch_size, 3)] ~> eta\n # Taking the previous 3 stages features for Soft-Stagewise Regression (SSR) module. \n # Each stage further splits into three parts: prediction, dynamic index shifting, and dynamic scaling. \n # This part please check the '[IJCAI18] SSR-Net' for more detail explanation.\n\n # ssr_F_Cap_list_0 : (None, 3, 39)\n ssr_F_Cap_list_0 = ssr_F_Cap_model_0(ssr_Cap_list_0)\n ssr_F_Cap_list_1 = ssr_F_Cap_model_1(ssr_Cap_list_1)\n ssr_F_Cap_list_2 = ssr_F_Cap_model_2(ssr_Cap_list_2)\n\n print('*'*50)\n print('ssr_F_Cap_list_0', ssr_F_Cap_list_0.shape)\n print('*'*50)\n\n # pred_pose_l : (None, 3)\n # Taking the prediction, dynamic index shifting, and dynamic scaling for the final regression output. In this case, there are three outputs (yaw, pitch, roll).\n pred_vec_0 = SSRLayer(s1=self.stage_num[0], s2=self.stage_num[1], s3=self.stage_num[2], lambda_d=self.lambda_d, name=\"pred_pose_0\")(ssr_F_Cap_list_0)\n pred_vec_1 = SSRLayer(s1=self.stage_num[0], s2=self.stage_num[1], s3=self.stage_num[2], lambda_d=self.lambda_d, name=\"pred_pose_1\")(ssr_F_Cap_list_1)\n pred_vec_2 = SSRLayer(s1=self.stage_num[0], s2=self.stage_num[1], s3=self.stage_num[2], lambda_d=self.lambda_d, name=\"pred_pose_2\")(ssr_F_Cap_list_2)\n\n print('*'*50)\n print('pred_vec_0: ', pred_vec_0.shape)\n print('*'*50)\n\n pred_vecs = Concatenate(axis=-1)([pred_vec_0, pred_vec_1, pred_vec_2])\n\n print('*'*50)\n print('pred_vecs: ', pred_vecs.shape)\n print('*'*50)\n \n return Model(inputs=img_inputs, outputs=[pred_vecs, pred_vecs])\n # return Model(inputs=img_inputs, outputs=pred_pose)\n\n# Capsule FSANetworks\n\nclass BaseCapsuleFSANet(BaseFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(BaseCapsuleFSANet, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set)\n\n def ssr_aggregation_model_build(self, shape_primcaps, vec_order):\n input_primcaps = Input(shape_primcaps) \n capsule = CapsuleLayer(self.num_capsule, self.dim_capsule, routings=self.routings, name='caps')(input_primcaps) \n\n feat_s1_div, feat_s2_div, feat_s3_div = AggregatedFeatureExtractionLayer(num_capsule=self.num_capsule)(capsule)\n\n feat_s1_div = Reshape((-1,))(feat_s1_div)\n feat_s2_div = Reshape((-1,))(feat_s2_div)\n feat_s3_div = Reshape((-1,))(feat_s3_div) \n \n return Model(inputs=input_primcaps,outputs=[feat_s1_div,feat_s2_div,feat_s3_div], name='ssr_Cap_model_{}'.format(vec_order))\n\nclass FSA_net_Capsule(BaseCapsuleFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_Capsule, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set) \n self.is_varS_model = False \n \nclass FSA_net_Var_Capsule(BaseCapsuleFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_Var_Capsule, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set) \n self.is_varS_model = True\n \nclass FSA_net_noS_Capsule(BaseCapsuleFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_noS_Capsule, 
self).__init__(image_size,num_classes,stage_num,lambda_d, S_set) \n self.is_noS_model = True \n \nclass FSA_net_Capsule_FC(FSA_net_Capsule):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_Capsule_FC, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set) \n self.is_fc_model = True\n\nclass FSA_net_Var_Capsule_FC(FSA_net_Var_Capsule):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_Var_Capsule_FC, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set)\n self.is_fc_model = True\n \nclass FSA_net_noS_Capsule_FC(FSA_net_noS_Capsule):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_noS_Capsule_FC, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set)\n self.is_fc_model = True\n\n# Metric models\n\nclass BaseMetricFSANet(BaseFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(BaseMetricFSANet, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set) \n \n def ssr_aggregation_model_build(self, shape_primcaps, vec_order):\n input_primcaps = Input(shape_primcaps)\n\n metric_feat = MatMulLayer(16,type=1)(input_primcaps)\n metric_feat = MatMulLayer(3,type=2)(metric_feat)\n\n feat_s1_div, feat_s2_div, feat_s3_div = AggregatedFeatureExtractionLayer(num_capsule=self.num_capsule)(metric_feat)\n\n feat_s1_div = Reshape((-1,))(feat_s1_div)\n feat_s2_div = Reshape((-1,))(feat_s2_div)\n feat_s3_div = Reshape((-1,))(feat_s3_div)\n \n return Model(inputs=input_primcaps,outputs=[feat_s1_div,feat_s2_div,feat_s3_div], name='ssr_Metric_model_{}'.format(vec_order))\n \nclass FSA_net_Metric(BaseMetricFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_Metric, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set)\n self.is_varS_model = False \n\nclass FSA_net_Var_Metric(BaseMetricFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_Var_Metric, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set)\n self.is_varS_model = True\n \nclass FSA_net_noS_Metric(BaseMetricFSANet):\n def __init__(self, image_size,num_classes,stage_num,lambda_d, S_set):\n super(FSA_net_noS_Metric, self).__init__(image_size,num_classes,stage_num,lambda_d, S_set)\n self.is_noS_model = True\n \n" ]
[ [ "tensorflow.nn.moments", "tensorflow.matmul", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
song2181/DI-engine
[ "268d77db3cb54401b2cfc83e2bc3ec87c31e7b83" ]
[ "ding/envs/env_manager/tests/conftest.py" ]
[ "import random\nimport time\nfrom collections import namedtuple\nimport pytest\nimport torch\nimport numpy as np\nfrom easydict import EasyDict\nfrom functools import partial\nimport gym\n\nfrom ding.envs.env.base_env import BaseEnvTimestep\nfrom ding.envs.env_manager.base_env_manager import EnvState\nfrom ding.envs.env_manager import BaseEnvManager, SyncSubprocessEnvManager, AsyncSubprocessEnvManager\nfrom ding.torch_utils import to_tensor, to_ndarray, to_list\nfrom ding.utils import deep_merge_dicts\n\n\nclass FakeEnv(object):\n\n def __init__(self, cfg):\n self._scale = cfg.scale\n self._target_time = random.randint(3, 6) * self._scale\n self._current_time = 0\n self._name = cfg['name']\n self._id = time.time()\n self._stat = None\n self._seed = 0\n self._data_count = 0\n self.timeout_flag = False\n self._launched = False\n self._state = EnvState.INIT\n self._dead_once = False\n self.observation_space = gym.spaces.Box(\n low=np.array([-1.0, -1.0, -8.0]), high=np.array([1.0, 1.0, 8.0]), shape=(3, ), dtype=np.float32\n )\n self.action_space = gym.spaces.Box(low=-2.0, high=2.0, shape=(1, ), dtype=np.float32)\n self.reward_space = gym.spaces.Box(\n low=-1 * (3.14 * 3.14 + 0.1 * 8 * 8 + 0.001 * 2 * 2), high=0.0, shape=(1, ), dtype=np.float32\n )\n\n def reset(self, stat=None):\n if isinstance(stat, str) and stat == 'error':\n self.dead()\n if isinstance(stat, str) and stat == 'error_once':\n # Die on every two reset with error_once stat.\n if self._dead_once:\n self._dead_once = False\n self.dead()\n else:\n self._dead_once = True\n if isinstance(stat, str) and stat == \"wait\":\n if self.timeout_flag: # after step(), the reset can hall with status of timeout\n time.sleep(5)\n if isinstance(stat, str) and stat == \"block\":\n self.block()\n\n self._launched = True\n self._current_time = 0\n self._stat = stat\n self._state = EnvState.RUN\n return to_ndarray(torch.randn(3))\n\n def step(self, action):\n assert self._launched\n assert not self._state == EnvState.ERROR\n self.timeout_flag = True # after one step, enable timeout flag\n if isinstance(action, str) and action == 'error':\n self.dead()\n if isinstance(action, str) and action == 'catched_error':\n return BaseEnvTimestep(None, None, True, {'abnormal': True})\n if isinstance(action, str) and action == \"wait\":\n if self.timeout_flag: # after step(), the reset can hall with status of timeout\n time.sleep(3)\n if isinstance(action, str) and action == 'block':\n self.block()\n obs = to_ndarray(torch.randn(3))\n reward = to_ndarray(torch.randint(0, 2, size=[1]).numpy())\n done = self._current_time >= self._target_time\n if done:\n self._state = EnvState.DONE\n simulation_time = random.uniform(0.5, 1) * self._scale\n info = {'name': self._name, 'time': simulation_time, 'tgt': self._target_time, 'cur': self._current_time}\n time.sleep(simulation_time)\n self._current_time += simulation_time\n self._data_count += 1\n return BaseEnvTimestep(obs, reward, done, info)\n\n def dead(self):\n self._state = EnvState.ERROR\n raise RuntimeError(\"env error, current time {}\".format(self._current_time))\n\n def block(self):\n self._state = EnvState.ERROR\n time.sleep(1000)\n\n def close(self):\n self._launched = False\n self._state = EnvState.INIT\n\n def seed(self, seed):\n self._seed = seed\n\n @property\n def name(self):\n return self._name\n\n @property\n def time_id(self):\n return self._id\n\n def user_defined(self):\n pass\n\n def __repr__(self):\n return self._name\n\n\nclass FakeAsyncEnv(FakeEnv):\n\n def reset(self, stat=None):\n 
super().reset(stat)\n time.sleep(random.randint(1, 3) * self._scale)\n return to_ndarray(torch.randn(3))\n\n\nclass FakeGymEnv(FakeEnv):\n\n def __init__(self, cfg):\n super().__init__(cfg)\n self.metadata = \"fake metadata\"\n self.action_space = gym.spaces.Box(low=-2.0, high=2.0, shape=(4, ), dtype=np.float32)\n\n def random_action(self) -> np.ndarray:\n random_action = self.action_space.sample()\n if isinstance(random_action, np.ndarray):\n pass\n elif isinstance(random_action, int):\n random_action = to_ndarray([random_action], dtype=np.int64)\n elif isinstance(random_action, dict):\n random_action = to_ndarray(random_action)\n else:\n raise TypeError(\n '`random_action` should be either int/np.ndarray or dict of int/np.ndarray, but get {}: {}'.format(\n type(random_action), random_action\n )\n )\n return random_action\n\n\nclass FakeModel(object):\n\n def forward(self, obs):\n if random.random() > 0.5:\n return {k: [] for k in obs}\n else:\n env_num = len(obs)\n exec_env = random.randint(1, env_num + 1)\n keys = list(obs.keys())[:exec_env]\n return {k: [] for k in keys}\n\n\[email protected](scope='class')\ndef setup_model_type():\n return FakeModel\n\n\ndef get_base_manager_cfg(env_num=4):\n manager_cfg = {\n 'env_cfg': [{\n 'name': 'name{}'.format(i),\n 'scale': 1.0,\n } for i in range(env_num)],\n 'episode_num': 2,\n 'reset_timeout': 10,\n 'step_timeout': 8,\n 'max_retry': 5,\n }\n return EasyDict(manager_cfg)\n\n\ndef get_subprecess_manager_cfg(env_num=4):\n manager_cfg = {\n 'env_cfg': [{\n 'name': 'name{}'.format(i),\n 'scale': 1.0,\n } for i in range(env_num)],\n 'episode_num': 2,\n #'step_timeout': 8,\n #'reset_timeout': 10,\n 'connect_timeout': 8,\n 'step_timeout': 5,\n 'max_retry': 2,\n }\n return EasyDict(manager_cfg)\n\n\ndef get_gym_vector_manager_cfg(env_num=4):\n manager_cfg = {\n 'env_cfg': [{\n 'name': 'name{}'.format(i),\n } for i in range(env_num)],\n 'episode_num': 2,\n 'connect_timeout': 8,\n 'step_timeout': 5,\n 'max_retry': 2,\n 'share_memory': True\n }\n return EasyDict(manager_cfg)\n\n\[email protected](scope='function')\ndef setup_base_manager_cfg():\n manager_cfg = get_base_manager_cfg(4)\n env_cfg = manager_cfg.pop('env_cfg')\n manager_cfg['env_fn'] = [partial(FakeEnv, cfg=c) for c in env_cfg]\n return deep_merge_dicts(BaseEnvManager.default_config(), EasyDict(manager_cfg))\n\n\[email protected](scope='function')\ndef setup_fast_base_manager_cfg():\n manager_cfg = get_base_manager_cfg(4)\n env_cfg = manager_cfg.pop('env_cfg')\n for e in env_cfg:\n e['scale'] = 0.1\n manager_cfg['env_fn'] = [partial(FakeEnv, cfg=c) for c in env_cfg]\n return deep_merge_dicts(BaseEnvManager.default_config(), EasyDict(manager_cfg))\n\n\[email protected](scope='function')\ndef setup_sync_manager_cfg():\n manager_cfg = get_subprecess_manager_cfg(4)\n env_cfg = manager_cfg.pop('env_cfg')\n # TODO(nyz) test fail when shared_memory = True\n manager_cfg['shared_memory'] = False\n manager_cfg['env_fn'] = [partial(FakeEnv, cfg=c) for c in env_cfg]\n return deep_merge_dicts(SyncSubprocessEnvManager.default_config(), EasyDict(manager_cfg))\n\n\[email protected](scope='function')\ndef setup_async_manager_cfg():\n manager_cfg = get_subprecess_manager_cfg(4)\n env_cfg = manager_cfg.pop('env_cfg')\n manager_cfg['env_fn'] = [partial(FakeAsyncEnv, cfg=c) for c in env_cfg]\n manager_cfg['shared_memory'] = False\n return deep_merge_dicts(AsyncSubprocessEnvManager.default_config(), EasyDict(manager_cfg))\n\n\[email protected](scope='function')\ndef setup_gym_vector_manager_cfg():\n manager_cfg = 
get_subprecess_manager_cfg(4)\n env_cfg = manager_cfg.pop('env_cfg')\n manager_cfg['env_fn'] = [partial(FakeGymEnv, cfg=c) for c in env_cfg]\n manager_cfg['shared_memory'] = False\n return EasyDict(manager_cfg)\n" ]
[ [ "torch.randn", "numpy.array", "torch.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
u6052029/cogent3
[ "ca0efcb7f60b715bcbfbecd924cdb98a53cefe20", "ca0efcb7f60b715bcbfbecd924cdb98a53cefe20" ]
[ "src/cogent3/evolve/motif_prob_model.py", "src/cogent3/util/table.py" ]
[ "#!/usr/bin/env python\n\nimport warnings\n\nimport numpy\n\nfrom cogent3.evolve.likelihood_tree import make_likelihood_tree_leaf\n\nfrom . import substitution_calculation\n\n\n__author__ = \"Peter Maxwell\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Peter Maxwell\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.7.2a\"\n__maintainer__ = \"Gavin Huttley\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n\ndef make_model(mprob_model, tuple_alphabet, mask):\n if mprob_model == \"monomers\":\n return PosnSpecificMonomerProbModel(tuple_alphabet, mask)\n elif mprob_model == \"monomer\":\n return MonomerProbModel(tuple_alphabet, mask)\n elif mprob_model == \"conditional\":\n return ConditionalMotifProbModel(tuple_alphabet, mask)\n elif mprob_model in [\"word\", \"tuple\", None]:\n return SimpleMotifProbModel(tuple_alphabet)\n else:\n raise ValueError(\"Unknown mprob model '%s'\" % str(mprob_model))\n\n\nclass MotifProbModel(object):\n def __init__(self, *whatever, **kw):\n raise NotImplementedError\n\n def calc_word_probs(self, *monomer_probs):\n assert len(monomer_probs) == 1\n return monomer_probs[0]\n\n def calc_word_weight_matrix(self, *monomer_probs):\n assert len(monomer_probs) == 1\n return monomer_probs[0]\n\n def make_motif_probs_defn(self):\n \"\"\"Makes the first part of a parameter controller definition for this\n model, the calculation of motif probabilities\"\"\"\n return substitution_calculation.PartitionDefn(\n name=\"mprobs\",\n default=None,\n dimensions=(\"locus\", \"edge\"),\n dimension=(\"motif\", tuple(self.get_input_alphabet())),\n )\n\n def set_param_controller_motif_probs(self, pc, motif_probs, **kw):\n pc.set_param_rule(\"mprobs\", value=motif_probs, **kw)\n\n def count_motifs(self, alignment, include_ambiguity=False, recode_gaps=True):\n result = None\n try:\n mtype = self.alphabet.moltype\n except AttributeError:\n mtype = self.monomer_alphabet.moltype\n\n for seq_name in alignment.names:\n sequence = alignment.get_gapped_seq(seq_name, recode_gaps, moltype=mtype)\n leaf = make_likelihood_tree_leaf(\n sequence, self.get_counted_alphabet(), seq_name\n )\n count = leaf.get_motif_counts(include_ambiguity=include_ambiguity)\n if result is None:\n result = count.copy()\n else:\n result += count\n return result\n\n def adapt_motif_probs(self, motif_probs, auto=False):\n motif_probs = self.get_input_alphabet().adapt_motif_probs(motif_probs)\n assert abs(sum(motif_probs) - 1.0) < 0.0001, motif_probs\n return motif_probs\n\n def make_equal_motif_probs(self):\n alphabet = self.get_input_alphabet()\n p = 1.0 / len(alphabet)\n return dict([(m, p) for m in alphabet])\n\n def make_sample_motif_probs(self):\n import random\n\n motif_probs = numpy.array(\n [random.uniform(0.2, 1.0) for m in self.get_counted_alphabet()]\n )\n motif_probs /= sum(motif_probs)\n return motif_probs\n\n\nclass SimpleMotifProbModel(MotifProbModel):\n def __init__(self, alphabet):\n self.alphabet = alphabet\n\n def get_input_alphabet(self):\n return self.alphabet\n\n def get_counted_alphabet(self):\n return self.alphabet\n\n def make_motif_word_prob_defns(self):\n monomer_probs = self.make_motif_probs_defn()\n return (monomer_probs, monomer_probs, monomer_probs)\n\n\nclass ComplexMotifProbModel(MotifProbModel):\n def __init__(self, tuple_alphabet, mask):\n \"\"\"Arguments:\n - tuple_alphabet: series of multi-letter motifs\n - monomers: the monomers from which the motifs are made\n - mask: instantaneous change matrix\"\"\"\n self.mask = mask\n 
self.tuple_alphabet = tuple_alphabet\n self.monomer_alphabet = monomers = tuple_alphabet.moltype.alphabet\n self.word_length = length = tuple_alphabet.get_motif_len()\n size = len(tuple_alphabet)\n\n # m2w[AC, 1] = C\n # w2m[0, AC, A] = True\n # w2c[ATC, AT*] = 1\n self.m2w = m2w = numpy.zeros([size, length], int)\n self.w2m = w2m = numpy.zeros([length, size, len(monomers)], int)\n contexts = monomers.get_word_alphabet(length - 1)\n self.w2c = w2c = numpy.zeros([size, length * len(contexts)], int)\n for (i, word) in enumerate(tuple_alphabet):\n for j in range(length):\n monomer = monomers.index(word[j])\n context = contexts.index(word[:j] + word[j + 1 :])\n m2w[i, j] = monomer\n w2m[j, i, monomer] = 1\n w2c[i, context * length + j] = 1\n\n self.mutated_posn = numpy.zeros(mask.shape, int)\n self.mutant_motif = numpy.zeros(mask.shape, int)\n self.context_indices = numpy.zeros(mask.shape, int)\n\n for (i, old_word, j, new_word, diff) in self._mutations():\n self.mutated_posn[i, j] = diff\n mutant_motif = new_word[diff]\n context = new_word[:diff] + new_word[diff + 1 :]\n self.mutant_motif[i, j] = monomers.index(mutant_motif)\n c = contexts.index(context)\n self.context_indices[i, j] = c * length + diff\n\n def _mutations(self):\n diff_pos = lambda x, y: [i for i in range(len(x)) if x[i] != y[i]]\n num_states = len(self.tuple_alphabet)\n for i in range(num_states):\n old_word = self.tuple_alphabet[i]\n for j in range(num_states):\n new_word = self.tuple_alphabet[j]\n if self.mask[i, j]:\n assert self.mask[i, j] == 1.0\n diffs = diff_pos(old_word, new_word)\n assert len(diffs) == 1, (old_word, new_word)\n diff = diffs[0]\n yield i, old_word, j, new_word, diff\n\n\nclass MonomerProbModel(ComplexMotifProbModel):\n def get_input_alphabet(self):\n return self.monomer_alphabet\n\n def get_counted_alphabet(self):\n return self.monomer_alphabet\n\n def calc_monomer_probs(self, word_probs):\n monomer_probs = numpy.dot(word_probs, self.w2m.sum(axis=0))\n monomer_probs /= monomer_probs.sum()\n return monomer_probs\n\n def calc_word_probs(self, monomer_probs):\n result = numpy.product(monomer_probs.take(self.m2w), axis=-1)\n # maybe simpler but slower, works ok:\n # result = numpy.product(monomer_probs ** (w2m, axis=-1))\n result /= result.sum()\n return result\n\n def calc_word_weight_matrix(self, monomer_probs):\n result = monomer_probs.take(self.mutant_motif) * self.mask\n return result\n\n def make_motif_word_prob_defns(self):\n monomer_probs = self.make_motif_probs_defn()\n word_probs = substitution_calculation.CalcDefn(\n self.calc_word_probs, name=\"wprobs\"\n )(monomer_probs)\n mprobs_matrix = substitution_calculation.CalcDefn(\n self.calc_word_weight_matrix, name=\"mprobs_matrix\"\n )(monomer_probs)\n return (monomer_probs, word_probs, mprobs_matrix)\n\n def adapt_motif_probs(self, motif_probs, auto=False):\n try:\n motif_probs = self.monomer_alphabet.adapt_motif_probs(motif_probs)\n except ValueError:\n motif_probs = self.tuple_alphabet.adapt_motif_probs(motif_probs)\n if not auto:\n warnings.warn(\"Motif probs overspecified\", stacklevel=5)\n motif_probs = self.calc_monomer_probs(motif_probs)\n return motif_probs\n\n\nclass PosnSpecificMonomerProbModel(MonomerProbModel):\n def get_counted_alphabet(self):\n return self.tuple_alphabet\n\n def calc_posn_specific_monomer_probs(self, word_probs):\n monomer_probs = numpy.dot(word_probs, self.w2m)\n monomer_probs /= monomer_probs.sum(axis=1)[..., numpy.newaxis]\n return list(monomer_probs)\n\n def calc_word_probs(self, monomer_probs):\n positions = 
list(range(self.word_length))\n assert len(monomer_probs) == self.m2w.shape[1], (\n len(monomer_probs),\n type(monomer_probs),\n self.m2w.shape,\n )\n result = numpy.product(\n [monomer_probs[i].take(self.m2w[:, i]) for i in positions], axis=0\n )\n result /= result.sum()\n return result\n\n def calc_word_weight_matrix(self, monomer_probs):\n positions = list(range(self.word_length))\n monomer_probs = numpy.array(monomer_probs) # so [posn, motif]\n size = monomer_probs.shape[-1]\n # should be constant\n extended_indices = self.mutated_posn * size + self.mutant_motif\n # print size, self.word_length\n # for a in [extended_indices, self.mutated_posn, self.mutant_motif,\n # monomer_probs]:\n # print a.shape, a.max()\n\n result = monomer_probs.take(extended_indices) * self.mask\n return result\n\n def make_motif_word_prob_defns(self):\n monomer_probs = substitution_calculation.PartitionDefn(\n name=\"psmprobs\",\n default=None,\n dimensions=(\"locus\", \"position\", \"edge\"),\n dimension=(\"motif\", tuple(self.get_input_alphabet())),\n )\n monomer_probs3 = monomer_probs.across_dimension(\n \"position\", [str(i) for i in range(self.word_length)]\n )\n monomer_probs3 = substitution_calculation.CalcDefn(\n lambda *x: numpy.array(x), name=\"mprobs\"\n )(*monomer_probs3)\n word_probs = substitution_calculation.CalcDefn(\n self.calc_word_probs, name=\"wprobs\"\n )(monomer_probs3)\n mprobs_matrix = substitution_calculation.CalcDefn(\n self.calc_word_weight_matrix, name=\"mprobs_matrix\"\n )(monomer_probs3)\n return (monomer_probs, word_probs, mprobs_matrix)\n\n def set_param_controller_motif_probs(self, pc, motif_probs, **kw):\n assert len(motif_probs) == self.word_length\n for (i, m) in enumerate(motif_probs):\n pc.set_param_rule(\"psmprobs\", value=m, position=str(i), **kw)\n\n def adapt_motif_probs(self, motif_probs, auto=False):\n try:\n motif_probs = self.monomer_alphabet.adapt_motif_probs(motif_probs)\n except ValueError:\n motif_probs = self.tuple_alphabet.adapt_motif_probs(motif_probs)\n motif_probs = self.calc_posn_specific_monomer_probs(motif_probs)\n else:\n motif_probs = [motif_probs] * self.word_length\n return motif_probs\n\n\nclass ConditionalMotifProbModel(ComplexMotifProbModel):\n def get_input_alphabet(self):\n return self.tuple_alphabet\n\n def get_counted_alphabet(self):\n return self.tuple_alphabet\n\n def calc_word_weight_matrix(self, motif_probs):\n context_probs = numpy.dot(motif_probs, self.w2c)\n context_probs[context_probs == 0.0] = numpy.inf\n result = motif_probs / context_probs.take(self.context_indices)\n return result\n\n def make_motif_word_prob_defns(self):\n mprobs = self.make_motif_probs_defn()\n mprobs_matrix = substitution_calculation.CalcDefn(\n self.calc_word_weight_matrix, name=\"mprobs_matrix\"\n )(mprobs)\n return (mprobs, mprobs, mprobs_matrix)\n", "#!/usr/bin/env python\n\"\"\"\nA light-weight Table class for manipulating 2D data and representing it as\ntext, or writing to file for import into other packages.\n\nCurrent output formats include pickle (pythons serialisation format),\nrestructured text (keyed by 'rest'), latex, html, delimited columns, and a\nsimple text format.\n\nTable can read pickled and delimited formats.\n\"\"\"\n\nimport csv\nimport json\nimport pickle\nimport re\n\nfrom collections import defaultdict\nfrom collections.abc import Callable, MutableMapping\nfrom itertools import product\nfrom xml.sax.saxutils import escape\n\nimport numpy\n\nfrom cogent3.format import bedgraph\nfrom cogent3.format import table as table_format\nfrom 
cogent3.util.dict_array import DictArray, DictArrayTemplate\nfrom cogent3.util.misc import (\n atomic_write,\n extend_docstring_from,\n get_format_suffixes,\n get_object_provenance,\n open_,\n)\nfrom cogent3.util.union_dict import UnionDict\nfrom cogent3.util.warning import deprecated\n\n\ntry:\n from IPython.display import display\nexcept ImportError:\n display = lambda x: print(repr(x))\n\n__author__ = \"Gavin Huttley\"\n__copyright__ = \"Copyright 2007-2020, The Cogent Project\"\n__credits__ = [\"Gavin Huttley\", \"Felix Schill\", \"Sheng Koh\"]\n__license__ = \"BSD-3\"\n__version__ = \"2020.7.2a\"\n__maintainer__ = \"Gavin Huttley\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n\n# making reversed characters for use in reverse order sorting\n_all_chrs = [chr(i) for i in range(256)]\n_all_chrs.reverse()\n_reversed_chrs = \"\".join(_all_chrs)\n\n\ndef _reverse_str(x):\n \"\"\"returns reverse translation of x\"\"\"\n return x.translate(_reversed_chrs)\n\n\ndef _reverse_num(x):\n \"\"\"returns reversed val of x\"\"\"\n return x * -1\n\n\ndef _numeric_sum(data):\n \"\"\"returns sum of all numeric values\"\"\"\n try:\n result = numpy.sum(data)\n result / 3\n return result\n except TypeError:\n pass\n\n total = 0\n valid = False\n for v in data:\n try:\n total += v\n valid = True\n except TypeError:\n pass\n result = total if valid else numpy.nan\n return result\n\n\ndef _callback(callback, row, num_columns=None):\n if isinstance(callback, Callable):\n if num_columns == 1:\n row = row[0]\n return callback(row)\n else:\n return eval(callback, {}, row)\n\n\ndef formatted_array(\n series, title=\"\", precision=4, format_spec=None, missing_data=\"\", center=False,\n):\n \"\"\"converts elements in a numpy array series to an equal length string.\n\n Parameters\n ----------\n series\n the series of table rows\n title\n title of series\n precision\n number of decimal places. 
Can be overridden by following.\n format_spec\n format specification as per the python Format Specification, Mini-Language\n or a callable function.\n missing_data\n default missing data value.\n\n Returns\n -------\n list of formatted series, formatted title\n \"\"\"\n if callable(format_spec):\n formatter = format_spec\n format_spec = base_format = \"\"\n else:\n formatter = None\n\n if isinstance(format_spec, str):\n format_spec = format_spec.replace(\"%\", \"\")\n\n if format_spec:\n match = re.search(\"[<>^]\", format_spec[:2])\n final_align = \">\" if match is None else match.group()\n align = \"\"\n else:\n final_align = align = \">\"\n\n base_format = format_spec if format_spec else \"\"\n assert isinstance(series, numpy.ndarray), \"must be numpy array\"\n if format_spec is None:\n type_name = series.dtype.name\n align = \"^\" if center else \">\"\n if \"int\" in type_name:\n base_format = \"d\"\n elif \"float\" in type_name:\n base_format = f\".{precision}f\"\n elif \"bool\" == type_name:\n base_format = \"\"\n else:\n # handle mixed types with a custom formatter\n formatter = _MixedFormatter(\n align, len(title), precision, missing_data=missing_data\n )\n format_spec = base_format = \"\"\n\n format_spec = base_format\n\n formatted = []\n max_length = len(title)\n for i, v in enumerate(series):\n if formatter:\n v = formatter(v)\n else:\n try:\n v = format(v, format_spec)\n except (TypeError, ValueError):\n # could be a python object\n v = str(v)\n l = len(v)\n if l > max_length:\n max_length = l\n format_spec = f\"{align}{max_length}{base_format}\"\n formatted.append(v)\n\n # title is always right aligned, for now\n title = format(title, f\">{max_length}\")\n # now adjust to max_len\n format_spec = f\"{final_align}{max_length}s\"\n for i in range(len(series)):\n if len(formatted[i]) < max_length:\n formatted[i] = format(formatted[i].strip(), format_spec)\n return formatted, title\n\n\ndef cast_str_to_numeric(values):\n \"\"\"converts a series of strings to numeric values\"\"\"\n if not (isinstance(values[0], str) or isinstance(values[0], bytes)):\n return numpy.array(values)\n\n if not isinstance(values, numpy.ndarray):\n values = numpy.array(values, dtype=\"U\")\n\n for typ in (int, float, complex):\n try:\n values = values.astype(typ)\n break\n except (ValueError, TypeError):\n pass\n return values\n\n\ndef cast_str_to_array(values, static_type=False):\n \"\"\"converts a series of strings to numeric values\"\"\"\n values = numpy.array(values, dtype=\"U\")\n result = cast_str_to_numeric(values)\n if static_type or result is not values:\n return result\n\n # we handle mixed types by using eval\n result = []\n all_fail = True\n for v in values:\n try:\n v = eval(v)\n all_fail = False\n except (TypeError, NameError, SyntaxError):\n # syntax error from empty strings\n pass\n result.append(v)\n\n if not all_fail:\n result = numpy.array(result, dtype=\"O\")\n else:\n result = values\n\n return result\n\n\n_numeric_types = {int, float, complex}\n\n\ndef cast_to_array(values):\n \"\"\"converts a series to a general array type\"\"\"\n if isinstance(values, numpy.ndarray):\n return values\n\n types = {type(v) for v in values}\n if len(types) != 1 and not types <= _numeric_types:\n return numpy.array(values, dtype=object)\n\n # force unicode if str type, otherwise try None\n dtype = \"U\" if types == {str} else None\n try:\n result = numpy.array(values, dtype=dtype)\n except Exception:\n result = numpy.array(values, dtype=object)\n\n return result\n\n\ndef cast_2d_to_1d_dict(data, 
row_order=None):\n \"\"\"converts a 2D dict to a 1D dict\"\"\"\n if not row_order:\n key = list(data.keys())[0]\n row_order = list(data[key])\n\n result = {c: [data[c][r] for r in row_order] for c in data}\n return result\n\n\ndef cast_to_1d_dict(data, row_order=None):\n \"\"\"Returns a 1D dict keyed by column.\n\n Parameters\n ----------\n data : dict\n may be 2D\n row_order\n a specified order to generate the rows.\n \"\"\"\n val_types = {type(v): v for v in data.values()}\n if dict in val_types:\n result = cast_2d_to_1d_dict(data, row_order=row_order)\n else:\n result = data\n return result\n\n\nclass _MixedFormatter:\n \"\"\"handles formatting of mixed data types\"\"\"\n\n def __init__(\n self, alignment, length, precision=4, float_type=\"f\", missing_data=None\n ):\n self.missing_data = missing_data\n self.length = length\n self.alignment = alignment\n self.precision = precision\n self.float_type = float_type\n\n def __call__(self, val):\n prefix = f\"{self.alignment}{self.length}\"\n float_spec = f\"{prefix}.{self.precision}{self.float_type}\"\n int_spec = f\"{prefix}d\"\n result = str(val)\n if self.missing_data is not None and not result:\n return self.missing_data\n\n for fspec in (int_spec, float_spec, prefix):\n try:\n result = format(val, fspec)\n break\n except (TypeError, ValueError):\n pass\n\n return result\n\n\nclass Columns(MutableMapping):\n \"\"\"Collection of columns. iter operates over columns.\"\"\"\n\n def __init__(self):\n self._order = ()\n self._num_rows = 0\n self._template = None\n self._index_name = None\n\n def _get_key_(self, value):\n \"\"\"returns string corresponding to column\"\"\"\n\n if isinstance(value, int):\n try:\n value = self._order[value]\n except IndexError:\n raise KeyError(f\"no key corresponding to index {value}\")\n\n return value\n\n def _get_keys_(self, key):\n \"\"\"returns series of str corresponding to columns\"\"\"\n if isinstance(key, str) or isinstance(key, int):\n key = self._get_key_(key)\n return key\n\n if isinstance(key, slice):\n key, _ = self._template.interpret_index(key)\n key = self._order[key[0]]\n\n if type(key) in (list, tuple):\n key = [self._get_key_(k) for k in key]\n elif isinstance(key, numpy.ndarray):\n # we try slicing by array\n cols = numpy.array(self.order, dtype=\"U\")\n try:\n key = cols[key]\n except Exception:\n msg = f\"{key} could not be used to slice columns\"\n raise KeyError(msg)\n else:\n raise KeyError(f\"{key}\")\n\n return key\n\n def __contains__(self, key):\n return key in self._order\n\n def __getitem__(self, key):\n if isinstance(key, str) or isinstance(key, int):\n key = self._get_key_(key)\n return self.__dict__[key]\n\n if isinstance(key, slice):\n key, _ = self._template.interpret_index(key)\n key = self._order[key[0]]\n if isinstance(key, numpy.ndarray):\n key = numpy.array(self._order)[key].tolist()\n\n if type(key) in (list, tuple):\n result = [self.__dict__[self._get_key_(k)] for k in key]\n else:\n raise KeyError(f\"{key}\")\n\n return result\n\n def __delitem__(self, key):\n key = self._get_key_(key)\n del self.__dict__[key]\n self._order = tuple(k for k in self._order if k != key)\n self._template = DictArrayTemplate(self._order)\n\n def __iter__(self):\n return iter(k for k in self._order)\n\n def __len__(self):\n return len(self._order)\n\n def __setitem__(self, key, val):\n key = str(key)\n if isinstance(val, str):\n val = [val]\n try:\n _ = len(val)\n except TypeError:\n val = [val]\n\n if self._num_rows == 0:\n self._num_rows = len(val)\n elif len(val) != self._num_rows:\n raise 
ValueError(\"number rows incorrect\")\n\n if key not in self._order:\n self._order += (key,)\n self._template = DictArrayTemplate(self._order)\n\n if not isinstance(val, numpy.ndarray):\n val = cast_to_array(val)\n\n # make immutable, sort of\n val.flags.writeable = False\n\n self.__dict__[key] = val\n\n def __getstate__(self):\n # note that index name is captured by the Table\n result = {\"order\": list(self.order), \"columns\": {}}\n for c in self:\n v = self[c]\n dtype = v.dtype.name\n if dtype.startswith(\"str\"):\n dtype = dtype.replace(\"str\", \"U\")\n result[\"columns\"][c] = dict(values=v.tolist(), dtype=dtype)\n return result\n\n def __setstate__(self, data):\n new = self.__class__()\n for k in (\"type\", \"version\"):\n data.pop(k, None)\n\n order = data.pop(\"order\")\n columns = data.pop(\"columns\")\n for c in order:\n values, dtype = columns[c][\"values\"], columns[c][\"dtype\"]\n values = numpy.array(values, dtype=dtype)\n new[c] = values\n\n self.__dict__.update(new.__dict__)\n\n def __repr__(self):\n d = [f\"'{c}': {v.dtype}\" for c, v in self.items()]\n num = len(d)\n v = d[:5]\n if num > 5:\n v.append(f\"... + {num - 5} more\")\n txt = f\"{self.__class__.__name__}({', '.join(v)})\"\n return txt\n\n def __str__(self):\n return repr(self)\n\n def iter_rows(self):\n columns = [self[c] for c in self]\n for row in zip(*columns):\n yield self._template.wrap(row, dtype=object)\n\n @property\n def index_name(self):\n \"\"\"column name whose values can be used to index table rows\"\"\"\n return self._index_name\n\n @index_name.setter\n def index_name(self, name):\n if name is None:\n self._index_name = None\n return\n\n if name not in self:\n raise ValueError(f\"'{name}' unknown, index must be an existing column\")\n\n # make sure index has unique values\n unique = set(self[name])\n if len(unique) != self._num_rows:\n raise ValueError(f\"cannot use '{name}' as index, not all values unique\")\n\n self._index_name = name\n order = [name] + [c for c in self._order if c != name]\n self._order = tuple(order)\n\n def add_column_from_str(self, name, values):\n \"\"\"adds a column from series of str\n\n Parameters\n ----------\n name : str\n column name\n values : series\n any type, cast to numpy array\n \"\"\"\n values = cast_str_to_numeric(values)\n self[name] = values\n\n def take_columns(self, columns):\n \"\"\"returns new Columns instance with just columns\"\"\"\n result = self.__class__()\n if type(columns) in {int, str}:\n columns = [columns]\n\n columns = self._get_keys_(columns)\n\n for c in columns:\n result[c] = self[c]\n\n return result\n\n @property\n def array(self):\n \"\"\"object array of all columns\"\"\"\n arr = numpy.empty((len(self), self._num_rows), dtype=\"O\")\n for i, c in enumerate(self.order):\n try:\n arr[i] = self[c]\n except ValueError:\n # this can happen of elements of array are tuples, for example\n v = numpy.empty(self._num_rows, dtype=\"O\")\n for j, e in enumerate(self[c]):\n v[j] = e\n arr[i] = v\n\n return arr.T\n\n @property\n def order(self):\n \"\"\"column order\"\"\"\n # if index_name not first, we re-order\n if self._index_name is not None and self._order[0] != self._index_name:\n order = [self._index_name] + [\n c for c in self._order if c != self._index_name\n ]\n self._order = tuple(order)\n return self._order\n\n def to_dict(self):\n \"\"\"returns column based dict\"\"\"\n result = {c: self[c].tolist() for c in self}\n return result\n\n def to_rich_dict(self):\n data = self.__getstate__()\n data[\"type\"] = get_object_provenance(self)\n 
data[\"version\"] = None # todo\n return data\n\n\nclass Table:\n \"\"\"Tabular data. iter operates over rows. Columns are available as an attribute.\"\"\"\n\n def __init__(\n self,\n header=None,\n data=None,\n index=None,\n title=\"\",\n legend=\"\",\n digits=4,\n space=4,\n max_width=1e100,\n column_templates=None,\n format=\"simple\",\n missing_data=\"\",\n **kwargs,\n ):\n attrs = {\n k: v\n for k, v in locals().items()\n if k not in (\"self\", \"__class__\", \"data\", \"header\", \"kwargs\")\n }\n rows = kwargs.pop(\"rows\", None)\n\n assert not (rows and data), \"rows is deprecated, use data\"\n if rows:\n deprecated(\"argument\", \"rows\", \"data\", \"2020.11\")\n data = rows\n\n attrs.update(kwargs)\n\n self._persistent_attrs = attrs\n\n self.columns = Columns()\n self._template = None\n self._index_name = None\n\n if isinstance(data, dict):\n # convert containers like a defaultdict to a standard dict\n data = dict(data)\n\n try:\n len(data[0])\n row_data = True\n except (TypeError, IndexError, KeyError):\n row_data = False\n\n if header and row_data:\n hlen = len(header)\n dcols = len(data[0])\n if hlen != dcols:\n raise ValueError(\n f\"different number of elements in header {hlen} and data row 0 {dcols}\"\n )\n\n data = {c: v for c, v in zip(header, zip(*data))}\n\n if header is None:\n header = list(data) if isinstance(data, dict) else []\n has_index = index is not None\n if has_index and not isinstance(index, str):\n raise TypeError(f\"only str type supported for index, not {type(index)}\")\n\n if data:\n row_order = kwargs.get(\"row_order\", None)\n data = cast_to_1d_dict(data, row_order=row_order)\n if has_index:\n try:\n self.columns[index] = data[index]\n except KeyError:\n raise ValueError(f\"'{index}' not in data\")\n\n for c in header:\n if c == index:\n continue\n self.columns[c] = data[c]\n\n elif header:\n # empty table\n for c in header:\n self.columns[c] = []\n\n # this assignment triggers creation of row template if index specified\n # but only if we have data\n if len(self.columns) > 0:\n self.index_name = index\n elif has_index:\n self._index_name = index\n\n # default title / legend to be empty strings\n self._title = str(title) if title else \"\"\n self._legend = str(legend) if legend else \"\"\n try:\n self._space = \" \" * space\n except TypeError:\n self._space = space\n self._digits = digits\n self._max_width = max_width\n\n # some attributes are not preserved in any file format, so always based\n # on args\n self._column_templates = column_templates or {}\n # define the repr() display policy\n random = 0\n self._repr_policy = dict(head=None, tail=None, random=random, show_shape=True)\n self.format = format\n self._missing_data = missing_data\n\n def __iter__(self):\n return iter(self.columns.iter_rows())\n\n def __len__(self):\n return self.columns._num_rows\n\n def __getitem__(self, names):\n # this is funky, but a side-effect of construction allowing setting\n # prior to having assigned the index column\n self.index_name\n\n if isinstance(names, tuple):\n rows, columns = names\n else:\n rows = names\n columns = self.columns.order\n\n if type(columns) in (str, int):\n columns = [columns]\n else:\n columns = self.columns._get_keys_(columns)\n\n columns = [self.columns._get_key_(c) for c in columns]\n\n # if a index_name has been specified we need to interpret\n # the provided values using the template\n if self._template:\n rows, _ = self._template.interpret_index(rows)\n rows = rows[0]\n\n if not hasattr(rows, \"__len__\") and not isinstance(rows, 
slice):\n rows = (rows,)\n\n if isinstance(rows, numpy.ndarray):\n rows = [i for i, v in enumerate(rows) if v]\n\n # if the length of rows and columns are both 1, return a single value\n if not isinstance(rows, slice) and len(rows) == len(columns) == 1:\n return self.columns[columns[0]][rows]\n\n attr = self._get_persistent_attrs()\n index_name = attr.pop(\"index\")\n result = self.__class__(**attr)\n for c in columns:\n result.columns[c] = self.columns[c][rows]\n\n if index_name in result.columns:\n result.index_name = index_name\n\n return result\n\n def __getstate__(self):\n attrs = self._get_persistent_attrs()\n data = dict(init_table=attrs)\n cols = self.columns.to_rich_dict()\n data[\"data\"] = cols\n return data\n\n def __setstate__(self, data):\n # we're not using these right now\n for k in (\"type\", \"version\"):\n data.pop(k, None)\n\n kwargs = data.pop(\"init_table\")\n index = kwargs.pop(\"index\")\n table = self.__class__(**kwargs)\n table.columns.__setstate__(data[\"data\"])\n table.index_name = index\n self.__dict__.update(table.__dict__)\n\n def __repr__(self):\n if self.shape == (0, 0):\n return \"0 rows x 0 columns\"\n\n table, shape_info, unset_columns = self._get_repr_()\n if not self._repr_policy[\"show_shape\"]:\n shape_info = \"\"\n result = (\n \"\\n\".join([str(table), shape_info, unset_columns])\n if unset_columns\n else \"\\n\".join([str(table), shape_info])\n )\n return result\n\n def __str__(self):\n if self.shape == (0, 0):\n return \"\"\n\n return self.to_string(self.format)\n\n def _get_repr_(self):\n \"\"\"returns a table for __repr__\"\"\"\n rn = self._repr_policy[\"random\"]\n head = self._repr_policy[\"head\"]\n tail = self._repr_policy[\"tail\"]\n if head is None and tail is None:\n if self.shape[0] < 50:\n head = self.shape[0]\n tail = None\n else:\n head, tail = 5, 5\n self._repr_policy[\"head\"] = head\n self._repr_policy[\"tail\"] = tail\n\n shape_info = \"\"\n ellipsis = None\n if rn:\n indices = numpy.random.choice(self.shape[0], size=rn, replace=False)\n indices = list(sorted(indices))\n shape_info = f\"Random selection of {rn} rows\"\n elif all([head, tail]):\n indices = list(range(head)) + list(\n range(self.shape[0] - tail, self.shape[0])\n )\n ellipsis = \"...\"\n elif head:\n indices = list(range(head))\n elif tail:\n indices = list(range(self.shape[0] - tail, self.shape[0]))\n else:\n indices = list(range(self.shape[0]))\n\n rows = {}\n unset_columns = []\n for c in self.header:\n if len(self.columns[c]):\n rows[c] = [self.columns[c][i] for i in indices]\n else:\n unset_columns.append(c)\n\n if ellipsis:\n for k, v in rows.items():\n v.insert(head, ellipsis)\n\n shape_info += f\"\\n{self.shape[0]:,} rows x {self.shape[1]:,} columns\"\n unset_columns = (\n \"unset columns: %s\" % \", \".join(unset_columns) if unset_columns else None\n )\n\n kwargs = self._get_persistent_attrs()\n header = self.header\n if rows.keys():\n header = tuple(rows.keys())\n table = self.__class__(header=header, data=rows, **kwargs)\n table._column_templates.update(self._column_templates)\n return table, shape_info, unset_columns\n\n def _repr_html_(self):\n \"\"\"returns html, used by Jupyter\"\"\"\n base_colour = \"rgba(161, 195, 209, {alpha})\"\n colour = base_colour.format(alpha=0.25)\n\n def row_cell_func(val, row, col):\n if self.index_name is not None and col == 0:\n klass = f' class=\"index\"'\n else:\n klass = \"\"\n val = f\"<td{klass}>{val}</td>\"\n return val\n\n table, shape_info, unset_columns = self._get_repr_()\n shape_info = (\n f\"<p>{shape_info}; 
unset columns={unset_columns}</p>\"\n if unset_columns\n else f\"<p>{shape_info}</p>\"\n )\n if not self._repr_policy[\"show_shape\"]:\n shape_info = \"\"\n\n if self.shape == (0, 0):\n return shape_info\n\n title, legend = table.title, table.legend\n # current rich_html does not provide a good mechanism for custom\n # formatting of titles, legends\n table.title, table.legend = None, None\n head_colour = base_colour.format(alpha=0.75)\n element_format = dict(thead=f'<thead class=\"head_cell\">')\n html = table.to_rich_html(\n row_cell_func=row_cell_func, element_formatters=element_format\n )\n if title or legend:\n title = title or \"\"\n legend = legend or \"\"\n caption = (\n \"<caption>\"\n f'<span class=\"cell_title\">{title}</span>'\n f'<br><span class=\"cell_legend\">{legend}</span></caption>'\n )\n html = html.splitlines()\n html.insert(1, caption)\n html = \"\\n\".join(html)\n html = html.splitlines()\n html.insert(\n 0,\n \"\\n\".join(\n [\n \"<style>\",\n \".c3table table {margin: 10px 0;}\",\n \".c3table tr:last-child {border-bottom: 1px solid #000;} \",\n \".c3table tr > th {text-align: center !important; padding: 0 5px;}\",\n \".c3table tr > td {text-align: right !important; padding: 5px;}\",\n \".c3table tr:nth-child(even) {background: #f7f7f7;}\",\n \".c3table .index {background: \"\n + f\"{colour}\"\n + \"; font-weight: 600;}\",\n \".c3table .head_cell {background: \"\n + f\"{head_colour}\"\n + \"; font-weight: bold; text-align: center;}\",\n \".c3table caption {color: rgb(250, 250, 250); background: rgba(30, 140, 200, 1); padding: 3px; white-space: nowrap; caption-side: top;}\",\n \".c3table .cell_title {font-weight: bold;}\",\n \"</style>\",\n '<div class=\"c3table\">',\n ]\n ),\n )\n html = \"\\n\".join([\"\\n\".join(html), shape_info, \"</div>\"])\n return html\n\n def _get_persistent_attrs(self):\n attrs = UnionDict(self._persistent_attrs.copy())\n return attrs\n\n @property\n def title(self):\n return self._title\n\n @title.setter\n def title(self, value):\n self._title = value\n self._persistent_attrs[\"title\"] = value\n\n @property\n def legend(self):\n return self._legend\n\n @legend.setter\n def legend(self, value):\n self._legend = value\n self._persistent_attrs[\"legend\"] = value\n\n @property\n def space(self):\n return self._space\n\n @space.setter\n def space(self, value):\n try:\n self._space = \" \" * value\n except TypeError:\n self._space = value\n\n self._persistent_attrs[\"space\"] = value\n\n def set_repr_policy(self, head=None, tail=None, random=0, show_shape=True):\n \"\"\"specify policy for repr(self)\n\n Parameters\n ----------\n\n - head: number of top rows to include in the represented display\n - tail: number of bottom rows to include in the represented display\n - random: number of rows to sample randomly (supersedes head/tail)\n - show_shape: boolean to determine if table shape info is displayed\n \"\"\"\n if not any([head, tail, random]):\n self._repr_policy[\"show_shape\"] = show_shape\n return\n if random:\n assert (\n type(random) == int and random > 0\n ), \"random must be a positive integer\"\n head = tail = None\n self._repr_policy = dict(\n head=head, tail=tail, random=random, show_shape=show_shape\n )\n\n @property\n def format(self):\n \"\"\"the str display format\"\"\"\n return self._format\n\n @format.setter\n def format(self, new=\"simple\"):\n \"\"\"the str display format\"\"\"\n new = new.lower()\n if new not in table_format.known_formats:\n msg = (\n f\"{new} not a supported format, see cogent3.format.table.known_formats\"\n )\n 
raise ValueError(msg)\n\n self._format = new\n\n def format_column(self, column_head, format_template):\n \"\"\"Provide a formatting template for a named column.\n\n Parameters\n ----------\n column_head\n the column label.\n format_template\n string formatting template or a function that will handle the formatting.\n \"\"\"\n test_val = self.columns[column_head].tolist()[0]\n try:\n _ = (\n format_template(test_val)\n if callable(format_template)\n else format_template % test_val\n )\n except Exception as err:\n msg = f\"{format_template} invalid for {column_head}: {err.args[0]}\"\n raise ValueError(msg)\n\n self._column_templates[column_head] = format_template\n\n def head(self, nrows=5):\n \"\"\"displays top nrows\"\"\"\n repr_policy = self._repr_policy\n nrows = min(nrows, self.shape[0])\n show_shape = self._repr_policy[\"show_shape\"]\n self._repr_policy = dict(\n head=nrows, tail=None, random=None, show_shape=show_shape\n )\n display(self)\n self._repr_policy = repr_policy\n\n def tail(self, nrows=5):\n \"\"\"displays bottom nrows\"\"\"\n repr_policy = self._repr_policy\n nrows = min(nrows, self.shape[0])\n show_shape = self._repr_policy[\"show_shape\"]\n self._repr_policy = dict(\n head=None, tail=nrows, random=None, show_shape=show_shape\n )\n display(self)\n self._repr_policy = repr_policy\n\n @property\n def index_name(self):\n \"\"\"column name whose values can be used to index table rows\"\"\"\n if self._index_name is not None and not self._template:\n self.columns.index_name = self._index_name\n self.index_name = self._index_name\n\n return self._index_name\n\n @index_name.setter\n def index_name(self, name):\n self.columns.index_name = name\n self._index_name = name\n self._template = None if name is None else DictArrayTemplate(self.columns[name])\n\n @property\n def header(self):\n return self.columns.order\n\n @property\n def shape(self):\n return (self.columns._num_rows, len(self.columns))\n\n @property\n def array(self):\n return self.columns.array\n\n def cross_join(self, other, **kwargs):\n \"\"\"cross join, or full outer join, of self with other\n\n Notes\n -----\n The column headers of other are made unique by prefixing them with\n other's title, or 'right' when other has no title, e.g. 'Name'\n becomes 'right_Name'.\n \"\"\"\n self_range = range(self.shape[0])\n other_range = range(other.shape[0])\n self_selected, other_selected = list(zip(*product(self_range, other_range)))\n joined_data = {c: self.columns[c].take(self_selected) for c in self.columns}\n col_prefix = \"right\" if not other.title else other.title\n other_data = {\n f\"{col_prefix}_{c}\": other.columns[c].take(other_selected)\n for c in other.columns\n }\n\n joined_data.update(other_data)\n new_header = list(self.columns.order) + [\n f\"{col_prefix}_{c}\" for c in other.columns\n ]\n attrs = self._get_persistent_attrs()\n attrs.pop(\"title\", None)\n attrs |= kwargs\n joined = self.__class__(**attrs)\n for c in new_header:\n joined.columns[c] = joined_data[c]\n return joined\n\n def inner_join(\n self, other, columns_self=None, columns_other=None, use_index=True, **kwargs,\n ):\n \"\"\"inner join of self with other\n\n Parameters\n ----------\n other\n A table object which will be joined with this\n table. If other has a title, it prefixes the new column names,\n otherwise 'right' is used.\n columns_self, columns_other\n indices of key columns that will be compared in the join operation.\n Can be either column index, or a string matching the column header.\n The order matters, and the dimensions of columns_self and\n columns_other have to match. 
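For example, columns_self=[\"gene\"] with columns_other=[\"id\"] joins rows\n where a gene value equals an id value (illustrative column names). 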
A row will be included in the output iff\n self[row, columns_self] == other[row, columns_other] for every key column\n use_index\n if no columns specified and both self and other have a nominated\n index, this will be used.\n\n Notes\n -----\n The column headers of other are made unique by prefixing them with\n other's title, or 'right' when other has no title, e.g. 'Name'\n becomes 'right_Name'.\n \"\"\"\n col_prefix = \"right\" if not other.title else other.title\n\n if columns_self:\n columns_self = self.columns._get_keys_(columns_self)\n\n if columns_other:\n columns_other = other.columns._get_keys_(columns_other)\n\n columns_self = [columns_self] if isinstance(columns_self, str) else columns_self\n columns_other = (\n [columns_other] if isinstance(columns_other, str) else columns_other\n )\n if columns_self is columns_other is None and not use_index:\n # we do the natural inner join\n shared = set(self.columns) & set(other.columns)\n columns_self = [c for c in self.columns if c in shared]\n columns_other = [c for c in other.columns if c in shared]\n elif columns_self is columns_other is None:\n if not (self.index_name and other.index_name):\n msg = (\n \"indexes not specified, set use_index=False for natural inner join\"\n )\n raise ValueError(msg)\n columns_self = [self.index_name]\n columns_other = [other.index_name]\n elif columns_self is None or columns_other is None:\n # the same column labels will be used for both tables\n columns_self = columns_self or columns_other\n columns_other = columns_self or columns_other\n\n if len(columns_self) != len(columns_other):\n raise RuntimeError(\n \"Error during table join: key columns have different dimensions!\"\n )\n\n output_mask = [c for c in other.columns if c not in columns_other]\n\n # key is a tuple made from specified columns; data is the row index\n other_row_index = defaultdict(list)\n # subtable = other.columns.take_columns(columns_other)\n subtable = other[:, columns_other]\n for row_index, row in enumerate(subtable.columns.array):\n # insert new entry for each row\n other_row_index[tuple(row)].append(row_index)\n\n other_selected = []\n self_selected = []\n subtable = self[:, columns_self]\n for row_index, row in enumerate(subtable.columns.array):\n # assemble key for query of other\n key = tuple(row)\n if key not in other_row_index:\n continue\n\n self_selected.extend([row_index] * len(other_row_index[key]))\n other_selected.extend(other_row_index[key])\n\n joined_data = {c: self.columns[c][self_selected] for c in self.columns}\n other_data = {\n f\"{col_prefix}_{c}\": other.columns[c][other_selected] for c in output_mask\n }\n\n joined_data.update(other_data)\n new_header = list(self.columns.order) + [\n f\"{col_prefix}_{c}\" for c in output_mask\n ]\n attr = self._get_persistent_attrs()\n attr.pop(\"title\", None)\n attr |= kwargs\n joined = self.__class__(**attr)\n for c in new_header:\n joined.columns[c] = joined_data[c]\n return joined\n\n def joined(\n self, other, columns_self=None, columns_other=None, inner_join=True, **kwargs,\n ):\n \"\"\"returns a new table containing the join of this table and\n other. 
See docstring for inner_join, or cross_join\n \"\"\"\n if not inner_join:\n assert (\n columns_self is columns_other is None\n ), \"Cannot specify column indices for a cross join\"\n return self.cross_join(other, **kwargs)\n\n return self.inner_join(\n other=other,\n columns_self=columns_self,\n columns_other=columns_other,\n use_index=False,\n **kwargs,\n )\n\n # todo check the type info\n # todo implement negate argument\n # todo implement check that callable returns bool\n def get_row_indices(self, callback, columns, negate=False):\n \"\"\"returns boolean array of callback values given columns\"\"\"\n subset = self[:, columns]\n if not isinstance(callback, Callable):\n data = subset\n else:\n data = subset.array\n\n num_columns = len(columns)\n match = not negate\n indices = numpy.array(\n [\n True\n if _callback(callback, row=row, num_columns=num_columns) == match\n else False\n for row in data\n ]\n )\n return indices\n\n def filtered(self, callback, columns=None, **kwargs):\n \"\"\"Returns a table with rows satisfying the provided callback function.\n\n Parameters\n ----------\n columns\n the columns whose values determine whether a row is to be included.\n callback\n Can be a function, which takes rows and returns True/False, or a\n string representing valid python code to be evaluated.\n\n Notes\n -----\n Row data provided to callback is a 1D list if more than one column,\n single value (row[col]) otherwise.\n \"\"\"\n # no point filtering if no rows, just return self\n if self.shape[0] == 0:\n return self\n\n if isinstance(columns, str):\n columns = (columns,)\n\n if columns is None:\n columns = self.columns.order\n\n indices = self.get_row_indices(callback=callback, columns=columns)\n attr = self._get_persistent_attrs()\n attr |= kwargs\n result = self.__class__(**attr)\n for c in self.columns:\n result.columns[c] = self.columns[c][indices]\n return result\n\n def filtered_by_column(self, callback, **kwargs):\n \"\"\"Returns a table with columns identified by callback\n\n Parameters\n ----------\n callback\n A function which takes a column (a numpy array) and returns\n True/False, or a string representing valid python code to be evaluated.\n \"\"\"\n columns = [c for c in self.columns if callback(self.columns[c])]\n attr = self._get_persistent_attrs()\n attr |= kwargs\n result = self.__class__(**attr)\n for c in columns:\n result.columns[c] = self.columns[c]\n return result\n\n def count(self, callback, columns=None, **kwargs):\n \"\"\"Returns number of rows for which the provided callback\n function returns True when passed row data from columns. Row data\n is a 1D list if more than one column, raw row[col] value otherwise.\n\n Parameters\n ----------\n columns\n the columns whose values determine whether a row is to\n be included.\n callback\n Can be a function, which takes the subtable defined by columns\n and returns True/False, or a string representing valid\n python code to be evaluated.\n\n \"\"\"\n # no rows, value must be 0\n if self.shape[0] == 0:\n return 0\n\n if isinstance(columns, str):\n columns = (columns,)\n\n if columns is None:\n columns = self.columns.order\n\n indices = self.get_row_indices(callback=callback, columns=columns)\n return indices.sum()\n\n def count_unique(self, columns=None):\n \"\"\"count occurrences of unique combinations of columns\n\n Parameters\n ----------\n columns\n name of one or more columns. 
If None, all columns are used\n\n Returns\n -------\n CategoryCounter instance\n \"\"\"\n from cogent3.maths.stats.number import CategoryCounter\n\n if columns is None:\n columns = self.columns.order\n\n subset = self.columns.take_columns(columns)\n if len(subset) == 1:\n data = subset[0].tolist()\n else:\n data = subset.array\n data = list(tuple(e) for e in data)\n\n return CategoryCounter(data=data)\n\n def distinct_values(self, columns):\n \"\"\"returns the set of distinct values for the named column(s)\"\"\"\n data = [tuple(r) for r in self[:, columns].array.tolist()]\n result = set(data)\n result = {d[0] if len(d) == 1 else d for d in result}\n return result\n\n def appended(self, new_column, *tables, **kwargs):\n \"\"\"Concatenates an arbitrary number of tables together\n\n Parameters\n ----------\n new_column\n provide a heading for the new column, each table's\n title will be placed in it. If None, no additional\n column is added.\n tables\n series of Table instances\n\n Notes\n -----\n All tables must have the same columns.\n \"\"\"\n if new_column is not None:\n assert new_column not in self.columns, f\"'{new_column}' already exists\"\n # default title is no title\n kwargs[\"title\"] = kwargs.get(\"title\", \"\")\n attr = self._get_persistent_attrs()\n attr |= kwargs\n result = self.__class__(**attr)\n # convert series of tables\n if isinstance(tables[0], tuple) or isinstance(tables[0], list):\n tables = tuple(tables[0])\n # for each table, determine its number of rows and create an\n # equivalent length vector of its title\n columns = set(self.columns.order)\n new_col = []\n table_series = (self,) + tables\n raw_data = defaultdict(list)\n for table in table_series:\n assert set(table.columns.order) == columns, \"columns don't match\"\n if new_column is not None:\n new_col.extend([table.title] * table.shape[0])\n data = table.columns.to_dict()\n for c, v in data.items():\n raw_data[c].extend(v)\n\n dtypes = {c: self.columns[c].dtype for c in self.columns}\n if new_column is not None:\n columns = (new_column,) + self.columns.order\n raw_data[new_column] = new_col\n dtypes[new_column] = \"<U15\"\n else:\n columns = self.columns.order\n for c in columns:\n result.columns[c] = numpy.array(raw_data[c], dtype=dtypes[c])\n return result\n\n def get_columns(self, columns, with_index=True):\n \"\"\"select columns from self with index_name unless excluded\n\n Parameters\n ----------\n columns : string or sequence of strings\n names of columns\n with_index : bool\n If index_name is set, includes with columns.\n\n Returns\n -------\n Table\n \"\"\"\n if self.index_name and with_index:\n columns = [self.index_name] + [c for c in columns if c != self.index_name]\n return self[:, columns]\n\n def with_new_column(self, new_column, callback, columns=None, dtype=None, **kwargs):\n \"\"\"Returns new table with an additional column, computed using callback.\n\n Parameters\n ----------\n new_column\n new column heading\n columns\n the columns whose values are provided to callback.\n callback\n Can be a function, which takes the subtable by columns and returns\n the value for the new column, or a string representing valid\n python code to be evaluated.\n dtype\n numpy type of result\n \"\"\"\n attr = self._get_persistent_attrs()\n index = attr.pop(\"index\")\n attr |= kwargs\n result = self.__class__(**attr)\n for c in self.columns:\n if c == new_column:\n continue\n result.columns[c] = self.columns[c]\n\n if columns is None:\n columns = self.columns.order\n\n if isinstance(columns, 
str):\n columns = (columns,)\n\n subset = self[:, columns]\n if not isinstance(callback, Callable):\n data = subset\n else:\n data = subset.array\n\n num_columns = len(columns)\n values = numpy.array(\n [_callback(callback, row=row, num_columns=num_columns) for row in data]\n )\n\n if dtype:\n values = numpy.array(values, dtype=dtype)\n\n result.columns[new_column] = values\n\n if index in result.columns:\n result.index_name = index\n\n return result\n\n # todo deprecate this method\n def with_new_header(self, old, new, **kwargs):\n \"\"\"returns a new Table with old header labels replaced by new\n\n Parameters\n ----------\n old\n the old column header(s). Can be a string or series of them.\n new\n the new column header(s). Can be a string or series of them.\n \"\"\"\n if isinstance(old, str):\n old = [old]\n new = [new]\n\n assert len(old) == len(new), \"Mismatched number of old/new labels\"\n attr = self._get_persistent_attrs()\n attr |= kwargs\n result = self.__class__(**attr)\n for c in self.columns:\n key = c\n if c in old:\n index = old.index(c)\n key = new[index]\n result.columns[key] = self.columns[c]\n return result\n\n def sum_columns(self, columns=None, strict=True):\n \"\"\"return sum of indicated columns\n\n Parameters\n ----------\n columns\n column name(s) or indices\n strict\n if False, ignores cells with non numeric values.\n\n \"\"\"\n if columns is None:\n columns = self.columns.order\n\n if isinstance(columns, str) or isinstance(columns, int):\n columns = [columns]\n\n columns = self.columns[columns]\n func = numpy.sum if strict else _numeric_sum\n result = [func(c) for c in columns]\n if len(result) == 1:\n result = result[0]\n return result\n\n def sum_rows(self, indices=None, strict=True):\n \"\"\"return sum of indicated rows\n\n Parameters\n ----------\n indices\n row indices\n strict\n if False, ignores cells with non numeric values.\n \"\"\"\n if indices is None:\n data = self.array\n else:\n data = self[indices, :].array\n\n # a multi-rowed result\n if strict:\n result = data.sum(axis=1).tolist()\n else:\n result = [_numeric_sum(row) for row in data]\n\n if len(result) == 1:\n result = result[0]\n\n return result\n\n # todo change indices to columns\n def summed(self, indices=None, col_sum=True, strict=True):\n \"\"\"returns the sum of numerical values for column(s)/row(s)\n\n Parameters\n ----------\n indices\n column name(s) or indices or row indices\n col_sum\n sums values in the indicated column, the default. If\n False, returns the row sum.\n strict\n if False, ignores cells with non numeric values.\n \"\"\"\n if col_sum:\n return self.sum_columns(columns=indices, strict=strict)\n\n return self.sum_rows(indices=indices, strict=strict)\n\n def normalized(self, by_row=True, denominator_func=None, **kwargs):\n \"\"\"returns a table with elements expressed as a fraction according\n to the results from func\n\n Parameters\n ----------\n by_row\n normalisation done by row\n denominator_func\n a callback function that takes an array and\n returns a value to be used as the denominator. 
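For example, numpy.max expresses each element relative to the maximum\n of its row (or of its column when by_row is False). 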
Default is sum.\n\n \"\"\"\n attr = self._get_persistent_attrs()\n attr |= kwargs\n result = self.__class__(**attr)\n denominator_func = denominator_func if callable(denominator_func) else numpy.sum\n if not by_row:\n for c in self.columns:\n v = self.columns[c]\n result.columns[c] = v / denominator_func(v)\n\n return result\n\n totals = numpy.array([denominator_func(r) for r in self.array])\n for i, c in enumerate(self.columns):\n result.columns[c] = self.columns[i] / totals\n\n return result\n\n def sorted(self, columns=None, reverse=False, **kwargs):\n \"\"\"Returns a new table sorted according to columns order.\n\n Parameters\n ----------\n columns\n column headings, their order determines the sort order.\n reverse\n column headings, these columns will be reverse sorted.\n\n Either can be provided as just a single string, or a series of\n strings.\n\n Notes\n -----\n If only reverse is provided, that order is used.\n \"\"\"\n reverse = reverse if reverse else []\n if reverse and columns is None:\n columns = reverse\n\n if columns is None:\n columns = list(self.columns)\n\n if isinstance(columns, str):\n columns = [columns]\n\n if isinstance(reverse, str):\n reverse = [reverse]\n\n columns = list(columns)\n\n if reverse and not (set(columns) & set(reverse)):\n for c in reverse:\n if c in columns:\n continue\n\n columns.append(c)\n\n dtypes = [(c, self.columns[c].dtype) for c in columns]\n data = numpy.array(self.columns[columns], dtype=\"O\").T\n for c in reverse:\n index = columns.index(c)\n dtype = self.columns[c].dtype.name\n if \"int\" in dtype or \"float\" in dtype or \"complex\" in dtype:\n func = _reverse_num\n else:\n func = _reverse_str\n func = numpy.vectorize(func)\n data[:, index] = func(data[:, index])\n\n data = numpy.rec.fromarrays(data.copy().T, dtype=dtypes)\n indices = data.argsort()\n\n attr = self._get_persistent_attrs()\n attr |= kwargs\n result = Table(**attr)\n for c in self.columns:\n result.columns[c] = self.columns[c][indices]\n\n return result\n\n def _formatted(self, missing_data=\"\", stripped=False):\n \"\"\"returns self as formatted strings\n\n Parameters\n ----------\n missing_data : str\n default str value for missing\n stripped : bool\n if True, removes padding\n\n \"\"\"\n missing_data = missing_data or self._missing_data\n formatted = []\n for c in self.columns.order:\n data = self.columns[c]\n format_spec = self._column_templates.get(c, None)\n frmt, c = formatted_array(\n data,\n c,\n format_spec=format_spec,\n missing_data=missing_data,\n precision=self._digits,\n )\n if stripped:\n c = c.strip()\n frmt = [v.strip() for v in frmt]\n formatted.append([c] + frmt)\n\n formatted = list([list(e) for e in zip(*formatted)])\n return formatted\n\n def to_csv(self, with_title=False, with_legend=False):\n \"\"\"return table formatted as comma separated values\n\n Parameters\n ----------\n with_title : bool\n include the table title\n with_legend : bool\n include table legend\n\n Returns\n -------\n str\n \"\"\"\n formatted_table = self._formatted(stripped=True)\n header = formatted_table.pop(0)\n title = self.title if with_title else None\n legend = self.legend if with_legend else None\n result = table_format.separator_format(\n header, formatted_table, title=title, legend=legend, sep=\",\"\n )\n return result\n\n def to_latex(\n self, concat_title_legend=True, justify=None, label=None, position=None\n ):\n \"\"\"Returns the text a LaTeX table.\n\n Parameters\n ----------\n concat_title_legend : bool\n the table caption is formed by concatenating the table 
title and legend\n justify\n column justification, default is right aligned.\n label\n for cross referencing\n position\n table page position, default is here, top separate page\n\n Notes\n -----\n The \\\\caption*{} command is provided with the caption package. See\n https://ctan.org/pkg/caption for more details.\n \"\"\"\n formatted_table = self._formatted()\n header = formatted_table.pop(0)\n caption = self.title or None\n legend = self.legend or None\n if concat_title_legend and (caption or legend):\n caption = \" \".join([caption or \"\", legend or \"\"])\n caption = caption.strip()\n legend = None\n result = table_format.latex(\n formatted_table,\n header,\n caption=caption,\n legend=legend,\n justify=justify,\n label=label,\n position=position,\n )\n return result\n\n def to_markdown(self, space=1, justify=None):\n \"\"\"\n returns markdown formatted table\n\n Parameters\n ----------\n space\n number of spaces surrounding the cell contents, must be >= 1\n justify\n characters indicating alignment of columns\n\n Returns\n -------\n str\n \"\"\"\n formatted_table = self._formatted()\n header = formatted_table.pop(0)\n return table_format.markdown(\n header, formatted_table, space=space, justify=justify\n )\n\n def to_rst(self, csv_table=False):\n \"\"\"returns rst formatted table\n\n Parameters\n ----------\n csv_table : bool\n use csv-directive, grid table otherwise\n\n Returns\n -------\n str\n \"\"\"\n stripped = csv_table\n formatted_table = self._formatted(stripped=stripped)\n header = formatted_table.pop(0)\n if csv_table:\n result = table_format.rst_csv_table(\n header, formatted_table, title=self.title, legend=self.legend\n )\n else:\n result = table_format.grid_table_format(\n header, formatted_table, title=self.title, legend=self.legend\n )\n return result\n\n def to_string(\n self,\n format=\"\",\n borders=True,\n sep=None,\n center=False,\n concat_title_legend=True,\n **kwargs,\n ):\n \"\"\"Return the table as a formatted string.\n\n Parameters\n ----------\n format\n possible formats are 'rest'/'rst', 'markdown'/'md',\n 'latex', 'html', 'phylip', 'bedgraph', 'csv', 'tsv', or 'simple'\n (default).\n sep\n A string separator for delineating columns, e.g. ',' or\n '\\t'. Overrides format.\n center : bool\n content is centered in the column, default is right\n justified\n concat_title_legend : bool\n Concat the title and legend.\n\n Notes\n -----\n If format is bedgraph, assumes that column headers are chrom, start,\n end, value. 
In that order!\n \"\"\"\n if format == \"bedgraph\":\n # todo remove requirement for column order\n assert self.shape[1] == 4, \"bedgraph format is for 4 column tables\"\n # assuming that header order is chrom, start, end, val\n formatted_table = bedgraph.bedgraph(self.sorted().array.tolist(), **kwargs)\n return formatted_table\n\n if format.lower() == \"phylip\":\n missing_data = \"0.0000\"\n else:\n missing_data = self._missing_data\n\n if format.lower() in (\"tsv\", \"csv\"):\n sep = sep or {\"tsv\": \"\\t\", \"csv\": \",\"}[format.lower()]\n format = \"\"\n\n if sep != \"\\t\":\n sep = sep.strip() if sep else None\n\n if sep == \",\":\n return self.to_csv(**kwargs)\n\n if sep == \"\\t\":\n return self.to_tsv(**kwargs)\n\n # convert self to a 2D list\n if format != \"phylip\":\n formatted_table = self._formatted(stripped=sep is not None)\n else:\n columns = [c for c in self.columns if c != self.index_name]\n table = self[:, columns]\n formatted_table = table._formatted(missing_data=missing_data)\n\n header = formatted_table.pop(0)\n args = (header, formatted_table, self.title, self.legend)\n\n if format in (\"rest\", \"rst\"):\n return self.to_rst(**kwargs)\n elif format in (\"markdown\", \"md\"):\n return self.to_markdown(**kwargs)\n elif format.endswith(\"tex\"):\n return self.to_latex(concat_title_legend=concat_title_legend, **kwargs)\n elif format == \"html\":\n return self.to_rich_html(**kwargs)\n elif format == \"phylip\":\n # need to eliminate row identifiers\n return table_format.phylip_matrix(formatted_table, header)\n elif sep:\n return table_format.separator_format(*args, sep=sep)\n else:\n return table_format.simple_format(\n *args + (self._max_width, self.index_name, borders, self.space)\n )\n\n def to_tsv(self, with_title=False, with_legend=False):\n \"\"\"return table formatted as tab separated values\n\n Parameters\n ----------\n with_title : bool\n include the table title\n with_legend : bool\n include table legend\n\n Returns\n -------\n str\n \"\"\"\n formatted_table = self._formatted(stripped=True)\n header = formatted_table.pop(0)\n title = self.title if with_title else None\n legend = self.legend if with_legend else None\n result = table_format.separator_format(\n header, formatted_table, title=title, legend=legend, sep=\"\\t\"\n )\n return result\n\n def to_rich_html(\n self,\n row_cell_func=None,\n header_cell_func=None,\n element_formatters=None,\n merge_identical=False,\n compact=False,\n ):\n \"\"\"returns just the table as html.\n\n Parameters\n ----------\n row_cell_func\n callback function that formats the row values. Must\n take the row value and coordinates (row index, column index).\n header_cell_func\n callback function that formats the column headings\n must take the header label value and coordinate\n element_formatters\n a dictionary of specific callback funcs for\n formatting individual html table elements.\n e.g. 
{'table': lambda x: '<table border=\"1\" class=\"docutils\">'}\n merge_identical\n cells within a row are merged to one span.\n\n \"\"\"\n element_formatters = element_formatters or {}\n formatted_table = self.array.tolist()\n header, formatted_table = table_format.formatted_cells(\n formatted_table,\n self.header,\n digits=self._digits,\n column_templates=self._column_templates,\n missing_data=self._missing_data,\n )\n subtables = table_format.get_continuation_tables(\n header,\n formatted_table,\n identifiers=self.index_name,\n max_width=self._max_width,\n )\n tables = []\n title = self.title if self.title else \"\"\n if title:\n title = escape(title)\n legend = self.legend if self.legend else \"\"\n if legend:\n legend = escape(legend)\n for i, (h, t) in enumerate(subtables):\n # but we strip the cell spacing\n sh = [v.strip() for v in h]\n t = [[c.strip() for c in r] for r in t]\n\n if title and i == 0:\n st = element_formatters.get(\n \"caption\", f'<span style=\"font-weight:bold\">{title}</span>'\n )\n elif title:\n st = element_formatters.get(\n \"caption\", f'<span style=\"font-weight:bold\">continuation</span>'\n )\n else:\n st = None\n\n if legend and i == 0:\n title = f\"{st} {legend}\" if st else legend\n\n caption = st if st else None\n subtable = table_format.rich_html(\n t,\n row_cell_func=row_cell_func,\n header=sh,\n header_cell_func=header_cell_func,\n element_formatters=element_formatters,\n merge_identical=merge_identical,\n compact=compact,\n caption=caption,\n )\n tables.append(subtable)\n return \"\\n\".join(tables)\n\n def tolist(self, columns=None):\n \"\"\"Returns raw data as a list\n\n Parameters\n ----------\n columns\n if None, all data are returned\n\n Notes\n -----\n If one column, a 1D list is returned.\n \"\"\"\n if columns is None:\n columns = self.columns.order\n\n columns = [columns] if isinstance(columns, str) else columns\n if len(columns) == 1:\n result = self.columns[columns[0]].tolist()\n return result\n\n subtable = self.get_columns(columns)\n result = subtable.columns.array.tolist()\n\n return result\n\n @extend_docstring_from(DictArray.to_dict)\n def to_dict(self, flatten=False):\n if self.index_name:\n index = self.columns[self.index_name]\n else:\n index = self.shape[0]\n template = DictArrayTemplate(index, self.columns.order)\n darr = template.wrap(self.array)\n return darr.to_dict(flatten=flatten)\n\n def to_rich_dict(self):\n data = self.__getstate__()\n data[\"type\"] = get_object_provenance(self)\n data[\"version\"] = None # todo\n return data\n\n def to_json(self):\n data = self.to_rich_dict()\n return json.dumps(data)\n\n def to_dataframe(self, categories=None):\n \"\"\"returns pandas DataFrame instance\n\n Parameters\n ----------\n categories\n converts these columns to category dtype in the data\n frame. 
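Accepts a single column name or a series of names. 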
Note, categories are not ordered.\n \"\"\"\n try:\n from pandas import DataFrame\n except ImportError:\n raise ImportError(\"pandas not installed\")\n\n index = None if not self.index_name else self.columns[self.index_name]\n data = {c: self.columns[c] for c in self.columns if c != self.index_name}\n df = DataFrame(data=data, index=index)\n if categories is not None:\n categories = [categories] if type(categories) == str else categories\n df = df.astype({n: \"category\" for n in categories})\n\n return df\n\n def to_plotly(self, width=500, font_size=12, layout=None, **kwargs):\n \"\"\"returns a Plotly Table\"\"\"\n from cogent3.draw.drawable import Drawable\n\n rows = self.array.tolist()\n header, rows = table_format.formatted_cells(\n rows,\n self.header,\n digits=self._digits,\n column_templates=self._column_templates,\n missing_data=self._missing_data,\n center=False,\n )\n # we strip white space padding from header and cells\n header = [e.strip() for e in header]\n rows = [[e.strip() for e in row] for row in rows]\n rows = list(zip(*rows))\n if self.index_name:\n body_colour = [\"white\"] * self.shape[0]\n index_colour = [\"rgba(161, 195, 209, 0.5)\"] * self.shape[0]\n colours = [index_colour] + [body_colour[:] for i in range(self.shape[1])]\n rows[0] = [f\"<b>{e}</b>\" for e in rows[0]]\n else:\n colours = \"white\"\n\n tab = UnionDict(\n type=\"table\",\n header=dict(\n values=[f\"<b>{c}</b>\" for c in header],\n fill=dict(color=\"rgba(161, 195, 209, 1)\"),\n font=dict(size=font_size),\n align=\"center\",\n ),\n cells=dict(values=rows, fill=dict(color=colours)),\n )\n draw = Drawable()\n aspect_ratio = self.shape[0] / self.shape[1]\n layout = layout or {}\n default_layout = dict(\n width=width,\n height=aspect_ratio * width,\n autosize=False,\n title=self.title,\n margin=dict(l=10, r=10, t=30, b=10, pad=10),\n )\n default_layout.update(layout)\n draw.traces.append(tab)\n draw.layout |= default_layout\n return draw\n\n def to_contingency(self, columns):\n \"\"\"construct object that can be used for statistical tests\n\n Parameters\n ----------\n columns\n columns to include. These correspond to contingency column\n labels. The row labels come from values under the index_name\n column.\n\n Returns\n -------\n CategoryCounts, an object for performing statistical tests on\n contingency tables.\n\n Notes\n -----\n Only applies to cases where an index_name is defined. The selected columns\n must be int types and represent the counts of corresponding categories.\n \"\"\"\n from cogent3.maths.stats.contingency import CategoryCounts\n from cogent3.util.dict_array import DictArrayTemplate\n\n if self.index_name is None:\n raise ValueError(f\"requires index_name be set\")\n\n columns = [columns] if isinstance(columns, str) else columns\n if not set(columns) <= set(self.header):\n raise ValueError(f\"unknown columns {columns}\")\n\n row_cats = self.columns[self.index_name]\n # must be convertible to int\n for col in columns:\n if \"int\" not in self.columns[col].dtype.name:\n raise TypeError(f\"{col} is not of int type\")\n\n matrix = self.get_columns(columns, with_index=False).array.astype(int)\n\n data = DictArrayTemplate(row_cats, columns).wrap(matrix)\n return CategoryCounts(data)\n\n def transposed(self, new_column_name, select_as_header=None, **kwargs):\n \"\"\"returns the transposed table.\n\n Parameters\n ----------\n new_column_name\n the existing header will become a column with\n this name\n select_as_header\n current column name containing data to be used\n as the header. 
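Values from this column become the new column labels. 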
Defaults to the first column.\n \"\"\"\n select_as_header = select_as_header or self.columns.order[0]\n assert select_as_header in self.columns, (\n '\"%s\" not in table header' % select_as_header\n )\n\n if len(self.distinct_values(select_as_header)) != len(self):\n raise ValueError(f\"not all '{select_as_header}' values unique\")\n\n attr = self._get_persistent_attrs()\n # on transpose, a row index becomes a column, so pop\n del attr[\"index\"]\n\n attr |= kwargs\n result = self.__class__(**attr)\n\n # place new column header first\n columns = [select_as_header] + [\n c for c in self.columns if c != select_as_header\n ]\n data = self[:, columns].array\n result.columns[new_column_name] = columns[1:]\n for row in data.tolist():\n c = str(row.pop(0))\n result.columns[c] = row\n return result\n\n def write(\n self,\n filename,\n mode=None,\n writer=None,\n format=None,\n sep=None,\n compress=None,\n **kwargs,\n ):\n \"\"\"Write table to filename in the specified format.\n\n Parameters\n ----------\n mode\n file opening mode\n format\n Valid formats are those of the to_string method plus pickle. Will\n try and guess from filename if not specified.\n writer\n a function for formatting the data for output.\n sep\n a character delimiter for fields.\n compress\n if True, gzips the file and appends .gz to the filename (if not\n already added).\n\n Notes\n -----\n If a format is not specified, it attempts to use a filename suffix.\n Unformatted numerical values are written to file in order to preserve\n numerical accuracy.\n \"\"\"\n file_suffix, compress_suffix = get_format_suffixes(filename)\n format = format or file_suffix\n compress = compress or compress_suffix is not None\n\n mode = mode or {\"pickle\": \"wb\"}.get(format, \"w\")\n\n if format == \"json\":\n with atomic_write(filename, mode=\"wt\") as f:\n f.write(self.to_json())\n return\n\n if compress:\n if not filename.endswith(\".gz\"):\n filename = \"%s.gz\" % filename\n mode = \"wt\"\n\n outfile = open_(filename, mode)\n\n if format is None:\n # try guessing from filename suffix\n if compress:\n index = -2\n else:\n index = -1\n suffix = filename.split(\".\")\n if len(suffix) > 1:\n format = suffix[index]\n\n if format == \"csv\":\n sep = sep or \",\"\n elif format == \"tsv\":\n sep = sep or \"\\t\"\n\n if writer:\n rows = self.tolist()\n rows.insert(0, self.header[:])\n rows = writer(rows, has_header=True)\n outfile.writelines(\"\\n\".join(rows))\n elif format == \"pickle\":\n data = self.__getstate__()\n pickle.dump(data, outfile, protocol=1)\n elif sep is not None and format != \"bedgraph\":\n writer = csv.writer(outfile, delimiter=sep, lineterminator=\"\\n\")\n if self.title:\n writer.writerow([self.title])\n writer.writerow(self.header)\n writer.writerows(self.array)\n if self.legend:\n writer.writerow([self.legend])\n else:\n table = self.to_string(format=format, sep=sep, **kwargs)\n outfile.writelines(table + \"\\n\")\n outfile.close()\n" ]
[ [ "numpy.dot", "numpy.array", "numpy.zeros" ], [ "numpy.random.choice", "pandas.DataFrame", "numpy.vectorize", "numpy.array", "numpy.sum", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ngohgia/text2brain_server
[ "16bf523d63319d8c20497acfb2c5a82a5363df5e" ]
[ "google-cloud-function-src/text2brain_model.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_sequence\nfrom decoder import ImageDecoder\nimport transformers\n\n\nclass Text2BrainModel(nn.Module):\n def __init__(self, out_channels, fc_channels, decoder_filters, pretrained_bert_dir, decoder_act_fn=nn.Sigmoid, drop_p=0.5, decoder_input_shape=[4, 5, 4]):\n super().__init__()\n self.out_channels = out_channels\n self.fc_channels = fc_channels\n self.decoder_filters = decoder_filters\n self.decoder_input_shape = decoder_input_shape\n self.drop_p = drop_p\n\n self.tokenizer = transformers.BertTokenizer.from_pretrained(pretrained_bert_dir)\n self.encoder = transformers.BertModel.from_pretrained(pretrained_bert_dir)\n\n self.fc = nn.Linear(\n in_features=768,\n out_features=self.decoder_input_shape[0]*self.decoder_input_shape[1]*self.decoder_input_shape[2]*self.fc_channels)\n self.dropout = nn.Dropout(self.drop_p)\n self.relu = nn.ReLU()\n\n self.decoder = ImageDecoder(in_channels=self.fc_channels, out_channels=1, num_filter=self.decoder_filters, act_fn=decoder_act_fn)\n\n\n def forward(self, texts):\n batch = [self._tokenize(x) for x in texts]\n\n in_mask = self._pad_mask(batch, batch_first=True)\n in_ = pad_sequence(batch, batch_first=True)\n device = next(self.parameters()).device\n in_ = in_.to(device)\n in_mask = in_mask.to(device)\n\n _, embedding = self.encoder(in_, attention_mask=in_mask)\n\n x = self.dropout(embedding)\n x = self.fc(x)\n x = self.dropout(x)\n x = self.relu(x)\n\n decoder_tensor_shape = [-1, self.fc_channels] + self.decoder_input_shape\n x = x.view(decoder_tensor_shape)\n\n out = self.decoder(x)\n\n return out\n\n def _tokenize(self, text):\n return self.tokenizer.encode(text, add_special_tokens=True, return_tensors='pt', truncation=True, max_length=512).squeeze(0)\n\n def _pad_mask(self, sequences, batch_first=False):\n ret = [torch.ones(len(s)) for s in sequences]\n return pad_sequence(ret, batch_first=batch_first)\n\ndef init_pretrained_model(checkpoint_file, pretrained_bert_dir, fc_channels=64, decoder_filters=32):\n \"\"\"Init Model\"\"\"\n model = Text2BrainModel(\n out_channels=1,\n fc_channels=fc_channels,\n decoder_filters=decoder_filters,\n pretrained_bert_dir=pretrained_bert_dir,\n drop_p=0.55)\n\n device = torch.device('cpu')\n state_dict = torch.load(checkpoint_file, map_location=device)['state_dict']\n model.load_state_dict(state_dict)\n model.eval()\n model.to(device)\n return model" ]
[ [ "torch.nn.Dropout", "torch.load", "torch.nn.utils.rnn.pad_sequence", "torch.nn.Linear", "torch.device", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JiByungKyu/ludwig
[ "3e2f276459f976054b5c2ab8c55be994170345da" ]
[ "ludwig/features/base_feature.py" ]
[ "# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nfrom abc import ABC, abstractmethod\nfrom typing import Dict\n\nimport tensorflow as tf\n\nfrom ludwig.constants import *\nfrom ludwig.modules.fully_connected_modules import FCStack\nfrom ludwig.modules.reduction_modules import SequenceReducer\nfrom ludwig.utils.misc_utils import merge_dict, get_from_registry\nfrom ludwig.utils.tf_utils import sequence_length_3D\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseFeature(object):\n \"\"\"Base class for all features.\n\n Note that this class is not-cooperative (does not forward kwargs), so when constructing\n feature class hierarchies, there should be only one parent class that derives from base\n feature. Other functionality should be put into mixin classes to avoid the diamond\n pattern.\n \"\"\"\n\n def __init__(self, feature, *args, **kwargs):\n super().__init__()\n\n if 'name' not in feature:\n raise ValueError('Missing feature name')\n\n self.feature_name = feature['name']\n self.type = None\n\n def overwrite_defaults(self, feature):\n attributes = set(self.__dict__.keys())\n attributes.update(self.__class__.__dict__.keys())\n\n for k in feature.keys():\n if k in attributes:\n if (isinstance(feature[k], dict) and hasattr(self, k)\n and isinstance(getattr(self, k), dict)):\n setattr(self, k, merge_dict(getattr(self, k),\n feature[k]))\n else:\n setattr(self, k, feature[k])\n\n\nclass InputFeature(BaseFeature, tf.keras.Model, ABC):\n \"\"\"Parent class for all input features.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def create_input(self):\n return tf.keras.Input(shape=self.get_input_shape(),\n dtype=self.get_input_dtype(),\n name=self.name + '_input')\n\n @abstractmethod\n def get_input_dtype(self):\n \"\"\"Returns the Tensor data type this input accepts.\"\"\"\n pass\n\n @abstractmethod\n def get_input_shape(self):\n \"\"\"Returns a tuple representing the Tensor shape this input accepts.\"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def update_model_definition_with_metadata(\n input_feature,\n feature_metadata,\n *args,\n **kwargs\n ):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n @property\n @abstractmethod\n def encoder_registry(self):\n pass\n\n def initialize_encoder(self, encoder_parameters):\n return get_from_registry(self.encoder, self.encoder_registry)(\n **encoder_parameters\n )\n\n\nclass OutputFeature(BaseFeature, tf.keras.Model, ABC):\n \"\"\"Parent class for all output features.\"\"\"\n\n train_loss_function = None\n eval_loss_function = None\n\n def __init__(self, feature, *args, **kwargs):\n super().__init__(*args, feature=feature, **kwargs)\n\n self.reduce_input = None\n self.reduce_dependencies = None\n self.dependencies = []\n\n self.fc_layers = None\n self.num_fc_layers = 0\n self.fc_size = 256\n 
self.use_bias = True\n self.weights_initializer = 'glorot_uniform'\n self.bias_initializer = 'zeros'\n self.weights_regularizer = None\n self.bias_regularizer = None\n self.activity_regularizer = None\n # self.weights_constraint=None\n # self.bias_constraint=None\n self.norm = None\n self.norm_params = None\n self.activation = 'relu'\n self.dropout = 0\n\n self.overwrite_defaults(feature)\n\n logger.debug(' output feature fully connected layers')\n logger.debug(' FCStack')\n self.fc_stack = FCStack(\n layers=self.fc_layers,\n num_layers=self.num_fc_layers,\n default_fc_size=self.fc_size,\n default_use_bias=self.use_bias,\n default_weights_initializer=self.weights_initializer,\n default_bias_initializer=self.bias_initializer,\n default_weights_regularizer=self.weights_regularizer,\n default_bias_regularizer=self.bias_regularizer,\n default_activity_regularizer=self.activity_regularizer,\n # default_weights_constraint=self.weights_constraint,\n # default_bias_constraint=self.bias_constraint,\n default_norm=self.norm,\n default_norm_params=self.norm_params,\n default_activation=self.activation,\n default_dropout=self.dropout,\n )\n\n # set up two sequence reducers, one for inputs and other for dependencies\n self.reduce_sequence_input = SequenceReducer(\n reduce_mode=self.reduce_input\n )\n if self.dependencies:\n self.dependency_reducers = {}\n for dependency in self.dependencies:\n self.dependency_reducers[dependency] = SequenceReducer(\n reduce_mode=self.reduce_dependencies\n )\n\n def create_input(self):\n return tf.keras.Input(shape=self.get_output_shape(),\n dtype=self.get_output_dtype(),\n name=self.name + '_input')\n\n @abstractmethod\n def get_output_dtype(self):\n \"\"\"Returns the Tensor data type feature outputs.\"\"\"\n pass\n\n @abstractmethod\n def get_output_shape(self):\n \"\"\"Returns a tuple representing the Tensor shape this feature outputs.\"\"\"\n pass\n\n @property\n @abstractmethod\n def metric_functions(self) -> Dict:\n pass\n\n @property\n @abstractmethod\n def decoder_registry(self):\n pass\n\n def initialize_decoder(self, decoder_parameters):\n return get_from_registry(self.decoder, self.decoder_registry)(\n **decoder_parameters\n )\n\n def train_loss(self, targets, predictions):\n return self.train_loss_function(targets, predictions)\n\n def eval_loss(self, targets, predictions):\n return self.eval_loss_function(targets, predictions)\n\n def update_metrics(self, targets, predictions):\n for metric, metric_fn in self.metric_functions.items():\n if metric == LOSS or metric == HITS_AT_K:\n metric_fn.update_state(targets, predictions)\n else:\n metric_fn.update_state(targets, predictions[PREDICTIONS])\n\n def get_metrics(self):\n metric_vals = {}\n for metric_name, metric_onj in self.metric_functions.items():\n metric_vals[metric_name] = metric_onj.result().numpy()\n return metric_vals\n\n def reset_metrics(self):\n for of_name, metric_fn in self.metric_functions.items():\n if metric_fn is not None:\n metric_fn.reset_states()\n\n def call(\n self,\n inputs, # ((hidden, other_output_hidden), target)\n training=None,\n mask=None\n ):\n # account for output feature target\n if isinstance(inputs, tuple):\n local_inputs, target = inputs\n else:\n local_inputs = inputs\n target = None\n\n combiner_outputs, other_output_hidden = local_inputs\n\n # extract the combined hidden layer\n combiner_output = combiner_outputs['combiner_output']\n hidden = self.prepare_decoder_inputs(\n combiner_output,\n other_output_hidden,\n training=training,\n mask=mask\n )\n\n # ================ 
Predictions ================\n logits_input = {\n HIDDEN: hidden\n }\n if 'encoder_output_state' in combiner_outputs:\n logits_input['encoder_output_state'] = \\\n combiner_outputs['encoder_output_state']\n logits = self.logits(logits_input, target=target, training=training)\n\n # most of the cases the output of self.logits is a tensor\n # in some cases like for sequence features, it can be tuple of\n # logits, predictions, scores\n # The first element will be the logits tensor\n if isinstance(logits, tuple):\n logits = logits[0]\n\n return logits, hidden\n\n @property\n @abstractmethod\n def default_validation_metric(self):\n pass\n\n @staticmethod\n @abstractmethod\n def update_model_definition_with_metadata(\n output_feature,\n feature_metadata,\n *args,\n **kwargs\n ):\n pass\n\n @staticmethod\n @abstractmethod\n def calculate_overall_stats(\n test_stats,\n output_feature,\n dataset,\n train_set_metadata\n ):\n pass\n\n @staticmethod\n @abstractmethod\n def postprocess_results(\n output_feature,\n result,\n metadata,\n experiment_dir_name,\n skip_save_unprocessed_output=False,\n ):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def concat_dependencies(self, hidden, other_features_hidden):\n if len(self.dependencies) > 0:\n dependencies_hidden = []\n for dependency in self.dependencies:\n # the dependent feature is ensured to be present in final_hidden\n # because we did the topological sort of the features before\n dependency_final_hidden = other_features_hidden[dependency]\n\n if len(hidden.shape) > 2:\n if len(dependency_final_hidden.shape) > 2:\n # matrix matrix -> concat\n assert hidden.shape[1] == \\\n dependency_final_hidden.shape[1]\n dependencies_hidden.append(dependency_final_hidden)\n else:\n # matrix vector -> tile concat\n sequence_max_length = hidden.shape[1]\n multipliers = tf.concat(\n [[1], [sequence_max_length], [1]],\n 0\n )\n tiled_representation = tf.tile(\n tf.expand_dims(dependency_final_hidden, 1),\n multipliers\n )\n\n # todo future: maybe modify this with TF2 mask mechanics\n sequence_length = sequence_length_3D(hidden)\n mask = tf.sequence_mask(\n sequence_length,\n sequence_max_length\n )\n tiled_representation = tf.multiply(\n tiled_representation,\n tf.cast(mask[:, :, tf.newaxis], dtype=tf.float32)\n )\n\n dependencies_hidden.append(tiled_representation)\n\n else:\n if len(dependency_final_hidden.shape) > 2:\n # vector matrix -> reduce concat\n reducer = self.dependency_reducers[dependency]\n dependencies_hidden.append(\n reducer(dependency_final_hidden)\n )\n else:\n # vector vector -> concat\n dependencies_hidden.append(dependency_final_hidden)\n\n try:\n hidden = tf.concat([hidden] + dependencies_hidden, -1)\n except:\n raise ValueError(\n 'Shape mismatch while concatenating dependent features of '\n '{}: {}. Concatenating the feature activations tensor {} '\n 'with activation tensors of dependencies: {}. The error is '\n 'likely due to a mismatch of the second dimension (sequence'\n ' length) or a difference in ranks. 
Likely solutions are '\n 'setting the maximum_sequence_length of all sequential '\n 'features to be the same, or reduce the output of some '\n 'features, or disabling the bucketing setting '\n 'bucketing_field to None / null, as activating it will '\n 'reduce the length of the field the bucketing is performed '\n 'on.'.format(\n self.feature_name,\n self.dependencies,\n hidden,\n dependencies_hidden\n )\n )\n\n return hidden\n\n def output_specific_fully_connected(\n self,\n inputs, # feature_hidden\n training=None,\n mask=None\n ):\n feature_hidden = inputs\n original_feature_hidden = inputs\n\n # flatten inputs\n if len(original_feature_hidden.shape) > 2:\n feature_hidden = tf.reshape(\n feature_hidden,\n [-1, feature_hidden.shape[-1]]\n )\n\n # pass it through fc_stack\n feature_hidden = self.fc_stack(\n feature_hidden,\n training=training,\n mask=mask\n )\n feature_hidden_size = feature_hidden.shape[-1]\n\n # reshape back to original first and second dimension\n if len(original_feature_hidden.shape) > 2:\n sequence_length = original_feature_hidden.shape[1]\n feature_hidden = tf.reshape(\n feature_hidden,\n [-1, sequence_length, feature_hidden_size]\n )\n\n return feature_hidden\n\n def prepare_decoder_inputs(\n self,\n combiner_output,\n other_output_features,\n training=None,\n mask=None\n ):\n \"\"\"\n Takes the combiner output and the outputs of other outputs features\n computed so far and performs:\n - reduction of combiner outputs (if needed)\n - concatenating the outputs of dependent features (if needed)\n - output_specific fully connected layers (if needed)\n\n :param combiner_output: output tensor of the combiner\n :param other_output_features: output tensors from other features\n :return: tensor\n \"\"\"\n feature_hidden = combiner_output\n\n # ================ Reduce Inputs ================\n if self.reduce_input is not None and len(feature_hidden.shape) > 2:\n feature_hidden = self.reduce_sequence_input(\n feature_hidden\n )\n\n # ================ Concat Dependencies ================\n feature_hidden = self.concat_dependencies(\n feature_hidden,\n other_output_features\n )\n\n # ================ Output-wise Fully Connected ================\n feature_hidden = self.output_specific_fully_connected(\n feature_hidden,\n training=training,\n mask=mask\n )\n\n return feature_hidden\n" ]
[ [ "tensorflow.concat", "tensorflow.cast", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.sequence_mask" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
gkahn13/GtS
[ "8186177de430d4bfef253bb0ea584ee60dc58d3a" ]
[ "src/gcg/tf/tf_utils.py" ]
[ "import os\nimport tensorflow as tf\n\n######################\n### Graph creation ###\n######################\n\ndef create_session_and_graph(gpu_device=None, gpu_frac=None):\n if gpu_device is None:\n gpu_device = 0\n if gpu_frac is None:\n gpu_frac = 0.95\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu_device)\n tf_graph = tf.Graph()\n if len(str(gpu_device)) > 0:\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_frac)\n config = tf.ConfigProto(\n gpu_options=gpu_options,\n log_device_placement=False,\n allow_soft_placement=True,\n )\n else:\n config = tf.ConfigProto(\n device_count={'GPU': 0},\n log_device_placement=False,\n allow_soft_placement=True,\n )\n tf_sess = tf.Session(graph=tf_graph, config=config)\n return tf_sess, tf_graph\n\n##################\n### Optimizing ###\n##################\n\ndef minimize_and_clip(optimizer, objective, var_list, clip_val=10):\n \"\"\"Minimized `objective` using `optimizer` w.r.t. variables in\n `var_list` while ensure the norm of the gradients for each\n variable is clipped to `clip_val`\n \"\"\"\n gradients = optimizer.compute_gradients(objective, var_list=var_list)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_norm(grad, clip_val), var)\n return optimizer.apply_gradients(gradients)\n\n##################\n### Operations ###\n##################\n\ndef spatial_soft_argmax(features, dtype=tf.float32):\n \"\"\"\n features shape is [N, H, W, C]\n \"\"\"\n N = tf.shape(features)[0]\n val_shape = features.get_shape()\n H, W, C = val_shape[1].value, val_shape[2].value, val_shape[3].value\n features = tf.reshape(\n tf.transpose(features, [0, 3, 1, 2]),\n [-1, H * W])\n softmax = tf.nn.softmax(features)\n spatial_softmax = tf.transpose(tf.reshape(softmax, [N, C, H, W]), [0, 2, 3, 1])\n spatial_softmax_pos = tf.expand_dims(spatial_softmax, -1)\n # TODO shape [H, W, 1, 2]\n # TODO H or W is 1\n assert(H != 1 and W != 1)\n delta_h = 2. / tf.cast(H - 1, dtype)\n delta_w = 2. / tf.cast(W - 1, dtype)\n ran_h = tf.tile(tf.expand_dims(tf.range(-1., 1. 
+ delta_h, delta_h, dtype=dtype), 1), [1, W])\n ran_w = tf.tile(tf.expand_dims(tf.range(-1., 1 + delta_w, delta_w, dtype=dtype), 0), [H, 1])\n image_pos = tf.expand_dims(tf.stack([ran_h, ran_w], 2), 2)\n spatial_soft_amax = tf.reduce_sum(spatial_softmax_pos * image_pos, axis=[1, 2])\n shaped_ssamax = tf.reshape(spatial_soft_amax, [N, C * 2])\n return shaped_ssamax\n\ndef repeat_2d(x, reps, axis):\n assert(axis == 0 or axis == 1)\n\n if axis == 1:\n x = tf.transpose(x)\n\n static_shape = list(x.get_shape())\n dyn_shape = tf.shape(x)\n x_repeat = tf.reshape(tf.tile(x, [1, reps]), (dyn_shape[0] * reps, dyn_shape[1]))\n if static_shape[0].value is not None:\n static_shape[0] = tf.Dimension(static_shape[0].value *reps)\n x_repeat.set_shape(static_shape)\n\n if axis == 1:\n x_repeat = tf.transpose(x_repeat)\n\n return x_repeat\n\ndef batch_outer_product(X, Y):\n \"\"\"\n :param X: [N, U]\n :param Y: [N, V]\n \"\"\"\n # tf.assert_equal(tf.shape(X)[0], tf.shape(Y)[0])\n\n X_batch = tf.expand_dims(X, 2) # [N, U, 1]\n Y_batch = tf.expand_dims(Y, 1) # [N, 1, V]\n results = tf.batch_matmul(X_batch, Y_batch) # [N, U, V]\n\n return results\n\ndef batch_outer_product_2d(X, Y):\n \"\"\"\n :param X: [N, U]\n :param Y: [N, V]\n :return [N, U * V]\n \"\"\"\n U = X.get_shape()[1].value\n V = Y.get_shape()[1].value\n assert(U is not None)\n assert(V is not None)\n\n X_tile = tf.tile(X, (1, V))\n Y_repeat = repeat_2d(Y, U, 1)\n return tf.multiply(X_tile, Y_repeat)\n\ndef gather_2d(x, idxs):\n \"\"\"\n :param x: 2d tensor\n :param idxs: 1d tensor indexing the columns of x to gather\n :return: 1d tensor\n \"\"\"\n assert(len(x.get_shape()) == 2)\n tf.assert_equal(tf.shape(x)[0], tf.shape(idxs)[0])\n\n idxs = tf.transpose(tf.pack([tf.range(tf.shape(idxs)[0]), idxs]))\n x_gather = tf.gather_nd(x, idxs)\n\n return x_gather\n\ndef block_diagonal(matrices, dtype=tf.float32):\n \"\"\"Constructs block-diagonal matrices from a list of batched 2D tensors.\n\n Args:\n matrices: A list of Tensors with shape [..., N_i, M_i] (i.e. a list of\n matrices with the same batch dimension).\n dtype: Data type to use. 
The Tensors in `matrices` must match this dtype.\n Returns:\n A matrix with the input matrices stacked along its main diagonal, having\n shape [..., \\sum_i N_i, \\sum_i M_i].\n \"\"\"\n matrices = [tf.convert_to_tensor(matrix, dtype=dtype) for matrix in matrices]\n blocked_rows = tf.Dimension(0)\n blocked_cols = tf.Dimension(0)\n batch_shape = tf.TensorShape(None)\n for matrix in matrices:\n full_matrix_shape = matrix.get_shape().with_rank_at_least(2)\n batch_shape = batch_shape.merge_with(full_matrix_shape[:-2])\n blocked_rows += full_matrix_shape[-2]\n blocked_cols += full_matrix_shape[-1]\n ret_columns_list = []\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n ret_columns_list.append(matrix_shape[-1])\n ret_columns = tf.add_n(ret_columns_list)\n row_blocks = []\n current_column = 0\n for matrix in matrices:\n matrix_shape = tf.shape(matrix)\n row_before_length = current_column\n current_column += matrix_shape[-1]\n row_after_length = ret_columns - current_column\n row_blocks.append(tf.pad(\n tensor=matrix,\n paddings=tf.concat(0,\n [tf.zeros([tf.rank(matrix) - 1, 2], dtype=tf.int32),\n [(row_before_length, row_after_length)]])))\n blocked = tf.concat(-2, row_blocks)\n blocked.set_shape(batch_shape.concatenate((blocked_rows, blocked_cols)))\n return blocked\n\ndef sample_categorical(p):\n # TODO change to tf.distributions once update tf version\n dist = tf.contrib.distributions.Categorical(probs=p)\n sample = dist.sample()\n return sample\n\n###############\n### Asserts ###\n###############\n\ndef assert_shape(tensor, shape):\n assert(len(tensor.get_shape()) == len(shape))\n tensor_shape = tf.shape(tensor)\n for i, s_i in enumerate(shape):\n tf.assert_equal(tensor_shape[i], tf.cast(s_i, tf.int32))\n\ndef assert_equal_approx(tensor, value, eps=1e-5, name=None):\n return tf.assert_equal(tf.cast(tf.abs(tensor - value) < 1e-5, tf.int32), 1, name=name)\n\nif __name__ == '__main__':\n import numpy as np\n np.random.seed(0)\n tf.set_random_seed(0)\n\n ### repeat_2d test\n a = tf.constant(np.random.random((2, 4)))\n a0 = repeat_2d(a, 2, 0)\n a1 = repeat_2d(a, 2, 1)\n\n sess = tf.Session()\n a_eval, a0_eval, a1_eval = sess.run([a, a0, a1])\n print('\\nrepeat 2d test')\n print('a:\\n{0}'.format(a_eval))\n print('a0\\n{0}'.format(a0_eval))\n print('a1\\n{0}'.format(a1_eval))\n\n ### test batch outer\n a = tf.constant(np.random.random((3, 2)))\n b = tf.constant(np.random.randint(0, 2, (3, 2)).astype(np.float64))\n ab_outer = tf.reshape(batch_outer_product(b, a), (a.get_shape()[0].value, -1))\n ab_outer_2d = batch_outer_product_2d(a, b)\n\n a_eval, b_eval, ab_outer_eval, ab_outer_2d_eval = sess.run([a, b, ab_outer, ab_outer_2d])\n print('\\nbatch outer test')\n print('a:\\n{0}'.format(a_eval))\n print('b:\\n{0}'.format(b_eval))\n print('ab_outer:\\n{0}'.format(ab_outer_eval))\n print('ab_outer_2d:\\n{0}'.format(ab_outer_2d_eval))\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.stack", "tensorflow.cast", "tensorflow.GPUOptions", "tensorflow.rank", "tensorflow.add_n", "numpy.random.randint", "tensorflow.Graph", "tensorflow.batch_matmul", "tensorflow.Dimension", "tensorflow.ConfigProto", "tensorflow.clip_by_norm", "tensorflow.Session", "tensorflow.tile", "tensorflow.TensorShape", "tensorflow.gather_nd", "tensorflow.shape", "tensorflow.contrib.distributions.Categorical", "tensorflow.set_random_seed", "tensorflow.nn.softmax", "tensorflow.multiply", "tensorflow.transpose", "numpy.random.seed", "numpy.random.random", "tensorflow.range", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
broadinstitute/segmentation_experiments
[ "396d5659c7c6cda9dc3d3caf3350710ff6210e2c" ]
[ "functions/colors.py" ]
[ "import numpy as np\nimport skimage\nfrom skimage import io, transform, exposure, data, color\nfrom skimage.color import *\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.pyplot import imshow\n\ndef unmix_purple_img(purp_img, loud=False):\n \"\"\"\n Accepts a purple image object as a parameter \n and returns the image with the colors unmixed for\n easier segmentation\n \"\"\"\n \n hematoxylin_matrix = np.ones((3,3)) * (0.644, 0.717, 0.267) # cell profiler matrix for purple images\n stain_img = purp_img[:, :, [0, 1, 2]] # need only first 3 channels to separate stains\n separated_img = separate_stains(stain_img, hematoxylin_matrix) # apply stain matrix to image\n \n if loud:\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8,8))\n \n ax[0].set_title(\"Original\")\n ax[0].imshow(purp_img)\n \n ax[1].set_title(\"Hematoxylin\")\n ax[1].imshow(separated_img[:, :, 0])\n \n return separated_img[:, :, 0]\n\ndef unmix_pink_imgs(pink_img, loud=False):\n \"\"\"\n Same as unmix_purple_img but takes a pink image\n as a parameter\n \"\"\"\n stain_img = pink_img[:, :, [0, 1, 2]]\n separated_img = separate_stains(stain_img, rbd_from_rgb)\n \n if loud:\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8,8))\n \n ax[0].set_title(\"Original\")\n ax[0].imshow(pink_img)\n \n ax[1].set_title(\"RBD\")\n ax[1].imshow(separated_img[:, :, 1])\n \n return separated_img[:, :, 1]\n" ]
[ [ "matplotlib.pyplot.subplots", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
omarocegueda/dipy
[ "520b724041116a958892bee0068b057314a21cb2", "520b724041116a958892bee0068b057314a21cb2", "9d20c911b4afe83e52ded698eff9ba0f0fafeca8", "520b724041116a958892bee0068b057314a21cb2", "520b724041116a958892bee0068b057314a21cb2" ]
[ "doc/examples/reconst_shore_metrics.py", "dipy/viz/ui.py", "dipy/io/dpy.py", "doc/examples/reconst_dki.py", "dipy/workflows/reconst.py" ]
[ "\"\"\"\n===========================\nCalculate SHORE scalar maps\n===========================\n\nWe show how to calculate two SHORE-based scalar maps: return to origin\nprobability (rtop) [Descoteaux2011]_ and mean square displacement (msd)\n[Wu2007]_, [Wu2008]_ on your data. SHORE can be used with any multiple b-value\ndataset like multi-shell or DSI.\n\nFirst import the necessary modules:\n\"\"\"\n\nimport nibabel as nib\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere\nfrom dipy.data import get_data, dsi_voxels\nfrom dipy.reconst.shore import ShoreModel\n\n\"\"\"\nDownload and read the data for this tutorial.\n\"\"\"\n\nfetch_taiwan_ntu_dsi()\nimg, gtab = read_taiwan_ntu_dsi()\n\n\"\"\"\nimg contains a nibabel Nifti1Image object (data) and gtab contains a GradientTable\nobject (gradient information e.g. b-values). For example, to read the b-values\nit is possible to write print(gtab.bvals).\n\nLoad the raw diffusion data and the affine.\n\"\"\"\n\ndata = img.get_data()\naffine = img.affine\nprint('data.shape (%d, %d, %d, %d)' % data.shape)\n\n\"\"\"\nInstantiate the Model.\n\"\"\"\n\nasm = ShoreModel(gtab)\n\n\"\"\"\nLets just use only one slice only from the data.\n\"\"\"\n\ndataslice = data[30:70, 20:80, data.shape[2] / 2]\n\n\"\"\"\nFit the signal with the model and calculate the SHORE coefficients.\n\"\"\"\n\nasmfit = asm.fit(dataslice)\n\n\"\"\"\nCalculate the analytical rtop on the signal\nthat corresponds to the integral of the signal.\n\"\"\"\n\nprint('Calculating... rtop_signal')\nrtop_signal = asmfit.rtop_signal()\n\n\"\"\"\nNow we calculate the analytical rtop on the propagator,\nthat corresponds to its central value.\n\"\"\"\n\nprint('Calculating... rtop_pdf')\nrtop_pdf = asmfit.rtop_pdf()\n\"\"\"\nIn theory, these two measures must be equal,\nto show that we calculate the mean square error on this two measures.\n\"\"\"\n\nmse = np.sum((rtop_signal - rtop_pdf) ** 2) / rtop_signal.size\nprint(\"mse = %f\" % mse)\n\n\"\"\"\nmse = 0.000000\n\nLet's calculate the analytical mean square displacement on the propagator.\n\"\"\"\n\nprint('Calculating... msd')\nmsd = asmfit.msd()\n\n\"\"\"\nShow the maps and save them in SHORE_maps.png.\n\"\"\"\n\nfig = plt.figure(figsize=(6, 6))\nax1 = fig.add_subplot(2, 2, 1, title='rtop_signal')\nax1.set_axis_off()\nind = ax1.imshow(rtop_signal.T, interpolation='nearest', origin='lower')\nplt.colorbar(ind)\nax2 = fig.add_subplot(2, 2, 2, title='rtop_pdf')\nax2.set_axis_off()\nind = ax2.imshow(rtop_pdf.T, interpolation='nearest', origin='lower')\nplt.colorbar(ind)\nax3 = fig.add_subplot(2, 2, 3, title='msd')\nax3.set_axis_off()\nind = ax3.imshow(msd.T, interpolation='nearest', origin='lower', vmin=0)\nplt.colorbar(ind)\nplt.savefig('SHORE_maps.png')\n\n\"\"\"\n.. figure:: SHORE_maps.png\n :align: center\n\n **rtop and msd calculated using the SHORE model**.\n\n\n.. [Descoteaux2011] Descoteaux M. et. al , \"Multiple q-shell diffusion\n\t\t\t\t\tpropagator imaging\", Medical Image Analysis, vol 15,\n\t\t\t\t\tNo. 4, p. 603-621, 2011.\n\n.. [Wu2007] Wu Y. et. al, \"Hybrid diffusion imaging\", NeuroImage, vol 36,\n \tp. 617-629, 2007.\n\n.. [Wu2008] Wu Y. et. al, \"Computation of Diffusion Function Measures\n\t\t\tin q -Space Using Magnetic Resonance Hybrid Diffusion Imaging\",\n\t\t\tIEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865,\n\t\t\t2008.\n\n.. 
include:: ../links_names.inc\n\n\"\"\"\n", "from _warnings import warn\n\nimport numpy as np\n\nfrom dipy.viz.interactor import CustomInteractorStyle\n\nfrom dipy.utils.optpkg import optional_package\n\n# Allow import, but disable doctests if we don't have vtk.\nvtk, have_vtk, setup_module = optional_package('vtk')\n\nif have_vtk:\n version = vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1]\n major_version = vtk.vtkVersion.GetVTKMajorVersion()\n vtkTextActor = vtk.vtkTextActor\nelse:\n vtkTextActor = object\n\n\nclass UI(object):\n \"\"\" An umbrella class for all UI elements.\n\n While adding UI elements to the renderer, we go over all the sub-elements\n that come with it and add those to the renderer automatically.\n\n Attributes\n ----------\n ui_param : object\n This is an attribute that can be passed to the UI object by the interactor.\n ui_list : list of :class:`UI`\n This is used when there are more than one UI elements inside\n a UI element. They're all automatically added to the renderer at the same time\n as this one.\n parent_ui: UI\n Reference to the parent UI element. This is useful of there is a parent\n UI element and its reference needs to be passed down to the child.\n on_left_mouse_button_pressed: function\n Callback function for when the left mouse button is pressed.\n on_left_mouse_button_drag: function\n Callback function for when the left mouse button is dragged.\n on_right_mouse_button_pressed: function\n Callback function for when the right mouse button is pressed.\n on_right_mouse_button_drag: function\n Callback function for when the right mouse button is dragged.\n\n \"\"\"\n\n def __init__(self):\n self.ui_param = None\n self.ui_list = list()\n\n self.parent_ui = None\n self._callbacks = []\n\n self.left_button_state = \"released\"\n self.right_button_state = \"released\"\n\n self.on_left_mouse_button_pressed = lambda i_ren, obj, element: None\n self.on_left_mouse_button_drag = lambda i_ren, obj, element: None\n self.on_right_mouse_button_pressed = lambda i_ren, obj, element: None\n self.on_right_mouse_button_drag = lambda i_ren, obj, element: None\n self.on_key_press = lambda i_ren, obj, element: None\n\n def get_actors(self):\n \"\"\" Returns the actors that compose this UI component.\n\n \"\"\"\n msg = \"Subclasses of UI must implement `get_actors(self)`.\"\n raise NotImplementedError(msg)\n\n def add_to_renderer(self, ren):\n \"\"\" Allows UI objects to add their own props to the renderer.\n\n Parameters\n ----------\n ren : renderer\n\n \"\"\"\n ren.add(*self.get_actors())\n\n # Get a hold on the current interactor style.\n iren = ren.GetRenderWindow().GetInteractor().GetInteractorStyle()\n\n for callback in self._callbacks:\n if not isinstance(iren, CustomInteractorStyle):\n msg = (\"The ShowManager requires `CustomInteractorStyle` in\"\n \" order to use callbacks.\")\n raise TypeError(msg)\n\n iren.add_callback(*callback, args=[self])\n\n def add_callback(self, prop, event_type, callback, priority=0):\n \"\"\" Adds a callback to a specific event for this UI component.\n\n Parameters\n ----------\n prop : vtkProp\n The prop on which is callback is to be added.\n event_type : string\n The event code.\n callback : function\n The callback function.\n priority : int\n Higher number is higher priority.\n\n \"\"\"\n # Actually since we need an interactor style we will add the callback\n # only when this UI component is added to the renderer.\n self._callbacks.append((prop, event_type, callback, priority))\n\n def set_center(self, position):\n \"\"\" Sets the 
center of the UI component\n\n Parameters\n ----------\n position : (float, float)\n These are the x and y coordinates respectively, with the\n origin at the bottom left.\n\n \"\"\"\n msg = \"Subclasses of UI must implement `set_center(self, position)`.\"\n raise NotImplementedError(msg)\n\n def set_visibility(self, visibility):\n \"\"\" Sets visibility of this UI component and all its sub-components.\n\n \"\"\"\n for actor in self.get_actors():\n actor.SetVisibility(visibility)\n\n def handle_events(self, actor):\n self.add_callback(actor, \"LeftButtonPressEvent\", self.left_button_click_callback)\n self.add_callback(actor, \"LeftButtonReleaseEvent\", self.left_button_release_callback)\n self.add_callback(actor, \"RightButtonPressEvent\", self.right_button_click_callback)\n self.add_callback(actor, \"RightButtonReleaseEvent\", self.right_button_release_callback)\n self.add_callback(actor, \"MouseMoveEvent\", self.mouse_move_callback)\n self.add_callback(actor, \"KeyPressEvent\", self.key_press_callback)\n\n @staticmethod\n def left_button_click_callback(i_ren, obj, self):\n self.left_button_state = \"clicked\"\n i_ren.event.abort()\n\n @staticmethod\n def left_button_release_callback(i_ren, obj, self):\n if self.left_button_state == \"clicked\":\n self.on_left_mouse_button_pressed(i_ren, obj, self)\n self.left_button_state = \"released\"\n\n @staticmethod\n def right_button_click_callback(i_ren, obj, self):\n self.right_button_state = \"clicked\"\n i_ren.event.abort()\n\n @staticmethod\n def right_button_release_callback(i_ren, obj, self):\n if self.right_button_state == \"clicked\":\n self.on_right_mouse_button_pressed(i_ren, obj, self)\n self.right_button_state = \"released\"\n\n @staticmethod\n def mouse_move_callback(i_ren, obj, self):\n if self.left_button_state == \"clicked\" or self.left_button_state == \"dragging\":\n self.left_button_state = \"dragging\"\n self.on_left_mouse_button_drag(i_ren, obj, self)\n elif self.right_button_state == \"clicked\" or self.right_button_state == \"dragging\":\n self.right_button_state = \"dragging\"\n self.on_right_mouse_button_drag(i_ren, obj, self)\n else:\n pass\n\n @staticmethod\n def key_press_callback(i_ren, obj, self):\n self.on_key_press(i_ren, obj, self)\n\n\nclass Button2D(UI):\n \"\"\" A 2D overlay button and is of type vtkTexturedActor2D.\n Currently supports:\n - Multiple icons.\n - Switching between icons.\n\n Attributes\n ----------\n size: (float, float)\n Button size (width, height) in pixels.\n\n \"\"\"\n\n def __init__(self, icon_fnames, size=(30, 30)):\n \"\"\"\n Parameters\n ----------\n size : 2-tuple of int, optional\n Button size.\n icon_fnames : dict\n {iconname : filename, iconname : filename, ...}\n\n \"\"\"\n super(Button2D, self).__init__()\n self.icon_extents = dict()\n self.icons = self.__build_icons(icon_fnames)\n self.icon_names = list(self.icons.keys())\n self.current_icon_id = 0\n self.current_icon_name = self.icon_names[self.current_icon_id]\n self.actor = self.build_actor(self.icons[self.current_icon_name])\n self.size = size\n self.handle_events(self.actor)\n\n def __build_icons(self, icon_fnames):\n \"\"\" Converts file names to vtkImageDataGeometryFilters.\n\n A pre-processing step to prevent re-read of file names during every state change.\n\n Parameters\n ----------\n icon_fnames : dict\n {iconname: filename, iconname: filename, ...}\n\n Returns\n -------\n icons : dict\n A dictionary of corresponding vtkImageDataGeometryFilters.\n\n \"\"\"\n icons = {}\n for icon_name, icon_fname in 
icon_fnames.items():\n if icon_fname.split(\".\")[-1] not in [\"png\", \"PNG\"]:\n error_msg = \"A specified icon file is not in the PNG format. SKIPPING.\"\n warn(Warning(error_msg))\n else:\n png = vtk.vtkPNGReader()\n png.SetFileName(icon_fname)\n png.Update()\n icons[icon_name] = png.GetOutput()\n\n return icons\n\n @property\n def size(self):\n \"\"\" Gets the button size.\n\n \"\"\"\n return self._size\n\n @size.setter\n def size(self, size):\n \"\"\" Sets the button size.\n\n Parameters\n ----------\n size : (float, float)\n Button size (width, height) in pixels.\n\n \"\"\"\n self._size = np.asarray(size)\n\n # Update actor.\n self.texture_points.SetPoint(0, 0, 0, 0.0)\n self.texture_points.SetPoint(1, size[0], 0, 0.0)\n self.texture_points.SetPoint(2, size[0], size[1], 0.0)\n self.texture_points.SetPoint(3, 0, size[1], 0.0)\n self.texture_polydata.SetPoints(self.texture_points)\n\n @property\n def color(self):\n \"\"\" Gets the button's color.\n\n \"\"\"\n color = self.actor.GetProperty().GetColor()\n return np.asarray(color)\n\n @color.setter\n def color(self, color):\n \"\"\" Sets the button's color.\n\n Parameters\n ----------\n color : (float, float, float)\n RGB. Must take values in [0, 1].\n\n \"\"\"\n self.actor.GetProperty().SetColor(*color)\n\n def scale(self, size):\n \"\"\" Scales the button.\n\n Parameters\n ----------\n size : (float, float)\n Scaling factor (width, height) in pixels.\n\n \"\"\"\n self.size *= size\n\n def build_actor(self, icon):\n \"\"\" Return an image as a 2D actor with a specific position.\n\n Parameters\n ----------\n icon : :class:`vtkImageData`\n\n Returns\n -------\n :class:`vtkTexturedActor2D`\n\n \"\"\"\n # This is highly inspired by\n # https://github.com/Kitware/VTK/blob/c3ec2495b183e3327820e927af7f8f90d34c3474\\\n # /Interaction/Widgets/vtkBalloonRepresentation.cxx#L47\n\n self.texture_polydata = vtk.vtkPolyData()\n self.texture_points = vtk.vtkPoints()\n self.texture_points.SetNumberOfPoints(4)\n self.size = icon.GetExtent()\n\n polys = vtk.vtkCellArray()\n polys.InsertNextCell(4)\n polys.InsertCellPoint(0)\n polys.InsertCellPoint(1)\n polys.InsertCellPoint(2)\n polys.InsertCellPoint(3)\n self.texture_polydata.SetPolys(polys)\n\n tc = vtk.vtkFloatArray()\n tc.SetNumberOfComponents(2)\n tc.SetNumberOfTuples(4)\n tc.InsertComponent(0, 0, 0.0)\n tc.InsertComponent(0, 1, 0.0)\n tc.InsertComponent(1, 0, 1.0)\n tc.InsertComponent(1, 1, 0.0)\n tc.InsertComponent(2, 0, 1.0)\n tc.InsertComponent(2, 1, 1.0)\n tc.InsertComponent(3, 0, 0.0)\n tc.InsertComponent(3, 1, 1.0)\n self.texture_polydata.GetPointData().SetTCoords(tc)\n\n texture_mapper = vtk.vtkPolyDataMapper2D()\n if major_version <= 5:\n texture_mapper.SetInput(self.texture_polydata)\n else:\n texture_mapper.SetInputData(self.texture_polydata)\n\n button = vtk.vtkTexturedActor2D()\n button.SetMapper(texture_mapper)\n\n self.texture = vtk.vtkTexture()\n button.SetTexture(self.texture)\n\n button_property = vtk.vtkProperty2D()\n button_property.SetOpacity(1.0)\n button.SetProperty(button_property)\n\n self.set_icon(icon)\n return button\n\n def get_actors(self):\n \"\"\" Returns the actors that compose this UI component.\n\n \"\"\"\n return [self.actor]\n\n def set_icon(self, icon):\n \"\"\" Modifies the icon used by the vtkTexturedActor2D.\n\n Parameters\n ----------\n icon : imageDataGeometryFilter\n\n \"\"\"\n if major_version <= 5:\n self.texture.SetInput(icon)\n else:\n self.texture.SetInputData(icon)\n\n def next_icon_name(self):\n \"\"\" Returns the next icon name while cycling 
through icons.\n\n \"\"\"\n self.current_icon_id += 1\n if self.current_icon_id == len(self.icons):\n self.current_icon_id = 0\n self.current_icon_name = self.icon_names[self.current_icon_id]\n\n def next_icon(self):\n \"\"\" Increments the state of the Button.\n\n Also changes the icon.\n\n \"\"\"\n self.next_icon_name()\n self.set_icon(self.icons[self.current_icon_name])\n\n def set_center(self, position):\n \"\"\" Sets the icon center to position.\n\n Parameters\n ----------\n position : (float, float)\n The new center of the button (x, y).\n\n \"\"\"\n new_position = np.asarray(position) - self.size / 2.\n self.actor.SetPosition(*new_position)\n\n\nclass Rectangle2D(UI):\n \"\"\" A 2D rectangle sub-classed from UI.\n Uses vtkPolygon.\n\n Attributes\n ----------\n size : (float, float)\n The size of the rectangle (height, width) in pixels.\n\n \"\"\"\n\n def __init__(self, size, center=(0, 0), color=(1, 1, 1), opacity=1.0):\n \"\"\" Initializes a rectangle.\n\n Parameters\n ----------\n size : (float, float)\n The size of the rectangle (height, width) in pixels.\n center : (float, float)\n The center of the rectangle (x, y).\n color : (float, float, float)\n Must take values in [0, 1].\n opacity : float\n Must take values in [0, 1].\n\n \"\"\"\n super(Rectangle2D, self).__init__()\n self.size = size\n self.actor = self.build_actor(size=size, center=center,\n color=color, opacity=opacity)\n self.handle_events(self.actor)\n\n def get_actors(self):\n \"\"\" Returns the actors that compose this UI component.\n\n \"\"\"\n return [self.actor]\n\n def build_actor(self, size, center, color, opacity):\n \"\"\" Builds the text actor.\n\n Parameters\n ----------\n size : (float, float)\n The size of the rectangle (height, width) in pixels.\n center : (float, float)\n The center of the rectangle (x, y).\n color : (float, float, float)\n Must take values in [0, 1].\n opacity : float\n Must take values in [0, 1].\n\n Returns\n -------\n :class:`vtkActor2D`\n\n \"\"\"\n # Setup four points\n points = vtk.vtkPoints()\n points.InsertNextPoint(0, 0, 0)\n points.InsertNextPoint(size[0], 0, 0)\n points.InsertNextPoint(size[0], size[1], 0)\n points.InsertNextPoint(0, size[1], 0)\n\n # Create the polygon\n polygon = vtk.vtkPolygon()\n polygon.GetPointIds().SetNumberOfIds(4) # make a quad\n polygon.GetPointIds().SetId(0, 0)\n polygon.GetPointIds().SetId(1, 1)\n polygon.GetPointIds().SetId(2, 2)\n polygon.GetPointIds().SetId(3, 3)\n\n # Add the polygon to a list of polygons\n polygons = vtk.vtkCellArray()\n polygons.InsertNextCell(polygon)\n\n # Create a PolyData\n polygonPolyData = vtk.vtkPolyData()\n polygonPolyData.SetPoints(points)\n polygonPolyData.SetPolys(polygons)\n\n # Create a mapper and actor\n mapper = vtk.vtkPolyDataMapper2D()\n if vtk.VTK_MAJOR_VERSION <= 5:\n mapper.SetInput(polygonPolyData)\n else:\n mapper.SetInputData(polygonPolyData)\n\n actor = vtk.vtkActor2D()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(color)\n actor.GetProperty().SetOpacity(opacity)\n actor.SetPosition(center[0] - self.size[0] / 2, center[1] - self.size[1] / 2)\n\n return actor\n\n def set_center(self, position):\n \"\"\" Sets the center to position.\n\n Parameters\n ----------\n position : (float, float)\n The new center of the rectangle (x, y).\n\n \"\"\"\n self.actor.SetPosition(position[0] - self.size[0] / 2, position[1] - self.size[1] / 2)\n\n\nclass Panel2D(UI):\n \"\"\" A 2D UI Panel.\n\n Can contain one or more UI elements.\n\n Attributes\n ----------\n center : (float, float)\n The center of the 
panel (x, y).\n size : (float, float)\n The size of the panel (width, height) in pixels.\n alignment : [left, right]\n Alignment of the panel with respect to the overall screen.\n\n \"\"\"\n\n def __init__(self, center, size, color=(0.1, 0.1, 0.1), opacity=0.7, align=\"left\"):\n \"\"\"\n Parameters\n ----------\n center : (float, float)\n The center of the panel (x, y).\n size : (float, float)\n The size of the panel (width, height) in pixels.\n color : (float, float, float)\n Must take values in [0, 1].\n opacity : float\n Must take values in [0, 1].\n align : [left, right]\n Alignment of the panel with respect to the overall screen.\n\n \"\"\"\n super(Panel2D, self).__init__()\n self.center = center\n self.size = size\n self.lower_limits = (self.center[0] - self.size[0] / 2,\n self.center[1] - self.size[1] / 2)\n\n self.panel = Rectangle2D(size=size, center=center, color=color,\n opacity=opacity)\n\n self.element_positions = []\n self.element_positions.append([self.panel, 'relative', 0.5, 0.5])\n self.alignment = align\n\n self.handle_events(self.panel.actor)\n\n self.on_left_mouse_button_pressed = self.left_button_press\n self.on_left_mouse_button_drag = self.left_button_drag\n\n def add_to_renderer(self, ren):\n \"\"\" Allows UI objects to add their own props to the renderer.\n\n Here, we add only call add_to_renderer for the additional components.\n\n Parameters\n ----------\n ren : renderer\n\n \"\"\"\n super(Panel2D, self).add_to_renderer(ren)\n for ui_item in self.ui_list:\n ui_item.add_to_renderer(ren)\n\n def get_actors(self):\n \"\"\" Returns the panel actor.\n\n \"\"\"\n return [self.panel.actor]\n\n def add_element(self, element, position_type, position):\n \"\"\" Adds an element to the panel.\n\n The center of the rectangular panel is its bottom lower position.\n\n Parameters\n ----------\n element : UI\n The UI item to be added.\n position_type: string\n 'absolute' or 'relative'\n position : (float, float)\n Absolute for absolute and relative for relative\n\n \"\"\"\n self.ui_list.append(element)\n if position_type == 'relative':\n self.element_positions.append([element, position_type, position[0], position[1]])\n element.set_center((self.lower_limits[0] + position[0] * self.size[0],\n self.lower_limits[1] + position[1] * self.size[1]))\n elif position_type == 'absolute':\n self.element_positions.append([element, position_type, position[0], position[1]])\n element.set_center((position[0], position[1]))\n else:\n raise ValueError(\"Position can only be absolute or relative\")\n\n def set_center(self, position):\n \"\"\" Sets the panel center to position.\n\n The center of the rectangular panel is its bottom lower position.\n\n Parameters\n ----------\n position : (float, float)\n The new center of the panel (x, y).\n\n \"\"\"\n shift = [position[0] - self.center[0], position[1] - self.center[1]]\n self.center = position\n self.lower_limits = (position[0] - self.size[0] / 2, position[1] - self.size[1] / 2)\n for ui_element in self.element_positions:\n if ui_element[1] == 'relative':\n ui_element[0].set_center((self.lower_limits[0] + ui_element[2] * self.size[0],\n self.lower_limits[1] + ui_element[3] * self.size[1]))\n elif ui_element[1] == 'absolute':\n ui_element[2] += shift[0]\n ui_element[3] += shift[1]\n ui_element[0].set_center((ui_element[2], ui_element[3]))\n\n @staticmethod\n def left_button_press(i_ren, obj, panel2d_object):\n click_position = i_ren.event.position\n panel2d_object.ui_param = (click_position[0] - panel2d_object.panel.actor.GetPosition()[0]\n - 
panel2d_object.panel.size[0] / 2,\n click_position[1] - panel2d_object.panel.actor.GetPosition()[1]\n - panel2d_object.panel.size[1] / 2)\n i_ren.event.abort() # Stop propagating the event.\n\n @staticmethod\n def left_button_drag(i_ren, obj, panel2d_object):\n click_position = i_ren.event.position\n if panel2d_object.ui_param is not None:\n panel2d_object.set_center((click_position[0] - panel2d_object.ui_param[0],\n click_position[1] - panel2d_object.ui_param[1]))\n i_ren.force_render()\n\n def re_align(self, window_size_change):\n \"\"\" Re-organises the elements in case the window size is changed.\n\n Parameters\n ----------\n window_size_change : (int, int)\n New window size (width, height) in pixels.\n\n \"\"\"\n if self.alignment == \"left\":\n pass\n elif self.alignment == \"right\":\n self.set_center((self.center[0] + window_size_change[0],\n self.center[1] + window_size_change[1]))\n else:\n raise ValueError(\"You can only left-align or right-align objects in a panel.\")\n\n\nclass TextActor2D(object):\n \"\"\" Wraps over the default vtkTextActor and helps setting the text.\n\n Contains member functions for text formatting.\n\n Attributes\n ----------\n actor : :class:`vtkTextActor`\n\n \"\"\"\n\n def __init__(self):\n self.actor = vtkTextActor()\n\n def get_actor(self):\n \"\"\" Returns the actor composing this element.\n\n Returns\n -------\n :class:`vtkTextActor`\n The actor composing this class.\n \"\"\"\n return self.actor\n\n @property\n def message(self):\n \"\"\" Gets message from the text.\n\n Returns\n -------\n str\n The current text message.\n\n \"\"\"\n return self.actor.GetInput()\n\n @message.setter\n def message(self, text):\n \"\"\" Sets the text message.\n\n Parameters\n ----------\n text : str\n The message to be set.\n\n \"\"\"\n self.actor.SetInput(text)\n\n @property\n def font_size(self):\n \"\"\" Gets text font size.\n\n Returns\n ----------\n int\n Text font size.\n\n \"\"\"\n return self.actor.GetTextProperty().GetFontSize()\n\n @font_size.setter\n def font_size(self, size):\n \"\"\" Sets font size.\n\n Parameters\n ----------\n size : int\n Text font size.\n\n \"\"\"\n self.actor.GetTextProperty().SetFontSize(size)\n\n @property\n def font_family(self):\n \"\"\" Gets font family.\n\n Returns\n ----------\n str\n Text font family.\n\n \"\"\"\n return self.actor.GetTextProperty().GetFontFamilyAsString()\n\n @font_family.setter\n def font_family(self, family='Arial'):\n \"\"\" Sets font family.\n\n Currently defaults to Arial.\n # ToDo: Add other font families.\n\n Parameters\n ----------\n family : str\n The font family.\n\n \"\"\"\n if family == 'Arial':\n self.actor.GetTextProperty().SetFontFamilyToArial()\n else:\n raise ValueError(\"Font not supported yet: {}.\".format(family))\n\n @property\n def justification(self):\n \"\"\" Gets text justification.\n\n Returns\n -------\n str\n Text justification.\n\n \"\"\"\n return self.actor.GetTextProperty().GetJustificationAsString()\n\n @justification.setter\n def justification(self, justification):\n \"\"\" Justifies text.\n\n Parameters\n ----------\n justification : str\n Possible values are left, right, center.\n\n \"\"\"\n text_property = self.actor.GetTextProperty()\n if justification == 'left':\n text_property.SetJustificationToLeft()\n elif justification == 'center':\n text_property.SetJustificationToCentered()\n elif justification == 'right':\n text_property.SetJustificationToRight()\n else:\n raise ValueError(\"Text can only be justified left, right and center.\")\n\n @property\n def bold(self):\n 
\"\"\" Returns whether the text is bold.\n\n Returns\n -------\n bool\n Text is bold if True.\n\n \"\"\"\n return self.actor.GetTextProperty().GetBold()\n\n @bold.setter\n def bold(self, flag):\n \"\"\" Bolds/un-bolds text.\n\n Parameters\n ----------\n flag : bool\n Sets text bold if True.\n\n \"\"\"\n self.actor.GetTextProperty().SetBold(flag)\n\n @property\n def italic(self):\n \"\"\" Returns whether the text is italicised.\n\n Returns\n -------\n bool\n Text is italicised if True.\n\n \"\"\"\n return self.actor.GetTextProperty().GetItalic()\n\n @italic.setter\n def italic(self, flag):\n \"\"\" Italicises/un-italicises text.\n\n Parameters\n ----------\n flag : bool\n Italicises text if True.\n\n \"\"\"\n self.actor.GetTextProperty().SetItalic(flag)\n\n @property\n def shadow(self):\n \"\"\" Returns whether the text has shadow.\n\n Returns\n -------\n bool\n Text is shadowed if True.\n\n \"\"\"\n return self.actor.GetTextProperty().GetShadow()\n\n @shadow.setter\n def shadow(self, flag):\n \"\"\" Adds/removes text shadow.\n\n Parameters\n ----------\n flag : bool\n Shadows text if True.\n\n \"\"\"\n self.actor.GetTextProperty().SetShadow(flag)\n\n @property\n def color(self):\n \"\"\" Gets text color.\n\n Returns\n -------\n (float, float, float)\n Returns text color in RGB.\n\n \"\"\"\n return self.actor.GetTextProperty().GetColor()\n\n @color.setter\n def color(self, color=(1, 0, 0)):\n \"\"\" Set text color.\n\n Parameters\n ----------\n color : (float, float, float)\n RGB: Values must be between 0-1.\n\n \"\"\"\n self.actor.GetTextProperty().SetColor(*color)\n\n @property\n def position(self):\n \"\"\" Gets text actor position.\n\n Returns\n -------\n (float, float)\n The current actor position. (x, y) in pixels.\n\n \"\"\"\n return self.actor.GetPosition()\n\n @position.setter\n def position(self, position):\n \"\"\" Set text actor position.\n\n Parameters\n ----------\n position : (float, float)\n The new position. 
(x, y) in pixels.\n\n \"\"\"\n self.actor.SetDisplayPosition(*position)\n\n\nclass TextBox2D(UI):\n \"\"\" An editable 2D text box that behaves as a UI component.\n\n Currently supports:\n - Basic text editing.\n - Cursor movements.\n - Single and multi-line text boxes.\n - Pre text formatting (text needs to be formatted beforehand).\n\n Attributes\n ----------\n text : str\n The current text state.\n actor : :class:`vtkActor2d`\n The text actor.\n width : int\n The number of characters in a single line of text.\n height : int\n The number of lines in the textbox.\n window_left : int\n Left limit of visible text in the textbox.\n window_right : int\n Right limit of visible text in the textbox.\n caret_pos : int\n Position of the caret in the text.\n init : bool\n Flag which says whether the textbox has just been initialized.\n\n \"\"\"\n def __init__(self, width, height, text=\"Enter Text\", position=(100, 10),\n color=(0, 0, 0), font_size=18, font_family='Arial',\n justification='left', bold=False,\n italic=False, shadow=False):\n \"\"\"\n Parameters\n ----------\n width : int\n The number of characters in a single line of text.\n height : int\n The number of lines in the textbox.\n text : str\n The initial text while building the actor.\n position : (float, float)\n (x, y) in pixels.\n color : (float, float, float)\n RGB: Values must be between 0-1.\n font_size : int\n Size of the text font.\n font_family : str\n Currently only supports Arial.\n justification : str\n left, right or center.\n bold : bool\n Makes text bold.\n italic : bool\n Makes text italicised.\n shadow : bool\n Adds text shadow.\n\n \"\"\"\n super(TextBox2D, self).__init__()\n self.text = text\n self.actor = self.build_actor(self.text, position, color, font_size,\n font_family, justification, bold, italic, shadow)\n self.width = width\n self.height = height\n self.window_left = 0\n self.window_right = 0\n self.caret_pos = 0\n self.init = True\n\n self.handle_events(self.actor.get_actor())\n\n self.on_left_mouse_button_pressed = self.left_button_press\n self.on_key_press = self.key_press\n\n def build_actor(self, text, position, color, font_size,\n font_family, justification, bold, italic, shadow):\n\n \"\"\" Builds a text actor.\n\n Parameters\n ----------\n text : str\n The initial text while building the actor.\n position : (float, float)\n (x, y) in pixels.\n color : (float, float, float)\n RGB: Values must be between 0-1.\n font_size : int\n Size of the text font.\n font_family : str\n Currently only supports Arial.\n justification : str\n left, right or center.\n bold : bool\n Makes text bold.\n italic : bool\n Makes text italicised.\n shadow : bool\n Adds text shadow.\n\n Returns\n -------\n :class:`vtkActor2d`\n\n \"\"\"\n text_actor = TextActor2D()\n text_actor.position = position\n text_actor.message = text\n text_actor.font_size = font_size\n text_actor.font_family = font_family\n text_actor.justification = justification\n text_actor.bold = bold\n text_actor.italic = italic\n text_actor.shadow = shadow\n if vtk.vtkVersion.GetVTKSourceVersion().split(' ')[-1] <= \"6.2.0\":\n pass\n else:\n text_actor.actor.GetTextProperty().SetBackgroundColor(1, 1, 1)\n text_actor.actor.GetTextProperty().SetBackgroundOpacity(1.0)\n text_actor.color = color\n\n return text_actor\n\n def set_message(self, message):\n \"\"\" Set custom text to textbox.\n\n Parameters\n ----------\n message: str\n The custom message to be set.\n\n \"\"\"\n self.text = message\n self.actor.message = message\n self.init = False\n self.window_right = 
len(self.text)\n self.window_left = 0\n self.caret_pos = self.window_right\n\n def get_actors(self):\n \"\"\" Returns the actors that compose this UI component.\n\n \"\"\"\n return [self.actor.get_actor()]\n\n def width_set_text(self, text):\n \"\"\" Adds newlines to text where necessary.\n\n This is needed for multi-line text boxes.\n\n Parameters\n ----------\n text : str\n The final text to be formatted.\n\n Returns\n -------\n str\n A multi line formatted text.\n\n \"\"\"\n multi_line_text = \"\"\n for i in range(len(text)):\n multi_line_text += text[i]\n if (i + 1) % self.width == 0:\n multi_line_text += \"\\n\"\n return multi_line_text.rstrip(\"\\n\")\n\n def handle_character(self, character):\n \"\"\" Main driving function that handles button events.\n\n # TODO: Need to handle all kinds of characters like !, +, etc.\n\n Parameters\n ----------\n character : str\n\n \"\"\"\n if character.lower() == \"return\":\n self.render_text(False)\n return True\n if character.lower() == \"backspace\":\n self.remove_character()\n elif character.lower() == \"left\":\n self.move_left()\n elif character.lower() == \"right\":\n self.move_right()\n else:\n self.add_character(character)\n self.render_text()\n return False\n\n def move_caret_right(self):\n \"\"\" Moves the caret towards right.\n\n \"\"\"\n self.caret_pos = min(self.caret_pos + 1, len(self.text))\n\n def move_caret_left(self):\n \"\"\" Moves the caret towards left.\n\n \"\"\"\n self.caret_pos = max(self.caret_pos - 1, 0)\n\n def right_move_right(self):\n \"\"\" Moves right boundary of the text window right-wards.\n\n \"\"\"\n if self.window_right <= len(self.text):\n self.window_right += 1\n\n def right_move_left(self):\n \"\"\" Moves right boundary of the text window left-wards.\n\n \"\"\"\n if self.window_right > 0:\n self.window_right -= 1\n\n def left_move_right(self):\n \"\"\" Moves left boundary of the text window right-wards.\n\n \"\"\"\n if self.window_left <= len(self.text):\n self.window_left += 1\n\n def left_move_left(self):\n \"\"\" Moves left boundary of the text window left-wards.\n\n \"\"\"\n if self.window_left > 0:\n self.window_left -= 1\n\n def add_character(self, character):\n \"\"\" Inserts a character into the text and moves window and caret accordingly.\n\n Parameters\n ----------\n character : str\n\n \"\"\"\n if len(character) > 1 and character.lower() != \"space\":\n return\n if character.lower() == \"space\":\n character = \" \"\n self.text = self.text[:self.caret_pos] + character + self.text[self.caret_pos:]\n self.move_caret_right()\n if self.window_right - self.window_left == self.height * self.width - 1:\n self.left_move_right()\n self.right_move_right()\n\n def remove_character(self):\n \"\"\" Removes a character from the text and moves window and caret accordingly.\n\n \"\"\"\n if self.caret_pos == 0:\n return\n self.text = self.text[:self.caret_pos - 1] + self.text[self.caret_pos:]\n self.move_caret_left()\n if len(self.text) < self.height * self.width - 1:\n self.right_move_left()\n if self.window_right - self.window_left == self.height * self.width - 1:\n if self.window_left > 0:\n self.left_move_left()\n self.right_move_left()\n\n def move_left(self):\n \"\"\" Handles left button press.\n\n \"\"\"\n self.move_caret_left()\n if self.caret_pos == self.window_left - 1:\n if self.window_right - self.window_left == self.height * self.width - 1:\n self.left_move_left()\n self.right_move_left()\n\n def move_right(self):\n \"\"\" Handles right button press.\n\n \"\"\"\n self.move_caret_right()\n if 
self.caret_pos == self.window_right + 1:\n            if self.window_right - self.window_left == self.height * self.width - 1:\n                self.left_move_right()\n            self.right_move_right()\n\n    def showable_text(self, show_caret):\n        \"\"\" Chops out text to be shown on the screen.\n\n        Parameters\n        ----------\n        show_caret : bool\n            Whether or not to show the caret.\n\n        \"\"\"\n        if show_caret:\n            ret_text = self.text[:self.caret_pos] + \"_\" + self.text[self.caret_pos:]\n        else:\n            ret_text = self.text\n        ret_text = ret_text[self.window_left:self.window_right + 1]\n        return ret_text\n\n    def render_text(self, show_caret=True):\n        \"\"\" Renders text after processing.\n\n        Parameters\n        ----------\n        show_caret : bool\n            Whether or not to show the caret.\n\n        \"\"\"\n        text = self.showable_text(show_caret)\n        if text == \"\":\n            text = \"Enter Text\"\n        self.actor.message = self.width_set_text(text)\n\n    def edit_mode(self):\n        \"\"\" Turns on edit mode.\n\n        \"\"\"\n        if self.init:\n            self.text = \"\"\n            self.init = False\n            self.caret_pos = 0\n        self.render_text()\n\n    def set_center(self, position):\n        \"\"\" Sets the text center to position.\n\n        Parameters\n        ----------\n        position : (float, float)\n\n        \"\"\"\n        self.actor.position = position\n\n    @staticmethod\n    def left_button_press(i_ren, obj, textbox_object):\n        \"\"\" Left button press handler for textbox\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        textbox_object: :class:`TextBox2D`\n\n        \"\"\"\n        i_ren.add_active_prop(textbox_object.actor.get_actor())\n        textbox_object.edit_mode()\n        i_ren.force_render()\n\n    @staticmethod\n    def key_press(i_ren, obj, textbox_object):\n        \"\"\" Key press handler for textbox\n\n        Parameters\n        ----------\n        i_ren: :class:`CustomInteractorStyle`\n        obj: :class:`vtkActor`\n            The picked actor\n        textbox_object: :class:`TextBox2D`\n\n        \"\"\"\n        key = i_ren.event.key\n        is_done = textbox_object.handle_character(key)\n        if is_done:\n            i_ren.remove_active_prop(textbox_object.actor.get_actor())\n\n        i_ren.force_render()\n", "''' A class for handling large tractography datasets.\n\n    It is built using the pytables tools which in turn implement\n    key features of the HDF5 (hierarchical data format) API [1]_.\n\n    References\n    ----------\n    .. [1] http://www.hdfgroup.org/HDF5/doc/H5.intro.html\n'''\n\nimport numpy as np\n\nfrom distutils.version import LooseVersion\n\n# Conditional testing machinery for pytables\nfrom dipy.testing import doctest_skip_parser\n\n# Conditional import machinery for pytables\nfrom dipy.utils.optpkg import optional_package\n\n# Allow import, but disable doctests, if we don't have pytables\ntables, have_tables, _ = optional_package('tables')\n\n# Useful variable for backward compatibility.\nif have_tables:\n    TABLES_LESS_3_0 = LooseVersion(tables.__version__) < \"3.0\"\n\n# Make sure not to carry across setup module from * import\n__all__ = ['Dpy']\n\n\nclass Dpy(object):\n    @doctest_skip_parser\n    def __init__(self, fname, mode='r', compression=0):\n        ''' Advanced storage system for tractography based on HDF5\n\n        Parameters\n        ------------\n        fname : str, full filename\n        mode : 'r' read\n         'w' write\n         'r+' read and write only if file already exists\n         'a'  read and write even if file doesn't exist (not used yet)\n        compression : 0 no compression to 9 maximum compression\n\n        Examples\n        ----------\n        >>> import os\n        >>> from tempfile import mkstemp #temp file\n        >>> from dipy.io.dpy import Dpy\n        >>> def dpy_example():\n        ...     fd,fname = mkstemp()\n        ...     fname = fname + '.dpy' #add correct extension\n        ...     
dpw = Dpy(fname,'w')\n ... A=np.ones((5,3))\n ... B=2*A.copy()\n ... C=3*A.copy()\n ... dpw.write_track(A)\n ... dpw.write_track(B)\n ... dpw.write_track(C)\n ... dpw.close()\n ... dpr = Dpy(fname,'r')\n ... A=dpr.read_track()\n ... B=dpr.read_track()\n ... T=dpr.read_tracksi([0,1,2,0,0,2])\n ... dpr.close()\n ... os.remove(fname) #delete file from disk\n >>> dpy_example() # skip if not have_tables\n\n '''\n\n self.mode = mode\n self.f = tables.openFile(fname, mode=self.mode) if TABLES_LESS_3_0 else tables.open_file(fname, mode=self.mode)\n self.N = 5 * 10**9\n self.compression = compression\n\n if self.mode == 'w':\n if TABLES_LESS_3_0:\n func_create_group = self.f.createGroup\n func_create_array = self.f.createArray\n func_create_earray = self.f.createEArray\n else:\n func_create_group = self.f.create_group\n func_create_array = self.f.create_array\n func_create_earray = self.f.create_earray\n\n self.streamlines = func_create_group(self.f.root, 'streamlines')\n # create a version number\n self.version = func_create_array(self.f.root, 'version',\n [b\"0.0.1\"], 'Dpy Version Number')\n\n self.tracks = func_create_earray(self.f.root.streamlines,\n 'tracks',\n tables.Float32Atom(),\n (0, 3),\n \"scalar Float32 earray\",\n tables.Filters(self.compression),\n expectedrows=self.N)\n self.offsets = func_create_earray(self.f.root.streamlines,\n 'offsets',\n tables.Int64Atom(), (0,),\n \"scalar Int64 earray\",\n tables.Filters(self.compression),\n expectedrows=self.N + 1)\n self.curr_pos = 0\n self.offsets.append(np.array([self.curr_pos]).astype(np.int64))\n\n if self.mode == 'r':\n self.tracks = self.f.root.streamlines.tracks\n self.offsets = self.f.root.streamlines.offsets\n self.track_no = len(self.offsets) - 1\n self.offs_pos = 0\n\n def version(self):\n ver = self.f.root.version[:]\n return ver[0].decode()\n\n def write_track(self, track):\n ''' write on track each time\n '''\n self.tracks.append(track.astype(np.float32))\n self.curr_pos += track.shape[0]\n self.offsets.append(np.array([self.curr_pos]).astype(np.int64))\n\n def write_tracks(self, T):\n ''' write many tracks together\n '''\n for track in T:\n self.tracks.append(track.astype(np.float32))\n self.curr_pos += track.shape[0]\n self.offsets.append(np.array([self.curr_pos]).astype(np.int64))\n\n def read_track(self):\n ''' read one track each time\n '''\n off0, off1 = self.offsets[self.offs_pos:self.offs_pos + 2]\n self.offs_pos += 1\n return self.tracks[off0:off1]\n\n def read_tracksi(self, indices):\n ''' read tracks with specific indices\n '''\n T = []\n for i in indices:\n # print(self.offsets[i:i+2])\n off0, off1 = self.offsets[i:i + 2]\n T.append(self.tracks[off0:off1])\n return T\n\n def read_tracks(self):\n ''' read the entire tractography\n '''\n I = self.offsets[:]\n TR = self.tracks[:]\n T = []\n for i in range(len(I) - 1):\n off0, off1 = I[i:i + 2]\n T.append(TR[off0:off1])\n return T\n\n def close(self):\n self.f.close()\n\n\nif __name__ == '__main__':\n pass\n", "\"\"\"\n=====================================================================\nReconstruction of the diffusion signal with the kurtosis tensor model\n=====================================================================\n\nThe diffusion kurtosis model is an expansion of the diffusion tensor model\n(see :ref:`example_reconst_dti`). 
In addition to the diffusion tensor (DT), the\ndiffusion kurtosis model quantifies the degree to which water diffusion in\nbiological tissues is non-Gaussian using the kurtosis tensor (KT)\n[Jensen2005]_.\n\nMeasurements of non-Gaussian diffusion from the diffusion kurtosis model are of\ninterest because they can be used to characterize tissue microstructural\nheterogeneity [Jensen2010]_ and to derive concrete biophysical parameters, such\nas the density of axonal fibres and diffusion tortuosity [Fierem2011]_.\nMoreover, DKI can be used to resolve crossing fibers in tractography and to\nobtain invariant rotational measures not limited to well-aligned fiber\npopulations [NetoHe2015]_.\n\nThe diffusion kurtosis model expresses the diffusion-weighted signal as:\n\n.. math::\n\n    S(n,b)=S_{0}e^{-bD(n)+\\frac{1}{6}b^{2}D(n)^{2}K(n)}\n\nwhere $\\mathbf{b}$ is the applied diffusion weighting (which is dependent on\nthe measurement parameters), $S_0$ is the signal in the absence of diffusion\ngradient sensitization, $\\mathbf{D(n)}$ is the value of diffusion along\ndirection $\\mathbf{n}$, and $\\mathbf{K(n)}$ is the value of kurtosis along\ndirection $\\mathbf{n}$. The directional diffusion $\\mathbf{D(n)}$ and kurtosis\n$\\mathbf{K(n)}$ can be related to the diffusion tensor (DT) and kurtosis tensor\n(KT) using the following equations:\n\n.. math::\n     D(n)=\\sum_{i=1}^{3}\\sum_{j=1}^{3}n_{i}n_{j}D_{ij}\n\nand\n\n.. math::\n     K(n)=\\frac{MD^{2}}{D(n)^{2}}\\sum_{i=1}^{3}\\sum_{j=1}^{3}\\sum_{k=1}^{3}\n     \\sum_{l=1}^{3}n_{i}n_{j}n_{k}n_{l}W_{ijkl}\n\nwhere $D_{ij}$ are the elements of the second-order DT, and $W_{ijkl}$ the\nelements of the fourth-order KT and $MD$ is the mean diffusivity. Like the DT,\nthe KT has antipodal symmetry and thus only 15 $W_{ijkl}$ elements are needed\nto fully characterize the KT:\n\n.. math::\n   \\begin{matrix} ( & W_{xxxx} & W_{yyyy} & W_{zzzz} & W_{xxxy} & W_{xxxz}\n                    & ... \\\\\n                    & W_{xyyy} & W_{yyyz} & W_{xzzz} & W_{yzzz} & W_{xxyy}\n                    & ... \\\\\n                    & W_{xxzz} & W_{yyzz} & W_{xxyz} & W_{xyyz} & W_{xyzz}\n                    & & )\\end{matrix}\n\nIn the following example we show how to fit the diffusion kurtosis model on\ndiffusion-weighted multi-shell datasets and how to estimate diffusion\nkurtosis-based statistics.\n\nFirst, we import all relevant modules:\n\"\"\"\n\nimport numpy as np\nimport dipy.reconst.dki as dki\nimport dipy.reconst.dti as dti\nimport matplotlib.pyplot as plt\nfrom dipy.data import fetch_cenir_multib\nfrom dipy.data import read_cenir_multib\nfrom dipy.segment.mask import median_otsu\nfrom dipy.denoise.noise_estimate import estimate_sigma\nfrom dipy.denoise.nlmeans import nlmeans\n\n\"\"\"\nDKI requires multi-shell data, i.e. data acquired from more than one non-zero\nb-value. Here, we use fetch to download a multi-shell dataset with parameters\nthat are similar to the data collected in the Human Connectome Project (HCP).\nThe total size of the downloaded data is 1760 MBytes; however, you only need to\nfetch it once. Parameter ``with_raw`` of function ``fetch_cenir_multib`` is set\nto ``False`` to only download eddy-current/motion corrected data:\n\"\"\"\n\nfetch_cenir_multib(with_raw=False)\n\n\"\"\"\nNext, we read the saved dataset. 
To decrease the influence of diffusion signal\nTaylor approximation components larger than the fourth order (components not\ntaken into account by the diffusion kurtosis tensor), we only select the\nb-values up to 2000 $s.mm^{-2}$:\n\"\"\"\n\nbvals = [200, 400, 1000, 2000]\n\nimg, gtab = read_cenir_multib(bvals)\n\ndata = img.get_data()\n\naffine = img.affine\n\n\"\"\"\nFunction ``read_cenir_multib`` returns img and gtab, which contain,\nrespectively, a nibabel Nifti1Image object (where the data can be extracted)\nand a GradientTable object with information about the b-values and b-vectors.\n\nBefore fitting the data, we perform some data pre-processing. We first compute\na brain mask to avoid unnecessary calculations on the background of the image.\n\"\"\"\n\nmaskdata, mask = median_otsu(data, 4, 2, False, vol_idx=[0, 1], dilate=1)\n\n\"\"\"\nSince the diffusion kurtosis model involves the estimation of a large number\nof parameters [TaxCMW2015]_ and since the non-Gaussian components of the\ndiffusion signal are more sensitive to artefacts [NetoHe2012]_, a fundamental\ndata pre-processing step for diffusion kurtosis fitting is to denoise our data.\nFor this, we use Dipy's non-local mean filter (see\n:ref:`example-denoise-nlmeans`). Note that, since the HCP-like data has a large\nnumber of diffusion-weighted volumes, this procedure can take a couple of hours\nto compute for the entire dataset. Therefore, to speed up the run time in this\nexample we only denoise an axial slice of the data.\n\"\"\"\n\naxial_slice = 40\n\nsigma = estimate_sigma(data, N=4)\n\nmask_roi = np.zeros(data.shape[:-1], dtype=bool)\nmask_roi[:, :, axial_slice] = mask[:, :, axial_slice]\n\nden = nlmeans(data, sigma=sigma, mask=mask_roi)\nden = den[:, :, axial_slice, :]\n\n\"\"\"\nNow that we have loaded and prepared the voxels to process, we can go forward\nwith the voxel reconstruction. This can be done by first instantiating the\nDiffusionKurtosisModel in the following way:\n\"\"\"\n\ndkimodel = dki.DiffusionKurtosisModel(gtab)\n\n\"\"\"\nTo fit the data using the defined model object, we call the ``fit`` function of\nthis object:\n\"\"\"\n\ndkifit = dkimodel.fit(den)\n\n\"\"\"\nThe fit method creates a DiffusionKurtosisFit object, which contains all the\ndiffusion and kurtosis fitting parameters and other DKI attributes. For\ninstance, since the diffusion kurtosis model estimates the diffusion tensor,\nall standard diffusion tensor statistics can be computed from the\nDiffusionKurtosisFit instance. For example, we show below how to extract the\nfractional anisotropy (FA), the mean diffusivity (MD), the axial diffusivity\n(AD) and the radial diffusivity (RD) from the DiffusionKurtosisFit instance.\n\"\"\"\n\nFA = dkifit.fa\nMD = dkifit.md\nAD = dkifit.ad\nRD = dkifit.rd\n\n\"\"\"\nNote that these four standard measures could also be computed from Dipy's DTI\nmodule. Theoretically, computing these measures from both models should be\nanalogous. However, since the diffusion kurtosis model involves a larger number\nof parameters, diffusion statistics maps can look noisier. On the other\nhand, the diffusion statistics from the kurtosis model are expected to have\nbetter accuracy [Veraar2011]_. For comparison purposes, we calculate below the\nFA, MD, AD, and RD using Dipy's TensorModel.\n\"\"\"\n\ntenmodel = dti.TensorModel(gtab)\ntenfit = tenmodel.fit(den)\n\ndti_FA = tenfit.fa\ndti_MD = tenfit.md\ndti_AD = tenfit.ad\ndti_RD = tenfit.rd\n\n\"\"\"\nThe DT based measures can be easily visualized using matplotlib. 
For example,\nthe FA, MD, AD, and RD obtained from the diffusion kurtosis model (upper\npanels) and the tensor model (lower panels) are plotted for the selected axial\nslice.\n\"\"\"\n\nfig1, ax = plt.subplots(2, 4, figsize=(12, 6),\n                        subplot_kw={'xticks': [], 'yticks': []})\n\nfig1.subplots_adjust(hspace=0.3, wspace=0.05)\n\nax.flat[0].imshow(FA, cmap='gray')\nax.flat[0].set_title('FA (DKI)')\nax.flat[1].imshow(MD, cmap='gray')\nax.flat[1].set_title('MD (DKI)')\nax.flat[2].imshow(AD, cmap='gray')\nax.flat[2].set_title('AD (DKI)')\nax.flat[3].imshow(RD, cmap='gray')\nax.flat[3].set_title('RD (DKI)')\n\nax.flat[4].imshow(dti_FA, cmap='gray')\nax.flat[4].set_title('FA (DTI)')\nax.flat[5].imshow(dti_MD, cmap='gray')\nax.flat[5].set_title('MD (DTI)')\nax.flat[6].imshow(dti_AD, cmap='gray')\nax.flat[6].set_title('AD (DTI)')\nax.flat[7].imshow(dti_RD, cmap='gray')\nax.flat[7].set_title('RD (DTI)')\n\nplt.show()\nfig1.savefig('Diffusion_tensor_measures_from_DTI_and_DKI.png')\n\n\"\"\"\n.. figure:: Diffusion_tensor_measures_from_DTI_and_DKI.png\n   :align: center\n\n   **Diffusion tensor measures obtained from the diffusion tensor estimated\n   from DKI (upper panels) and DTI (lower panels).**\n\nFrom the figure, we can see that the standard diffusion measures of the\nHCP-like data obtained from the diffusion kurtosis model have similar contrasts\nto the standard diffusion measures obtained from the tensor model. This can be\nexplained by the high quality of the dataset and the high performance of the\ndiffusion kurtosis fit procedure.\n\nIn addition to the standard diffusion statistics, the DiffusionKurtosisFit\ninstance can be used to estimate the non-Gaussian measures of mean kurtosis\n(MK), the axial kurtosis (AK) and the radial kurtosis (RK).\n\"\"\"\n\nMK = dkifit.mk(0, 3)\nAK = dkifit.ak(0, 3)\nRK = dkifit.rk(0, 3)\n\n\"\"\"\nKurtosis measures are susceptible to high amplitude outliers. The impact of\nhigh amplitude kurtosis outliers was removed in the lines of code above by\nintroducing, as an optional input, the extremes of the typical range of\nkurtosis values (assumed here to lie between 0 and 3).\n\nNow we are ready to plot the kurtosis standard measures using matplotlib:\n\"\"\"\n\nfig2, ax = plt.subplots(1, 3, figsize=(12, 6),\n                        subplot_kw={'xticks': [], 'yticks': []})\n\nfig2.subplots_adjust(hspace=0.3, wspace=0.05)\n\nax.flat[0].imshow(MK, cmap='gray')\nax.flat[0].set_title('MK')\nax.flat[1].imshow(AK, cmap='gray')\nax.flat[1].set_title('AK')\nax.flat[2].imshow(RK, cmap='gray')\nax.flat[2].set_title('RK')\n\nplt.show()\nfig2.savefig('Kurtosis_tensor_standard_measures.png')\n\n\"\"\"\n.. figure:: Kurtosis_tensor_standard_measures.png\n   :align: center\n\n   **Kurtosis tensor standard measures obtained from the kurtosis tensor.**\n\nThe non-Gaussian behaviour of the diffusion signal is larger when water\ndiffusion is restricted by compartments and barriers (e.g., myelin sheath).\nTherefore, as the figure above shows, white matter kurtosis values are smaller\nalong the axial direction of fibers (smaller amplitudes shown in the AK map)\nthan for the radial directions (larger amplitudes shown in the RK map).\n\nReferences:\n\n.. [TaxCMW2015] Tax CMW, Otte WM, Viergever MA, Dijkhuizen RM, Leemans A\n                (2014). REKINDLE: Robust extraction of kurtosis INDices with\n                linear estimation. Magnetic Resonance in Medicine 73(2):\n                794-808.\n.. 
[Jensen2005] Jensen JH, Helpern JA, Ramani A, Lu H, Kaczynski K (2005).\n                Diffusional Kurtosis Imaging: The Quantification of\n                Non-Gaussian Water Diffusion by Means of Magnetic Resonance\n                Imaging. Magnetic Resonance in Medicine 53: 1432-1440.\n.. [Jensen2010] Jensen JH, Helpern JA (2010). MRI quantification of\n                non-Gaussian water diffusion by kurtosis analysis. NMR in\n                Biomedicine 23(7): 698-710.\n.. [Fierem2011] Fieremans E, Jensen JH, Helpern JA (2011). White matter\n                characterization with diffusion kurtosis imaging. NeuroImage\n                58: 177-188.\n.. [NetoHe2012] Neto Henriques R, Ferreira H, Correia M (2012). Diffusion\n                kurtosis imaging of the healthy human brain. Master\n                Dissertation, Bachelor and Master Program in Biomedical\n                Engineering and Biophysics, Faculty of Sciences.\n                http://repositorio.ul.pt/bitstream/10451/8511/1/ulfc104137_tm_Rafael_Henriques.pdf\n.. [NetoHe2015] Neto Henriques R, Correia MM, Nunes RG, Ferreira HA (2015).\n                Exploring the 3D geometry of the diffusion kurtosis tensor -\n                Impact on the development of robust tractography procedures and\n                novel biomarkers. NeuroImage 111: 85-99.\n.. [Veraar2011] Veraart J, Poot DH, Van Hecke W, Blockx I, Van der Linden A,\n                Verhoye M, Sijbers J (2011). More Accurate Estimation of\n                Diffusion Tensor Parameters Using Diffusion Kurtosis Imaging.\n                Magnetic Resonance in Medicine 65(1): 138-145.\n\n.. include:: ../links_names.inc\n\"\"\"\n", "from __future__ import division, print_function, absolute_import\n\nimport logging\nimport numpy as np\nimport os.path\n\nimport nibabel as nib\n\nfrom dipy.core.gradients import gradient_table\nfrom dipy.io.gradients import read_bvals_bvecs\nfrom dipy.reconst.dti import (TensorModel, color_fa, fractional_anisotropy,\n                              geodesic_anisotropy, mean_diffusivity,\n                              axial_diffusivity, radial_diffusivity,\n                              lower_triangular, mode as get_mode)\nfrom dipy.workflows.workflow import Workflow\n\n\nclass ReconstDtiFlow(Workflow):\n    @classmethod\n    def get_short_name(cls):\n        return 'dti'\n\n    def run(self, input_files, bvalues, bvectors, mask_files, b0_threshold=0.0,\n            save_metrics=[],\n            out_dir='', out_tensor='tensors.nii.gz', out_fa='fa.nii.gz',\n            out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz',\n            out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz',\n            out_evec='evecs.nii.gz', out_eval='evals.nii.gz'):\n\n        \"\"\" Workflow for tensor reconstruction and for computing DTI metrics.\n        Performs a tensor reconstruction on the files by 'globbing'\n        ``input_files`` and saves the DTI metrics in a directory specified by\n        ``out_dir``.\n\n        Parameters\n        ----------\n        input_files : string\n            Path to the input volumes. This path may contain wildcards to\n            process multiple inputs at once.\n        bvalues : string\n            Path to the bvalues files. This path may contain wildcards to use\n            multiple bvalues files at once.\n        bvectors : string\n            Path to the bvectors files. This path may contain wildcards to use\n            multiple bvectors files at once.\n        mask_files : string\n            Path to the input masks. This path may contain wildcards to use\n            multiple masks at once. 
(default: No mask used)\n b0_threshold : float, optional\n Threshold used to find b=0 directions (default 0.0)\n save_metrics : variable string, optional\n List of metrics to save.\n Possible values: fa, ga, rgb, md, ad, rd, mode, tensor, evec, eval\n (default [] (all))\n out_dir : string, optional\n Output directory (default input file directory)\n out_tensor : string, optional\n Name of the tensors volume to be saved (default 'tensors.nii.gz')\n out_fa : string, optional\n Name of the fractional anisotropy volume to be saved\n (default 'fa.nii.gz')\n out_ga : string, optional\n Name of the geodesic anisotropy volume to be saved\n (default 'ga.nii.gz')\n out_rgb : string, optional\n Name of the color fa volume to be saved (default 'rgb.nii.gz')\n out_md : string, optional\n Name of the mean diffusivity volume to be saved\n (default 'md.nii.gz')\n out_ad : string, optional\n Name of the axial diffusivity volume to be saved\n (default 'ad.nii.gz')\n out_rd : string, optional\n Name of the radial diffusivity volume to be saved\n (default 'rd.nii.gz')\n out_mode : string, optional\n Name of the mode volume to be saved (default 'mode.nii.gz')\n out_evec : string, optional\n Name of the eigenvectors volume to be saved\n (default 'evecs.nii.gz')\n out_eval : string, optional\n Name of the eigenvalues to be saved (default 'evals.nii.gz')\n \"\"\"\n io_it = self.get_io_iterator()\n\n for dwi, bval, bvec, mask, otensor, ofa, oga, orgb, omd, oad, orad, \\\n omode, oevecs, oevals in io_it:\n\n logging.info('Computing DTI metrics for {0}'.format(dwi))\n\n img = nib.load(dwi)\n data = img.get_data()\n affine = img.affine\n\n if mask is None:\n mask = None\n else:\n mask = nib.load(mask).get_data().astype(np.bool)\n\n tenfit, _ = self.get_fitted_tensor(data, mask, bval, bvec,\n b0_threshold)\n\n if not save_metrics:\n save_metrics = ['fa', 'md', 'rd', 'ad', 'ga', 'rgb', 'mode',\n 'evec', 'eval', 'tensor']\n\n FA = fractional_anisotropy(tenfit.evals)\n FA[np.isnan(FA)] = 0\n FA = np.clip(FA, 0, 1)\n\n if 'tensor' in save_metrics:\n tensor_vals = lower_triangular(tenfit.quadratic_form)\n correct_order = [0, 1, 3, 2, 4, 5]\n tensor_vals_reordered = tensor_vals[..., correct_order]\n fiber_tensors = nib.Nifti1Image(tensor_vals_reordered.astype(\n np.float32), affine)\n nib.save(fiber_tensors, otensor)\n\n if 'fa' in save_metrics:\n fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)\n nib.save(fa_img, ofa)\n\n if 'ga' in save_metrics:\n GA = geodesic_anisotropy(tenfit.evals)\n ga_img = nib.Nifti1Image(GA.astype(np.float32), affine)\n nib.save(ga_img, oga)\n\n if 'rgb' in save_metrics:\n RGB = color_fa(FA, tenfit.evecs)\n rgb_img = nib.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)\n nib.save(rgb_img, orgb)\n\n if 'md' in save_metrics:\n MD = mean_diffusivity(tenfit.evals)\n md_img = nib.Nifti1Image(MD.astype(np.float32), affine)\n nib.save(md_img, omd)\n\n if 'ad' in save_metrics:\n AD = axial_diffusivity(tenfit.evals)\n ad_img = nib.Nifti1Image(AD.astype(np.float32), affine)\n nib.save(ad_img, oad)\n\n if 'rd' in save_metrics:\n RD = radial_diffusivity(tenfit.evals)\n rd_img = nib.Nifti1Image(RD.astype(np.float32), affine)\n nib.save(rd_img, orad)\n\n if 'mode' in save_metrics:\n MODE = get_mode(tenfit.quadratic_form)\n mode_img = nib.Nifti1Image(MODE.astype(np.float32), affine)\n nib.save(mode_img, omode)\n\n if 'evec' in save_metrics:\n evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), affine)\n nib.save(evecs_img, oevecs)\n\n if 'eval' in save_metrics:\n evals_img = 
nib.Nifti1Image(tenfit.evals.astype(np.float32), affine)\n                nib.save(evals_img, oevals)\n\n            logging.info('DTI metrics saved in {0}'.\n                         format(os.path.dirname(oevals)))\n\n    def get_tensor_model(self, gtab):\n        return TensorModel(gtab, fit_method=\"WLS\")\n\n    def get_fitted_tensor(self, data, mask, bval, bvec, b0_threshold=0):\n\n        logging.info('Tensor estimation...')\n        bvals, bvecs = read_bvals_bvecs(bval, bvec)\n        gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold)\n\n        tenmodel = self.get_tensor_model(gtab)\n        tenfit = tenmodel.fit(data, mask)\n\n        return tenfit, gtab\n\n\nclass ReconstDtiRestoreFlow(ReconstDtiFlow):\n    @classmethod\n    def get_short_name(cls):\n        return 'dti_restore'\n\n    def run(self, input_files, bvalues, bvectors, mask_files, sigma,\n            b0_threshold=0.0, save_metrics=[], jacobian=True,\n            out_dir='', out_tensor='tensors.nii.gz', out_fa='fa.nii.gz',\n            out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz',\n            out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz',\n            out_evec='evecs.nii.gz', out_eval='evals.nii.gz'):\n\n        \"\"\" Workflow for tensor reconstruction and for computing DTI metrics.\n        Performs a tensor reconstruction on the files by 'globbing'\n        ``input_files`` and saves the DTI metrics in a directory specified by\n        ``out_dir``.\n\n        Parameters\n        ----------\n        input_files : string\n            Path to the input volumes. This path may contain wildcards to\n            process multiple inputs at once.\n        bvalues : string\n            Path to the bvalues files. This path may contain wildcards to use\n            multiple bvalues files at once.\n        bvectors : string\n            Path to the bvectors files. This path may contain wildcards to use\n            multiple bvectors files at once.\n        mask_files : string\n            Path to the input masks. This path may contain wildcards to use\n            multiple masks at once. 
(default: No mask used)\n sigma : float\n An estimate of the variance.\n b0_threshold : float, optional\n Threshold used to find b=0 directions (default 0.0)\n save_metrics : variable string, optional\n List of metrics to save.\n Possible values: fa, ga, rgb, md, ad, rd, mode, tensor, evec, eval\n (default [] (all))\n jacobian : bool, optional\n Whether to use the Jacobian of the tensor to speed the\n non-linear optimization procedure used to fit the tensor\n parameters (default True)\n out_dir : string, optional\n Output directory (default input file directory)\n out_tensor : string, optional\n Name of the tensors volume to be saved (default 'tensors.nii.gz')\n out_fa : string, optional\n Name of the fractional anisotropy volume to be saved\n (default 'fa.nii.gz')\n out_ga : string, optional\n Name of the geodesic anisotropy volume to be saved\n (default 'ga.nii.gz')\n out_rgb : string, optional\n Name of the color fa volume to be saved (default 'rgb.nii.gz')\n out_md : string, optional\n Name of the mean diffusivity volume to be saved\n (default 'md.nii.gz')\n out_ad : string, optional\n Name of the axial diffusivity volume to be saved\n (default 'ad.nii.gz')\n out_rd : string, optional\n Name of the radial diffusivity volume to be saved\n (default 'rd.nii.gz')\n out_mode : string, optional\n Name of the mode volume to be saved (default 'mode.nii.gz')\n out_evec : string, optional\n Name of the eigenvectors volume to be saved\n (default 'evecs.nii.gz')\n out_eval : string, optional\n Name of the eigenvalues to be saved (default 'evals.nii.gz')\n \"\"\"\n self.sigma = sigma\n self.jacobian = jacobian\n\n super(ReconstDtiRestoreFlow, self).\\\n run(input_files, bvalues, bvectors, mask_files, b0_threshold,\n save_metrics, out_dir, out_tensor, out_fa, out_ga, out_rgb,\n out_md, out_ad, out_rd, out_mode, out_evec, out_eval)\n\n def get_tensor_model(self, gtab):\n return TensorModel(gtab, fit_method=\"RT\", sigma=self.sigma,\n jac=self.jacobian)\n" ]
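The per-metric saving pattern in ReconstDtiFlow.run above can be exercised on its own. A minimal sketch, assuming synthetic eigenvalues and an identity affine (the output file name is an arbitrary choice, not part of the workflow):

import numpy as np
import nibabel as nib
from dipy.reconst.dti import fractional_anisotropy

# Synthetic (4, 4, 4, 3) eigenvalues standing in for tenfit.evals.
evals = np.abs(np.random.randn(4, 4, 4, 3)) * 1e-3
FA = fractional_anisotropy(evals)
FA[np.isnan(FA)] = 0  # same NaN handling as the workflow above
FA = np.clip(FA, 0, 1)
fa_img = nib.Nifti1Image(FA.astype(np.float32), np.eye(4))
nib.save(fa_img, 'fa_demo.nii.gz')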
[ [ "matplotlib.pyplot.colorbar", "numpy.sum", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ], [ "numpy.asarray" ], [ "numpy.array" ], [ "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.subplots" ], [ "numpy.isnan", "numpy.array", "numpy.clip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tashby/pytorch_geometric
[ "bc1eabc242c8dc6e3b7078db922779911ca3382d" ]
[ "torch_geometric/datasets/amazon.py" ]
[ "import torch\nfrom torch_geometric.data import InMemoryDataset, download_url\nfrom torch_geometric.read import read_npz\n\n\nclass Amazon(InMemoryDataset):\n url = 'https://github.com/shchur/gnn-benchmark/raw/master/data/npz/'\n\n def __init__(self, root, name, transform=None, pre_transform=None):\n self.name = name.lower()\n assert self.name in ['computers', 'photo']\n super(Amazon, self).__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_file_names(self):\n return 'amazon_electronics_{}.npz'.format(self.name)\n\n @property\n def processed_file_names(self):\n return 'data.pt'\n\n def download(self):\n download_url(self.url + self.raw_file_names, self.raw_dir)\n\n def process(self):\n data = read_npz(self.raw_paths[0])\n data = data if self.pre_transform is None else self.pre_transform(data)\n data, slices = self.collate([data])\n torch.save((data, slices), self.processed_paths[0])\n\n def __repr__(self):\n return '{}{}()'.format(self.__class__.__name__, self.name.capitalize())\n" ]
[ [ "torch.save", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pedro-mgb/pedestrian-arc-lstm-smf
[ "1b9fbe6c89c74dc706fd8d3b11ea08977ba2c1d3", "1b9fbe6c89c74dc706fd8d3b11ea08977ba2c1d3" ]
[ "other_scripts/data/data_size_in_memory.py", "other_scripts/plot_single_predictions.py" ]
[ "\"\"\"\nCreated on September 11th, 2021\n\nSimple script to compute the size of a dataset (with one or multiple files) in memory (RAM).\nNote that this measure will give an approximate approximate value\n\nIMPORTANT NOTE:\nThe computation may stop with an error if the data does not fit in memory.\nThis makes training with such datasets impossible in our repository.\nThe easy alternative would be to use a subset of this data.\nIf that is not desirable, some form of lazy loading should be done. Sadly it is not implemented in this repository.\nTo implement such a procedure may take some time, and lazy loading can be quite slow, depending on the files.\nFor more information see: https://discuss.pytorch.org/t/how-to-use-dataset-larger-than-memory/37785\n\"\"\"\nimport sys\nimport time\nimport argparse\nimport gc\nimport os\n\nimport psutil\nimport torch\n\nfrom models.data.loaders import get_data_loader\nfrom models.utils.parser_options import add_parser_arguments_for_data, add_parser_arguments_misc\n\nparser = argparse.ArgumentParser()\nparser = add_parser_arguments_for_data(parser)\nparser = add_parser_arguments_misc(parser)\nparser.add_argument('--data_location', default='datasets_in_trajnetpp21/train/',\n help='the relative path to the directory where the f data files are, or relative path to a file')\nparser.add_argument('--process_files_individually', action='store_true',\n help='If supplied, will retrieve each file individually, and get their memory content. '\n 'Each file will be loaded sequentially - after the size of that file has been computed, it '\n 'will be removed from memory. Especially useful for datasets that do not fit in memory')\n\n\ndef main(args):\n if args.use_gpu:\n args.use_gpu = False\n print(\"WARNING: Use of GPU was de-activated since this script only supports CPU\")\n # device = torch.device('cuda') if args.use_gpu else torch.device('cpu')\n device = torch.device('cpu')\n data_location = os.path.relpath(args.data_location)\n\n print(\"Getting available memory...\")\n v_memory = psutil.virtual_memory()\n free_memory = v_memory.free / (1024 * 1024)\n print(f\"CPU: total of {v_memory.total / (1024 * 1024):.3f}MB;\\t {v_memory.used / (1024 * 1024):.3f}MB is used\")\n if torch.cuda.is_available():\n total_mem_gpu = torch.cuda.get_device_properties(0).total_memory\n print(f\"GPU: Total of {total_mem_gpu / (1024 * 1024):.3f}MB memory reserved\")\n\n if args.process_files_individually and os.path.isdir(data_location):\n print('Loading each file separately (avoids out-of-memory errors)')\n data_path_list = [os.path.join(data_location, _path) for _path in sorted(os.listdir(data_location))]\n else:\n print('Loading all data at the same time (may have issues if data does not fit in memory)')\n data_path_list = [data_location]\n print('')\n\n full_size, full_t_size = 0, 0 # expected unit: MegaBytes (MB) or 2^20 bytes\n for path in data_path_list:\n trajs_size = 0\n if args.process_files_individually:\n print(f\"Reading {path}... 
\", end='')\n # get_data_loader returns a function object to retrieve the dataloader\n _, loader = (get_data_loader(args, path))(args, device, path)\n # print(f\"Dataset in {path}:{os.linesep}\\tsize {sys.getsizeof(dataset)} + {sys.getsizeof(loader)}\")\n num_batches = len(loader)\n loader_size = get_size(loader) / (1024 * 1024)\n print(\"Done!\")\n # print(f'Dataset: {dataset_size / (1024 * 1024)}MB;\\t Loader: {loader_size / (1024 * 1024)}MB')\n for i, batch in enumerate(loader):\n print(f\"\\r Batch: {i + 1}/{num_batches}\", end='', flush=True)\n (_, _, _, _, metadata, _, _) = batch\n trajs_size += sum([m.size for m in metadata])\n time.sleep(0.0001)\n print('\\r Clearing memory...', end='')\n dataset = loader = None\n gc.collect() # explicitly free any unused memory\n print('\\r', end='')\n if args.process_files_individually:\n percentage = trajs_size / loader_size * 100\n print(f\"{path} with approximately {loader_size:.2f}MB in memory.{os.linesep}\\t \"\n f\"Of which {trajs_size:.2f}MB ({percentage:.2f}%) come directly from trajectories {os.linesep}\")\n dataset = loader = None\n gc.collect() # explicitly free any unused memory\n full_size += loader_size\n full_t_size += trajs_size\n\n percentage = full_t_size / full_size * 100\n print(f\"{os.linesep}Data in {args.data_location} occupying approximately {full_size:.2f}MB in memory.\"\n f\"{os.linesep}\\t Of which {full_t_size:.2f}MB ({percentage:.2f}%) come directly from trajectories\")\n\n if full_size * 1.1 > free_memory: # the 1.1 is to account for additional memory allocated by training/testing\n print(F\"ERROR: THE DATASET IN {args.data_location} most likely does not fit in memory for this machine\")\n\n\ndef get_size(obj, seen=None):\n \"\"\"Recursively finds size of objects\n Might not work 100%\n Credits: https://goshippo.com/blog/measure-real-size-any-python-object/\"\"\"\n size = sys.getsizeof(obj)\n if seen is None:\n seen = set()\n obj_id = id(obj)\n if obj_id in seen:\n return 0\n # Important mark as seen *before* entering recursion to gracefully handle\n # self-referential objects\n seen.add(obj_id)\n if isinstance(obj, dict):\n size += sum([get_size(v, seen) for v in obj.values()])\n size += sum([get_size(k, seen) for k in obj.keys()])\n elif torch.is_tensor(obj):\n size += obj.element_size() * obj.nelement()\n elif hasattr(obj, '__dict__'):\n size += get_size(obj.__dict__, seen)\n elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):\n size += sum([get_size(i, seen) for i in obj])\n return size\n\n\nif __name__ == '__main__':\n arguments = parser.parse_args()\n main(arguments)\n", "\"\"\"\nScript to plot predictions, possibly from several models, and compare them with ground truth.\nThese predictions are only regarding one pedestrian - PRIMARY in the case of Trajnet++. 
Other pedestrians - neighbours\nfor the case of Trajnet++ - are ignored.\n\nNOT IMPLEMENTED - Option to visualize multimodality, in case models can output several samples.\n\"\"\"\nimport argparse\nimport copy\n\nimport torch\nimport matplotlib.pyplot as plt\n\nfrom models.data.loaders import load_test_data\nfrom models.data.environment import Environment\nfrom models.utils.parser_options import add_parser_arguments_for_data, add_parser_arguments_for_testing, \\\n    add_parser_arguments_plotting, add_parser_arguments_misc, override_args_from_json\nfrom models.utils.evaluator import TrajectoryType, map_traj_type, __batch_metrics_unimodal__\nfrom models.utils.plotting import apply_plot_args_to_trajectories, get_or_compute_predictions\nfrom models.evaluate import load_model_s, ModelType\nfrom models.classical.constant_velocity import predict_const_vel\n\nparser = argparse.ArgumentParser()\nparser = add_parser_arguments_for_data(parser)\nparser = add_parser_arguments_for_testing(parser)\nparser = add_parser_arguments_plotting(parser)\nparser = add_parser_arguments_misc(parser)\nparser.add_argument('--all_prediction_per_method', action='store_true',\n                    help='If true, will display all predictions for each method')\n\nlstm_label = 'LSTM'\n# lstm_enc_dec_label = 'LSTM_enc_dec'\nsparse_motion_fields_label = 'SMF'\nconst_vel_label = 'CV'\nmethod_choices = [lstm_label, const_vel_label, sparse_motion_fields_label]\n\n\ndef main(args):\n    # plt.rcParams.update({'figure.max_open_warning': 0})  # to avoid showing a warning about too many figures\n    # FORCE USE OF CPU; GPU wouldn't really have any benefit here\n    device = torch.device('cpu')\n    test_loaders, file_names = load_test_data(args, device)\n    if args.model_paths is None or args.model_labels is None:\n        raise Exception('You must supply a list of models via --model_paths and associated labels via --model_labels.')\n    if len(args.model_paths) != len(args.model_labels):\n        raise Exception(f'The number of model paths (received {len(args.model_paths)}) must be equal to the number of '\n                        f'associated labels (received {len(args.model_labels)}).')\n    assert args.plot_limits is None or len(args.plot_limits) == 0 or len(args.plot_limits) == 4, \\\n        'You must provide either 4 values for the plot limits (xmin xmax ymin ymax), or not use --plot_limits'\n    models, train_args, input_types, output_types = load_models(args, device, file_names)\n    print(\"Loaded all models. 
Beginning evaluation / display of trajectory predictions\")\n    trajnetpp = not args.fixed_len and not args.variable_len\n    observed_trajectories, observed_displacements, ground_truth_trajectories = [], [], []\n    models_prediction = []\n    ades_per_method, fdes_per_method = [], []\n    best_model_ade, best_model_fde, worst_model_ade, worst_model_fde = [], [], [], []\n    start_end_list = []\n    num_batches = sum([len(loader) for loader in test_loaders])\n    curr_batch = 0\n    for loader_idx, loader in enumerate(test_loaders):\n        for batch in loader:\n            curr_batch += 1\n            (obs_traj, pred_traj_gt, obs_traj_rel, pred_traj_rel_gt, metadata, loss_mask, seq_start_end) = batch\n            print(f'\\rBatch {curr_batch}/{num_batches}', end='')\n            if trajnetpp:\n                # only for primary pedestrians\n                observed_trajectories.append(obs_traj[:, seq_start_end[:, 0]])\n                observed_displacements.append(obs_traj_rel[:, seq_start_end[:, 0]])\n                ground_truth_trajectories.append(pred_traj_gt[:, seq_start_end[:, 0]])\n            else:\n                observed_trajectories.append(obs_traj)\n                observed_displacements.append(obs_traj_rel)\n                ground_truth_trajectories.append(pred_traj_gt)\n            start_end_list.append(seq_start_end)\n            models_prediction.append(torch.tensor([], device=device))\n            ades_per_method.append(torch.tensor([], device=device))\n            fdes_per_method.append(torch.tensor([], device=device))\n            for idx, model in enumerate(models):\n                if args.num_samples > 1:\n                    raise Exception('TODO! Displaying multiple predictions for same pedestrian not implemented yet')\n                else:\n                    obs_traj_seq = obs_traj if input_types[idx] == TrajectoryType.ABS else obs_traj_rel\n                    pred_traj_len = pred_traj_gt.shape[0]\n                    prediction = get_or_compute_predictions(model, loader_idx, train_args[idx], input_types[idx],\n                                                            output_types[idx], obs_traj_seq, obs_traj, seq_start_end,\n                                                            metadata, pred_traj_len)\n                    pred_seq, ade, fde = __batch_metrics_unimodal__(args, prediction, output_types[idx], obs_traj,\n                                                                    pred_traj_gt, pred_traj_len, obs_traj_rel,\n                                                                    seq_start_end)\n                    if trajnetpp:\n                        # only for primary pedestrians\n                        pred_seq = pred_seq[:, seq_start_end[:, 0]]\n                    models_prediction[-1] = torch.cat((models_prediction[-1], pred_seq[:, :, :2].unsqueeze(0)), dim=0)\n                    ades_per_method[-1] = torch.cat((ades_per_method[-1], ade.unsqueeze(0)), dim=0)\n                    fdes_per_method[-1] = torch.cat((fdes_per_method[-1], fde.unsqueeze(0)), dim=0)\n            best_model_ade.append(torch.argmin(ades_per_method[-1], dim=0))\n            best_model_fde.append(torch.argmin(fdes_per_method[-1], dim=0))\n            worst_model_ade.append(torch.argmax(ades_per_method[-1], dim=0))\n            worst_model_fde.append(torch.argmax(fdes_per_method[-1], dim=0))\n    if args.all_prediction_per_method:\n        display_all_predictions_per_method(args, args.model_labels, observed_trajectories, models_prediction)\n    else:\n        display_predictions_for_methods(args, device, args.model_labels, observed_trajectories, observed_displacements,\n                                        ground_truth_trajectories, models_prediction, best_model_ade, best_model_fde,\n                                        worst_model_ade, worst_model_fde, start_end_list)\n\n\ndef display_all_predictions_per_method(args, model_labels, observed_trajectories, models_prediction):\n    \"\"\"\n    Create a total of \"num_models\" plots, each containing the entirety of all predicted trajectories. Useful to see if\n    the overall prediction horizon differs to a great extent from the GT\n    :param args: command line arguments to regulate plots\n    :param model_labels: list of length num_models. 
Labels to give to each model/plot\n :param observed_trajectories: All observed trajectories\n :param models_prediction: All model predictions\n :return: nothing, plotting is done here\n \"\"\"\n if args.plot_limits and len(args.plot_limits) == 4:\n x_limits, y_limits = args.plot_limits[:2], args.plot_limits[2:]\n else:\n x_limits, y_limits = None, None\n if args.environment_location:\n # also display the static environment - in the background\n environment = Environment.load(args.environment_location)\n environment_plot = Environment(copy.deepcopy(environment.obstacles), copy.deepcopy(environment.scene_bounds))\n environment_plot.change(args.switch_x_y, args.invert_x, args.invert_y)\n else:\n environment = environment_plot = None\n color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']\n obs_color, model_colors = color_cycle[0], color_cycle[1:]\n for model_idx, model_label in enumerate(model_labels):\n plt.figure()\n plt.title(f'All predictions for model {model_label}', fontdict={'fontsize': 14})\n legend_shown = False\n num_collisions_with_env = num_trajectories = 0\n if environment:\n environment_plot.plot(plt)\n for idx in range(len(observed_trajectories)):\n last_obs = observed_trajectories[idx][-1, :, :].clone().unsqueeze(0)\n obs_trajs = observed_trajectories[idx]\n num_trajectories += obs_trajs.shape[1]\n # append last observed position to make the trajectory a single dash (not have the gap between last\n # observed position and first predicted position)\n predictions = torch.cat((last_obs, models_prediction[idx][model_idx, :, :, :]), dim=0)\n if environment:\n num_collisions_with_env += torch.sum(\n environment.compute_collisions(predictions, combine_cse_osb=True)).cpu().detach().data\n predictions = predictions\n for p in range(obs_trajs.shape[1]):\n if legend_shown:\n plt.plot(obs_trajs[:, p, 0], obs_trajs[:, p, 1], linewidth=1, color=obs_color)\n plt.plot(predictions[:, p, 0], predictions[:, p, 1], linewidth=1, color=model_colors[model_idx])\n else:\n legend_shown = True\n plt.plot(obs_trajs[:, p, 0], obs_trajs[:, p, 1], linewidth=1, color=obs_color, label='OBS')\n plt.plot(predictions[:, p, 0], predictions[:, p, 1], linewidth=1, color=model_colors[model_idx],\n label=model_label)\n plt.scatter(obs_trajs[:, p, 0], obs_trajs[:, p, 1], s=50, alpha=0.7, color=obs_color)\n plt.scatter(predictions[1:, p, 0], predictions[1:, p, 1], s=50, alpha=0.7,\n color=model_colors[model_idx])\n plt.xlabel(f'x ({args.units})', fontdict={'fontsize': 14})\n plt.ylabel(f'y ({args.units})', fontdict={'fontsize': 14})\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n if environment:\n print(f'Model {model_label} environment collisions: {num_collisions_with_env}; '\n f'Average per trajectory: {num_collisions_with_env / float(num_trajectories):.2f}')\n if x_limits and y_limits:\n plt.xlim(x_limits)\n plt.ylim(y_limits)\n plt.legend(loc=args.legend_location)\n plt.show()\n\n\ndef display_predictions_for_methods(args, device, model_labels, observed_trajectories, observed_displacements,\n ground_truth_trajectories, models_prediction, best_model_ade, best_model_fde,\n worst_model_ade, worst_model_fde, start_end_list):\n \"\"\"\n Plots all predictions from the supplied methods/models, one trajectory per plot.\n Will also print information to the command line, indicating for each model, how much of their predictions result\n in the best/worst ADE/FDE.\n :param args: command line arguments to configure the plots\n :param device: torch.device (cpu or cuda) to map tensors to\n :param model_labels: 
list with labels identifying each model\n :param observed_trajectories: all observed trajectories, in absolute positions\n :param observed_displacements: all observed trajectories, in terms of relative displacements (or velocities)\n :param ground_truth_trajectories: all future GT trajectories, in absolute positions\n :param models_prediction: all future predicted trajectories, for\n :param best_model_ade: the index for the model with best ADE, per trajectory\n :param best_model_fde: the index for the model with best FDE, per trajectory\n :param worst_model_ade: the index for the model with worst ADE, per trajectory\n :param worst_model_fde: the index for the model with worst FDE, per trajectory\n :param start_end_list: delimiters for the beginning and end of each situation or set of trajectories.\n :return: nothing, plots will be done here.\n \"\"\"\n num_trajectories_discarded = 0\n num_trajectories_plotted = 0\n num_models = len(model_labels)\n num_best_ade, num_best_fde, num_worst_ade, num_worst_fde = [0] * num_models, [0] * num_models, [0] * num_models, \\\n [0] * num_models\n if args.environment_location:\n # also display the static environment - in the background\n environment = Environment.load(args.environment_location)\n environment_plot = Environment(copy.deepcopy(environment.obstacles), copy.deepcopy(environment.scene_bounds))\n environment_plot.change(args.switch_x_y, args.invert_x, args.invert_y)\n else:\n environment = environment_plot = None\n if args.plot_limits and len(args.plot_limits) == 4:\n x_limits, y_limits = args.plot_limits[:2], args.plot_limits[2:]\n else:\n x_limits, y_limits = None, None\n for idx, batch_obs in enumerate(observed_displacements):\n for batch_idx, obs_vel in enumerate(batch_obs.permute(1, 0, 2)):\n gt = ground_truth_trajectories[idx][:, batch_idx, :]\n if torch.all(torch.abs(torch.sum(obs_vel, dim=0)) <= args.displacement_threshold):\n # small displacement below specified threshold (e.g. 
pedestrian is stopped)\n num_trajectories_discarded += 1\n continue\n if obs_vel.shape[0] + gt.shape[0] <= args.length_threshold:\n continue\n last_obs = observed_trajectories[idx][-1, batch_idx, :].clone().unsqueeze(0)\n obs_traj = observed_trajectories[idx][:, batch_idx, :]\n # append last observed position to make the trajectory a single dash (not have the gap between last\n # observed position and first predicted position)\n gt = torch.cat((last_obs, gt), dim=0)\n # tensor of shape (num_models, traj_len, 2); the permute below is for num_models to simulate 'batch'\n predictions = torch.cat((last_obs.unsqueeze(0).repeat(num_models, 1, 1),\n models_prediction[idx][:, :, batch_idx, :]), dim=1)\n if environment and args.only_plot_collisions_static and torch.sum(\n environment.compute_collisions(predictions[:, :, 0].permute(1, 0, 2), combine_cse_osb=True)) == 0:\n continue # all primary pedestrian predictions comply with the scene environment\n predictions = predictions\n best_ade, best_fde, worst_ade, worst_fde = best_model_ade[idx][batch_idx], best_model_fde[idx][batch_idx], \\\n worst_model_ade[idx][batch_idx], worst_model_fde[idx][batch_idx]\n num_best_ade[best_ade] += 1\n num_best_fde[best_fde] += 1\n num_worst_ade[worst_ade] += 1\n num_worst_fde[worst_fde] += 1\n best_model_ade_label = model_labels[best_ade]\n # best_model_fde_label = model_labels[best_fde]\n worst_model_ade_label = model_labels[worst_ade]\n # worst_model_fde_label = model_labels[worst_fde]\n obs_traj, gt, predictions = apply_plot_args_to_trajectories(args, obs_traj, gt, predictions)\n plt.figure()\n if environment_plot:\n environment_plot.plot(plt)\n # TODO - allow background as a certain scene image\n \"\"\"\n plt.title(f'Best prediction from {best_model_ade_label}, worse from {worst_model_ade_label}',\n fontdict={'fontsize': 14})\n \"\"\"\n plt.plot(obs_traj[:, 0], obs_traj[:, 1], linewidth=3, label='OBS')\n plt.scatter(obs_traj[:, 0], obs_traj[:, 1], s=50, alpha=0.7)\n plt.plot(gt[:, 0], gt[:, 1], linewidth=3, label='GT')\n plt.scatter(gt[1:, 0], gt[1:, 1], s=50, alpha=0.7)\n for label_idx, pred in enumerate(predictions):\n plt.plot(pred[:, 0], pred[:, 1], linewidth=3, label=model_labels[label_idx])\n plt.scatter(pred[1:, 0], pred[1:, 1], s=50, alpha=0.7)\n if x_limits and y_limits:\n plt.xlim(x_limits)\n plt.ylim(y_limits)\n plt.xlabel(f'x ({args.units})', fontdict={'fontsize': 14})\n plt.ylabel(f'y ({args.units})', fontdict={'fontsize': 14})\n plt.xticks(fontsize=14)\n plt.yticks(fontsize=14)\n plt.legend(loc=args.legend_location)\n if args.plot_title is not None and args.plot_title:\n plt.title(args.plot_title, fontdict={'fontsize': 18})\n num_trajectories_plotted += 1\n if num_trajectories_plotted >= args.max_trajectories:\n print(f'Reached {args.max_trajectories} plots')\n for m_idx, label in enumerate(model_labels):\n print(f\"Model {label} predictions (from the plots): {num_best_ade[m_idx]} with best ADE, \"\n f\"{num_best_fde[m_idx]} with best FDE, {num_worst_ade[m_idx]} with worst ADE, \"\n f\"{num_worst_fde[m_idx]} with worst FDE.\")\n plt.show()\n return\n print(\"\")\n for idx, label in enumerate(model_labels):\n print(f\"Model {label} predictions (from the plots): {num_best_ade[idx]} with best ADE, {num_best_fde[idx]} \"\n f\"with best FDE, {num_worst_ade[idx]} with worst ADE, {num_worst_fde[idx]} with worst FDE.\")\n plt.show()\n\n\ndef load_models(args, device, data_file_names):\n \"\"\"\n load all models to plot predictions, or files with pre-computed predictions\n :param args: command line 
arguments with information and configuration options to load models\n :param device: torch.device to map the models to (e.g. map to cpu or cuda), if required\n :param data_file_names: names of the original data files with GT data. For the case of loading pre-computed\n predictions from files, checks if the names match.\n :return: 4 lists, all of the same length (number of models), containing in each element:\n 1. the model\n 2. train arguments used to create / train the model (if applicable)\n 3. type of input it expects (ABSOLUTE POSITIONS, VELOCITIES, ACCELERATIONS)\n 4. type outputted by the model (ABSOLUTE POSITIONS, VELOCITIES, ACCELERATIONS)\n \"\"\"\n models, train_args, input_types, output_types = [], [], [], []\n for idx, label in enumerate(args.model_labels):\n # first, check if the file has extension .ndjson - means it consists of pre-computed Trajnet++ predictions\n if args.model_paths[idx].endswith('ndjson'):\n if args.fixed_len or args.variable_len:\n raise Exception(f'Prediction file ({args.model_paths[idx]}) is only supported for Trajnet++ data')\n test_dir_cache = args.test_dir\n args.test_dir = args.model_paths[idx]\n loaders_pred, pred_file_names = load_test_data(args, device, load_pred=True)\n assert len(pred_file_names) == len(data_file_names) and \\\n [p == d for (p, d) in zip(pred_file_names, data_file_names)], \\\n f'The provided path to load prediction ({args.model_paths[idx]}) does not match the provided ' \\\n f'data path ({test_dir_cache})'\n args.test_dir = test_dir_cache\n # need to convert to iter to get one batch a time\n models.append([iter(loader) for loader in loaders_pred])\n train_args.append(None)\n input_types.append(TrajectoryType.ABS)\n output_types.append(TrajectoryType.ABS)\n # otherwise - it is an actual model\n elif 'lstm' in label.lower():\n args.model_path = args.model_paths[idx]\n model, _, train_arg = load_model_s(args, device, ModelType.LSTM)\n assert len(model) == 1, f'You cannot supply a directory, ({args.model_path})' \\\n 'the path to the model must be a single file, that ' 'must exist!'\n model, train_arg = model[0], train_arg[0]\n use_acceleration = train_arg.use_acc if hasattr(train_args, 'use_acc') else False\n if 'interaction' in model.__class__.__name__.lower() or \\\n 'social' in model.__class__.__name__.lower(): # interaction-aware model\n input_type = TrajectoryType.ABS\n output_type = TrajectoryType.VEL\n elif 'fields' in model.__class__.__name__.lower(): # model uses motion fields\n input_type = TrajectoryType.ABS\n output_type = TrajectoryType.VEL\n else:\n input_type = output_type = map_traj_type(train_arg.use_abs, use_acceleration)\n models.append(model)\n train_args.append(train_arg)\n input_types.append(input_type)\n output_types.append(output_type)\n elif label == sparse_motion_fields_label:\n args.model_path = args.model_paths[idx]\n model, _, _ = load_model_s(args, device, ModelType.SMF)\n assert len(model) == 1, f'You cannot supply a directory, ({args.model_path})' \\\n 'the path to the model must be a single file, that ' 'must exist!'\n models.append(model[0])\n train_args.append(None)\n input_types.append(TrajectoryType.ABS)\n output_types.append(TrajectoryType.ABS)\n else: # == const_vel_label\n models.append(predict_const_vel)\n train_args.append(None)\n input_types.append(TrajectoryType.ABS)\n output_types.append(TrajectoryType.ABS)\n return models, train_args, input_types, output_types\n\n\nif __name__ == '__main__':\n arguments = parser.parse_args()\n if hasattr(arguments, 'load_args_from_json') and 
arguments.load_args_from_json:\n new_args = override_args_from_json(arguments, arguments.load_args_from_json, parser)\n else:\n new_args = arguments\n main(new_args)\n" ]
[ [ "torch.device", "torch.cuda.get_device_properties", "torch.is_tensor", "torch.cuda.is_available" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.yticks", "matplotlib.pyplot.title", "torch.cat", "matplotlib.pyplot.scatter", "matplotlib.pyplot.ylim", "torch.argmax", "torch.argmin", "torch.sum", "torch.tensor", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.ylabel", "torch.device", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
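Note on the record above: before plotting, the code stitches the last observed position onto the ground truth and onto every model's prediction so each curve renders as one unbroken dash. A minimal sketch of that step, assuming tensors shaped (obs_len, 2) and (num_models, pred_len, 2); the function name is illustrative, not from the original:

import torch

def prepend_last_obs(obs_traj, predictions):
    # obs_traj: (obs_len, 2); predictions: (num_models, pred_len, 2)
    last_obs = obs_traj[-1].unsqueeze(0)                             # (1, 2)
    head = last_obs.unsqueeze(0).repeat(predictions.shape[0], 1, 1)  # (num_models, 1, 2)
    return torch.cat((head, predictions), dim=1)                     # (num_models, pred_len + 1, 2)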
zhangyiwen5512/pytorch-faster-rcnn
[ "64b15d3ee4bc2ec62eaab4af63e978a31a0ddbfe", "64b15d3ee4bc2ec62eaab4af63e978a31a0ddbfe" ]
[ "lib/nets/VGG16.py", "lib/nets/network.py" ]
[ "import torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport math\nimport numpy as np\nfrom model.config import cfg as model_cfg # mixup settings (lamda, MIX_LOCATION, layer4); aliased so the layer-config dict 'cfg' below is not shadowed\n\n__all__ = [\n 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',\n 'vgg19_bn', 'vgg19',\n]\n\n\nmodel_urls = {\n 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',\n 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',\n 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',\n 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',\n 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',\n 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',\n 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',\n 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',\n}\n\n\nclass VGG(nn.Module):\n\n def __init__(self, features, num_classes=1000, init_weights=True):\n super(VGG, self).__init__()\n self.features = features\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n\n\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n\n\n##############################\n# x = self.classifier(x)\n###############################\n # run the classifier layer by layer so mixup can be applied at a configurable depth\n lam = model_cfg.lamda\n layers = list(self.classifier.children())# children() is a generator, so materialize it before indexing\n x = layers[0](x)# linear1\n x = layers[1](x)# relu1\n x = layers[2](x)# dropout1\n if model_cfg.MIX_LOCATION == 1 and model_cfg.layer4:\n # mixup after the first FC block: blend each sample with a randomly paired batch partner\n rcnn_index = np.arange(x.size()[0])\n np.random.shuffle(rcnn_index)\n self.rcnn_mix_index = rcnn_index\n x = lam * x + (1 - lam) * x[rcnn_index, :]\n\n x = layers[3](x)# linear2\n x = layers[4](x)# relu2\n x = layers[5](x)# dropout2\n if model_cfg.MIX_LOCATION == 2 and model_cfg.layer4:\n rcnn_index = np.arange(x.size()[0])\n np.random.shuffle(rcnn_index)\n self.rcnn_mix_index = rcnn_index\n x = lam * x + (1 - lam) * x[rcnn_index, :]\n\n x = layers[6](x)# linear3\n if model_cfg.MIX_LOCATION == 3 and model_cfg.layer4:\n rcnn_index = np.arange(x.size()[0])\n np.random.shuffle(rcnn_index)\n self.rcnn_mix_index = rcnn_index\n x = lam * x + (1 - lam) * x[rcnn_index, :]\n\n ######\n return x\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n\ndef make_layers(cfg, batch_norm=False):\n layers = []\n in_channels = 3\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfg = {\n 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n}\n\n\ndef vgg11(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['A']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11']))\n return model\n\n\ndef vgg11_bn(pretrained=False, **kwargs):\n \"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg11_bn']))\n return model\n\n\ndef vgg13(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['B']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13']))\n return model\n\n\ndef vgg13_bn(pretrained=False, **kwargs):\n \"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg13_bn']))\n return model\n\n\ndef vgg16(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\")\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model\n\n\ndef vgg16_bn(pretrained=False, **kwargs):\n \"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16_bn']))\n return model\n\n\ndef vgg19(pretrained=False, **kwargs):\n \"\"\"VGG 19-layer model (configuration \"E\")\n\n Args:\n pretrained (bool): If True, 
returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['E']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))\n return model\n\n\ndef vgg19_bn(pretrained=False, **kwargs):\n \"\"\"VGG 19-layer model (configuration 'E') with batch normalization\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19_bn']))\n return model\n", "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport utils.timer\n\nfrom layer_utils.snippets import generate_anchors_pre\nfrom layer_utils.proposal_layer import proposal_layer\nfrom layer_utils.proposal_top_layer import proposal_top_layer\nfrom layer_utils.anchor_target_layer import anchor_target_layer\nfrom layer_utils.proposal_target_layer import proposal_target_layer\nfrom utils.visualization import draw_bounding_boxes\n\nfrom layer_utils.roi_pooling.roi_pool import RoIPoolFunction\nfrom layer_utils.roi_align.crop_and_resize import CropAndResizeFunction\n\nfrom model.config import cfg\n\nimport tensorboardX as tb\n\nfrom scipy.misc import imresize\n\nclass Network(nn.Module):\n def __init__(self):\n nn.Module.__init__(self)# 此处初始化resnet属性self。net是一个module的子类\n self._predictions = {}\n self._losses = {}\n# if cfg.TRAIN.IMS_PER_BATCH == 2:\n # self._RPN_losses = {}\n self._anchor_targets = {}\n self._proposal_targets = {}\n self._layers = {}\n self._gt_image = None\n self._act_summaries = {}\n self._score_summaries = {}\n self._event_summaries = {}\n self._image_gt_summaries = {}\n self._variables_to_fix = {}\n self._device = 'cuda'\n\n def _add_gt_image(self):\n # add back mean\n image = self._image_gt_summaries['image'] + cfg.PIXEL_MEANS\n image = imresize(image[0], self._im_info[:2] / self._im_info[2])\n # BGR to RGB (opencv uses BGR)############################################################################\n self._gt_image = image[np.newaxis, :,:,::-1].copy(order='C')\n\n def _add_gt_image_summary(self):\n # use a customized visualization function to visualize the boxes\n self._add_gt_image()\n image = draw_bounding_boxes(\\\n self._gt_image, self._image_gt_summaries['gt_boxes'], self._image_gt_summaries['im_info'])\n#############################################################################################################################################\n# print(image[0].astype('float64'))\n return tb.summary.image('GROUND_TRUTH', image[0].astype('float64')/255.0)\n\n def _add_act_summary(self, key, tensor):\n return tb.summary.histogram('ACT/' + key + '/activations', tensor.data.cpu().numpy(), bins='auto'),\n tb.summary.scalar('ACT/' + key + '/zero_fraction',\n (tensor.data == 0).float().sum() / tensor.numel())\n\n def _add_score_summary(self, key, tensor):\n return tb.summary.histogram('SCORE/' + key + '/scores', tensor.data.cpu().numpy(), bins='auto')\n\n def 
_add_train_summary(self, key, var):\n return tb.summary.histogram('TRAIN/' + key, var.data.cpu().numpy(), bins='auto')\n\n def _proposal_top_layer(self, rpn_cls_prob, rpn_bbox_pred):\n rois, rpn_scores = proposal_top_layer(\\\n rpn_cls_prob, rpn_bbox_pred, self._im_info,\n self._feat_stride, self._anchors, self._num_anchors)\n return rois, rpn_scores\n\n def _proposal_layer(self, rpn_cls_prob, rpn_bbox_pred):\n #[2000,5] [x1,y1,x2,y2] [2000,1]\n rois, rpn_scores = proposal_layer(\\\n rpn_cls_prob, rpn_bbox_pred, self._im_info, self._mode,\n self._feat_stride, self._anchors, self._num_anchors)\n\n return rois, rpn_scores\n\n def _roi_pool_layer(self, bottom, rois):\n return RoIPoolFunction(cfg.POOLING_SIZE, cfg.POOLING_SIZE, 1. / 16.)(bottom, rois)\n\n def _crop_pool_layer(self, bottom, rois, max_pool=True):\n # implement it using stn\n # box to affine\n # input (x1,y1,x2,y2)\n # ROIpolinglayer :bottom\n \"\"\"\n [ x2-x1 x1 + x2 - W + 1 ]\n [ ----- 0 --------------- ]\n [ W - 1 W - 1 ]\n [ ]\n [ y2-y1 y1 + y2 - H + 1 ]\n [ 0 ----- --------------- ]\n [ H - 1 H - 1 ]\n \"\"\"\n rois = rois.detach()\n\n x1 = rois[:, 1::4] / 16.0#[256,1]\n y1 = rois[:, 2::4] / 16.0#\n x2 = rois[:, 3::4] / 16.0#\n y2 = rois[:, 4::4] / 16.0#\n\n height = bottom.size(2)\n width = bottom.size(3)\n\n # pre_pool_size=7\n pre_pool_size = cfg.POOLING_SIZE * 2 if max_pool else cfg.POOLING_SIZE\n #[256,1024,7,7] 将h×w的roi划分为h/H和w/W的网格,再池化roi到对应网格\n###########################################################################################################\n crops = CropAndResizeFunction(pre_pool_size, pre_pool_size)(bottom, torch.cat([y1/(height-1),x1/(width-1),y2/(height-1),x2/(width-1)], 1), rois[:, 0].int())\n##############################################################################################################\n if max_pool:\n crops = F.max_pool2d(crops, 2, 2)\n\n return crops\n\n def _anchor_target_layer(self, rpn_cls_score):\n rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = \\\n anchor_target_layer(\n rpn_cls_score.data, self._gt_boxes.data.cpu().numpy(), self._im_info, self._feat_stride, self._anchors.data.cpu().numpy(), self._num_anchors)\n ##[1,1,A * height,width]标签 [1,height,width ,9*4]回归 [1,height,width ,9*4] [1,height,width ,9*4] 转化为numpy\n rpn_labels = torch.from_numpy(rpn_labels).float().to(self._device) #.set_shape([1, 1, None, None])\n rpn_bbox_targets = torch.from_numpy(rpn_bbox_targets).float().to(self._device)#.set_shape([1, None, None, self._num_anchors * 4])\n rpn_bbox_inside_weights = torch.from_numpy(rpn_bbox_inside_weights).float().to(self._device)#.set_shape([1, None, None, self._num_anchors * 4])\n rpn_bbox_outside_weights = torch.from_numpy(rpn_bbox_outside_weights).float().to(self._device)#.set_shape([1, None, None, self._num_anchors * 4])\n\n rpn_labels = rpn_labels.long()\n self._anchor_targets['rpn_labels'] = rpn_labels\n self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets\n self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights\n self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights\n##############################################################################################################\n if cfg.TRAIN.IMS_PER_BATCH == 2 :\n ################################################################### ??????\n rpn_labels2, rpn_bbox_targets2, rpn_bbox_inside_weights2, rpn_bbox_outside_weights2 = \\\n anchor_target_layer(\n rpn_cls_score.data, self._gt_boxes2.data.cpu().numpy(), self._im_info, 
self._feat_stride, self._anchors.data.cpu().numpy(), self._num_anchors)\n ##[1,1,A * height,width]标签 [1,height,width ,9*4]回归 [1,height,width ,9*4] [1,height,width ,9*4] 转化为numpy\n#########################################################################???????????\n\n rpn_labels2 = torch.from_numpy(rpn_labels2).float().to(self._device) #.set_shape([1, 1, None, None])\n rpn_bbox_targets2 = torch.from_numpy(rpn_bbox_targets2).float().to(self._device)#.set_shape([1, None, None, self._num_anchors * 4])\n rpn_bbox_inside_weights2 = torch.from_numpy(rpn_bbox_inside_weights2).float().to(self._device)#.set_shape([1, None, None, self._num_anchors * 4])\n rpn_bbox_outside_weights2 = torch.from_numpy(rpn_bbox_outside_weights2).float().to(self._device)#.set_shape([1, None, None, self._num_anchors * 4])\n\n rpn_labels2 = rpn_labels2.long()\n self._anchor_targets['rpn_labels2'] = rpn_labels2\n self._anchor_targets['rpn_bbox_targets2'] = rpn_bbox_targets2\n self._anchor_targets['rpn_bbox_inside_weights2'] = rpn_bbox_inside_weights2\n self._anchor_targets['rpn_bbox_outside_weights2'] = rpn_bbox_outside_weights2\n\n##############################################################################################################\n for k in self._anchor_targets.keys():\n self._score_summaries[k] = self._anchor_targets[k]\n\n return rpn_labels\n\n def _proposal_target_layer(self, rois, roi_scores):\n #[256, 5],[256],[256, 1],[256, 84],[256, 84],[256, 84] #weights,前景为1,背景为0\n rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = \\\n proposal_target_layer(\n rois, roi_scores, self._gt_boxes, self._num_classes)\n\n self._proposal_targets['rois'] = rois\n self._proposal_targets['labels'] = labels.long()\n self._proposal_targets['bbox_targets'] = bbox_targets\n self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights\n self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights\n\n for k in self._proposal_targets.keys():\n self._score_summaries[k] = self._proposal_targets[k]\n\n return rois, roi_scores\n\n def _anchor_component(self, height, width):\n # just to get the shape right\n # 找出anchorbox,是合成的读取的图片的height和width\n #height = int(math.ceil(self._im_info.data[0, 0] / self._feat_stride[0]))\n #width = int(math.ceil(self._im_info.data[0, 1] / self._feat_stride[0]))\n #转到snippets。py\n anchors, anchor_length = generate_anchors_pre(\\\n height, width,\n self._feat_stride, self._anchor_scales, self._anchor_ratios)\n #得到9×k个anchor,送到GPU\n self._anchors = torch.from_numpy(anchors).to(self._device)\n self._anchor_length = anchor_length\n\n def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]):\n sigma_2 = sigma ** 2 # 9 sigma=3\n box_diff = bbox_pred - bbox_targets# 预测和gt的差值\n in_box_diff = bbox_inside_weights * box_diff# 前景则为box_diff\n abs_in_box_diff = torch.abs(in_box_diff)\n smoothL1_sign = (abs_in_box_diff < 1. / sigma_2).detach().float()# <差值 1/9 为正号,大于为0\n #smoothL1_sign表示绝对值小于sigma_2 1-smoothL1_sign表示else\n in_loss_box = torch.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \\\n + (abs_in_box_diff - (0.5 / sigma_2)) * (1. 
- smoothL1_sign)\n out_loss_box = bbox_outside_weights * in_loss_box\n loss_box = out_loss_box\n #减序列\n for i in sorted(dim, reverse=True):\n loss_box = loss_box.sum(i)\n\n loss_box = loss_box.mean()\n return loss_box\n\n################################################################################################################loss计算\n def _add_losses(self, sigma_rpn=3.0):\n if cfg.TRAIN.IMS_PER_BATCH == 1:\n # RPN, class loss\n rpn_cls_score = self._predictions['rpn_cls_score_reshape'].view(-1, 2)#[前景loss,背景loss][Anchorsize*width*height]个anchor\n rpn_label = self._anchor_targets['rpn_labels'].view(-1)\n rpn_select = (rpn_label.data != -1).nonzero().view(-1)#选取的前景及背景\n rpn_cls_score = rpn_cls_score.index_select(0, rpn_select).contiguous().view(-1, 2)#[256,gt]\n rpn_label = rpn_label.index_select(0, rpn_select).contiguous().view(-1)#[256]\n # 是rpn部分的loss\n rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)\n\n # RPN, bbox loss\n rpn_bbox_pred = self._predictions['rpn_bbox_pred']# batch * h * w * (num_anchors*4) 回归框预测的坐标\n rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']# [1,height,width ,9*4] 回归框目标的坐标(和gt的回归值)\n rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']# [1,height,width ,9*4]\n rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']# [1,height,width ,9*4]\n # 是rpn部分的loss\n rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,\n rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])\n elif cfg.TRAIN.IMS_PER_BATCH == 2:\n\n ############ img1\n # RPN, class loss\n rpn_cls_score = self._predictions['rpn_cls_score_reshape'].view(-1, 2)#[前景loss,背景loss][Anchorsize*width*height]个anchor\n rpn_label = self._anchor_targets['rpn_labels'].view(-1)\n rpn_select = (rpn_label.data != -1).nonzero().view(-1)#选取的前景及背景\n rpn_cls_score = rpn_cls_score.index_select(0, rpn_select).contiguous().view(-1, 2)#[256,gt]\n rpn_label = rpn_label.index_select(0, rpn_select).contiguous().view(-1)#[256]\n # 是rpn部分的loss\n rpn_cross_entropy1 = F.cross_entropy(rpn_cls_score, rpn_label)\n\n # RPN, bbox loss\n rpn_bbox_pred = self._predictions['rpn_bbox_pred']# batch * h * w * (num_anchors*4) 回归框预测的坐标\n rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']# [1,height,width ,9*4] 回归框目标的坐标(和gt的回归值)\n rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']# [1,height,width ,9*4]\n rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']# [1,height,width ,9*4]\n # 是rpn部分的loss\n rpn_loss_box1 = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,\n rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])\n\n ############img2\n # RPN, class loss\n rpn_label2 = self._anchor_targets['rpn_labels2'].view(-1)\n rpn_select2 = (rpn_label2.data != -1).nonzero().view(-1)#选取的前景及背景\n rpn_cls_score = self._predictions['rpn_cls_score_reshape'].view(-1, 2)#[前景loss,背景loss][Anchorsize*width*height]个anchor\n rpn_cls_score2 = rpn_cls_score.index_select(0, rpn_select2).contiguous().view(-1, 2)#[256,gt]\n rpn_label2 = rpn_label2.index_select(0, rpn_select2).contiguous().view(-1)#[256]\n # 是rpn部分的loss\n rpn_cross_entropy2 = F.cross_entropy(rpn_cls_score2, rpn_label2)\n\n # RPN, bbox loss\n rpn_bbox_targets2 = self._anchor_targets['rpn_bbox_targets2']# [1,height,width ,9*4] 回归框目标的坐标(和gt的回归值)\n rpn_bbox_inside_weights2 = self._anchor_targets['rpn_bbox_inside_weights2']# [1,height,width ,9*4]\n rpn_bbox_outside_weights2 = 
self._anchor_targets['rpn_bbox_outside_weights2']# [1,height,width ,9*4]\n\n # 是rpn部分的loss\n\n rpn_loss_box2 = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets2, rpn_bbox_inside_weights2,\n rpn_bbox_outside_weights2, sigma=sigma_rpn, dim=[1, 2, 3])\n##############################################3\n lam = cfg.lamda\n rpn_cross_entropy = lam * rpn_cross_entropy1 + (1 - lam) * rpn_cross_entropy2\n rpn_loss_box = lam * rpn_loss_box1 + (1 - lam) * rpn_loss_box2\n else:\n raise Exception(\"check cfg.TRAIN.IMS_PER_BACTH in /lib/model/config.py or experiments/cfgs/*.yml\")\n\n if cfg.loss_strategy == 'RCNN_ONLY' or cfg.loss_strategy == 'RCNN+RPN' or cfg.loss_strategy == 'NOCHANGE':\n # RCNN, class loss\n cls_score = self._predictions[\"cls_score\"]# [256,21]\n label = self._proposal_targets[\"labels\"].view(-1)#[256]\n # RCNN的loss\n cross_entropy = F.cross_entropy(cls_score.view(-1, self._num_classes), label)\n\n # RCNN, bbox loss\n bbox_pred = self._predictions['bbox_pred']# [256,84]\n bbox_targets = self._proposal_targets['bbox_targets']# [256,84]\n bbox_inside_weights = self._proposal_targets['bbox_inside_weights']# [256,84]\n bbox_outside_weights = self._proposal_targets['bbox_outside_weights']# [256,84]\n # RCNN box的loss\n\n loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)\n\n if cfg.loss_strategy == 'RCNN_ONLY' or cfg.loss_strategy == 'RCNN+RPN':\n lam = cfg.lamda\n label2 = self._proposal_targets['labels'][self.rcnn_mix_index, :].view(-1)\n cross_entropy2 = F.cross_entropy(cls_score.view(-1, self._num_classes), label2)\n cross_entropy = lam * cross_entropy + (1 - lam) * cross_entropy2\n\n bbox_targets2 = self._proposal_targets['bbox_targets'][self.rcnn_mix_index, :]\n bbox_inside_weights2 = self._proposal_targets['bbox_inside_weights'][self.rcnn_mix_index, :]\n bbox_outside_weights2 = self._proposal_targets['bbox_outside_weights'][self.rcnn_mix_index, :]\n loss_box2 = self._smooth_l1_loss(bbox_pred, bbox_targets2, bbox_inside_weights2, bbox_outside_weights2)\n loss_box = lam * loss_box + (1 - lam) * loss_box2\n\n if cfg.loss_strategy == 'RPN_ONLY':\n pass\n\n if cfg.loss_strategy == 'RCNN+RPN' or cfg.loss_strategy == 'NOCHANGE':\n self._losses['cross_entropy'] = cross_entropy\n self._losses['loss_box'] = loss_box\n self._losses['rpn_cross_entropy'] = rpn_cross_entropy\n self._losses['rpn_loss_box'] = rpn_loss_box\n\n loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box\n elif cfg.loss_strategy == 'RPN_ONLY':\n loss = rpn_cross_entropy + rpn_loss_box\n self._losses['rpn_cross_entropy'] = rpn_cross_entropy\n self._losses['rpn_loss_box'] = rpn_loss_box\n\n elif cfg.loss_strategy == 'RCNN_ONLY':\n loss = cross_entropy + loss_box\n self._losses['cross_entropy'] = cross_entropy\n self._losses['loss_box'] = loss_box\n\n else:\n raise Exception(\"check cfg.TRAIN.loss_strategy in /lib/model/config.py or experiments/cfgs/*.yml\")\n\n##################################################################################################################\n self._losses['total_loss'] = loss\n\n for k in self._losses.keys():\n self._event_summaries[k] = self._losses[k]\n\n return loss\n\n def _region_proposal(self, net_conv):\n # 得到RPN网络的结果(做完relu)\n rpn = F.relu(self.rpn_net(net_conv))\n self._act_summaries['rpn'] = rpn\n#---------------------做anchor分数预测--------------------------------\n\n rpn_cls_score = self.rpn_cls_score_net(rpn) # batch * (num_anchors * 2) * h * w 顺序[0,1,2,3][1.18,57,38]\n\n # change it so that the score has 2 as its 
channel size,(前景,背景)\n rpn_cls_score_reshape = rpn_cls_score.view(1, 2, -1, rpn_cls_score.size()[-1]) # batch * 2 * (num_anchors*h) * w [0,1,2,3] [1.2,513,38] 9*57=513\n #sofamax預測分数\n rpn_cls_prob_reshape = F.softmax(rpn_cls_score_reshape, dim=1)#[1,2,513,38]\n # Move channel to the last dimenstion, to fit the input of python functions\n rpn_cls_prob = rpn_cls_prob_reshape.view_as(rpn_cls_score).permute(0, 2, 3, 1) # batch * h * w * (num_anchors * 2) [1,57,38,18]\n rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1) # batch * h * w * (num_anchors * 2) [1,57,38,18]\n rpn_cls_score_reshape = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous() # batch * (num_anchors*h) * w * 2 [1,513,38,2]\n\n #最终预测结果rpn_cls_pred\n rpn_cls_pred = torch.max(rpn_cls_score_reshape.view(-1, 2), 1)[1]# 9*57*38=19494,是index\n#---------------------做anchor的bounding box预测--------------------------\n rpn_bbox_pred = self.rpn_bbox_pred_net(rpn)\n rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).contiguous() # batch * h * w * (num_anchors*4)\n\n if self._mode == 'TRAIN':\n #RPN(可能性,坐标)得到正式的region 和其得分,从候选的anchor中。[1,57,38,18]\n rois, roi_scores = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred) # rois, roi_scores are varible\n #rois [2000,5] [x1,y1,x2,y2] roi_scores[2000,1]\n #标记正负样本 [1,57,38,18]\n rpn_labels = self._anchor_target_layer(rpn_cls_score)\n #选出用于训练的region #[256, 5],[256]\n rois, _ = self._proposal_target_layer(rois, roi_scores)\n else:\n if cfg.TEST.MODE == 'nms':\n rois, _ = self._proposal_layer(rpn_cls_prob, rpn_bbox_pred)\n elif cfg.TEST.MODE == 'top':\n rois, _ = self._proposal_top_layer(rpn_cls_prob, rpn_bbox_pred)\n else:\n raise NotImplementedError\n\n self._predictions[\"rpn_cls_score\"] = rpn_cls_score\n self._predictions[\"rpn_cls_score_reshape\"] = rpn_cls_score_reshape\n self._predictions[\"rpn_cls_prob\"] = rpn_cls_prob\n self._predictions[\"rpn_cls_pred\"] = rpn_cls_pred\n self._predictions[\"rpn_bbox_pred\"] = rpn_bbox_pred\n self._predictions[\"rois\"] = rois#[x1,y1,x2,y2]\n\n return rois\n\n def _region_classification(self, fc7):\n cls_score = self.cls_score_net(fc7)\n cls_pred = torch.max(cls_score, 1)[1]\n cls_prob = F.softmax(cls_score, dim=1)\n bbox_pred = self.bbox_pred_net(fc7)\n\n self._predictions[\"cls_score\"] = cls_score\n self._predictions[\"cls_pred\"] = cls_pred\n self._predictions[\"cls_prob\"] = cls_prob\n self._predictions[\"bbox_pred\"] = bbox_pred\n\n return cls_prob, bbox_pred\n\n def _image_to_head(self):\n raise NotImplementedError\n\n def _head_to_tail(self, pool5):\n raise NotImplementedError\n\n def create_architecture(self, num_classes, tag=None,\n anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n self._tag = tag\n\n self._num_classes = num_classes\n self._anchor_scales = anchor_scales\n self._num_scales = len(anchor_scales)\n\n self._anchor_ratios = anchor_ratios\n self._num_ratios = len(anchor_ratios)\n\n self._num_anchors = self._num_scales * self._num_ratios\n\n assert tag != None\n\n # Initialize layers\n self._init_modules()\n\n def _init_modules(self):\n self._init_head_tail()#c初始化惹101和vgg16网络\n####################################################################################################################################################\n # rpn\n self.rpn_net = nn.Conv2d(self._net_conv_channels, cfg.RPN_CHANNELS, [3, 3], padding=1)\n\n self.rpn_cls_score_net = nn.Conv2d(cfg.RPN_CHANNELS, self._num_anchors * 2, [1, 1])\n \n self.rpn_bbox_pred_net = nn.Conv2d(cfg.RPN_CHANNELS, self._num_anchors * 4, [1, 1])\n #softmax\n self.cls_score_net = 
nn.Linear(self._fc7_channels, self._num_classes)\n #bounding box\n self.bbox_pred_net = nn.Linear(self._fc7_channels, self._num_classes * 4)\n#####################################################################################################################################################\n self.init_weights()\n\n def _run_summary_op(self, val=False):\n \"\"\"\n Run the summary operator: feed the placeholders with corresponding newtork outputs(activations)\n \"\"\"\n summaries = []\n # Add image gt\n ######################################################################################################\n summaries.append(self._add_gt_image_summary())\n # Add event_summaries\n for key, var in self._event_summaries.items():\n summaries.append(tb.summary.scalar(key, var.item()))\n self._event_summaries = {}\n if not val:\n # Add score summaries\n for key, var in self._score_summaries.items():\n summaries.append(self._add_score_summary(key, var))\n self._score_summaries = {}\n # Add act summaries\n for key, var in self._act_summaries.items():\n summaries += self._add_act_summary(key, var)\n self._act_summaries = {}\n # Add train summaries\n for k, var in dict(self.named_parameters()).items():\n if var.requires_grad:\n summaries.append(self._add_train_summary(k, var))\n\n self._image_gt_summaries = {}\n \n return summaries\n\n def _predict(self):\n # This is just _build_network in tf-faster-rcnn\n torch.backends.cudnn.benchmark = False\n net_conv = self._image_to_head()\n\n\n # build the anchors for the image 特征图net_conv.size(2)height net_conv.size(3)weight\n #一张图片得到9×K个anchor feature size=(width/stride)*(height/stride) == K\n self._anchor_component(net_conv.size(2), net_conv.size(3))\n\n #--------------------------------RPN---------------------------------------------------------------------\n #得到roi[256, 5][class,x1,y1,x2,y2]\n rois = self._region_proposal(net_conv)\n\n #--------------------------------POLING---------------------------------------------------------------------\n if cfg.loss_strategy == 'RPN_ONLY' and cfg.test == False:##########\n for k in self._predictions.keys():\n self._score_summaries[k] = self._predictions[k]\n return rois, None, None #####################################test error NONE\n\n # 映射\n if cfg.POOLING_MODE == 'crop':#[256,1024,7,7]\n pool5 = self._crop_pool_layer(net_conv, rois)\n else:\n pool5 = self._roi_pool_layer(net_conv, rois)\n\n if cfg.loss_strategy == 'RCNN_ONLY' or cfg.loss_strategy == 'RCNN+RPN':\n# pool5 = pool5.detach()\n lam = cfg.lamda\n rcnn_index = np.arange(pool5.size()[0])\n np.random.shuffle(rcnn_index)\n self.rcnn_mix_index = rcnn_index\n pool5 = lam * pool5 + (1 - lam) * pool5[rcnn_index, :]\n\n if self._mode == 'TRAIN':\n torch.backends.cudnn.benchmark = True # benchmark because now the input size are fixed\n # [256,2048]\n\n # mixup in layer4\n fc7 = self._head_to_tail(pool5)\n #--------------------------------softmax,bouding box --------------------------------------------------------------------\n cls_prob, bbox_pred = self._region_classification(fc7)\n \n for k in self._predictions.keys():\n self._score_summaries[k] = self._predictions[k]\n\n return rois, cls_prob, bbox_pred\n\n def forward(self, image, im_info, gt_boxes=None, gt_boxes2=None, mode='TRAIN'):\n \"\"\"\n\n :param image:\n :param im_info:\n :param gt_boxes:\n :param mode:\n :return:\n \"\"\"\n self._image_gt_summaries['image'] = image\n self._image_gt_summaries['gt_boxes'] = gt_boxes\n self._image_gt_summaries['im_info'] = im_info\n\n self._image = 
torch.from_numpy(image.transpose([0,3,1,2])).to(self._device)# 通道换位置\n self._im_info = im_info # No need to change; actually it can be an list\n self._gt_boxes = torch.from_numpy(gt_boxes).to(self._device) if gt_boxes is not None else None\n######################################################################################################################\n if cfg.TRAIN.IMS_PER_BATCH == 2 :\n self._gt_boxes2 = torch.from_numpy(gt_boxes2).to(self._device) if gt_boxes2 is not None else None\n######################################################################################################################\n\n self._mode = mode\n #得到roi[256, 5][class,x1,y1,x2,y2]\n #cls_prob[256,21],若是前景则得到其预测分数\n #bbox_pred[256,84],得到其坐标\n rois, cls_prob, bbox_pred = self._predict()\n if mode == 'TEST':\n stds = bbox_pred.data.new(cfg.TRAIN.BBOX_NORMALIZE_STDS).repeat(self._num_classes).unsqueeze(0).expand_as(bbox_pred)\n means = bbox_pred.data.new(cfg.TRAIN.BBOX_NORMALIZE_MEANS).repeat(self._num_classes).unsqueeze(0).expand_as(bbox_pred)\n self._predictions[\"bbox_pred\"] = bbox_pred.mul(stds).add(means)\n else:\n self._add_losses() # compute losses\n\n\n def init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n m.bias.data.zero_()\n \n normal_init(self.rpn_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.rpn_cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.rpn_bbox_pred_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.cls_score_net, 0, 0.01, cfg.TRAIN.TRUNCATED)\n normal_init(self.bbox_pred_net, 0, 0.001, cfg.TRAIN.TRUNCATED)\n\n # Extract the head feature maps, for example for vgg16 it is conv5_3\n # only useful during testing mode\n def extract_head(self, image):\n feat = self._layers[\"head\"](torch.from_numpy(image.transpose([0,3,1,2])).to(self._device))\n return feat\n\n # only useful during testing mode\n def test_image(self, image, im_info):\n self.eval()\n with torch.no_grad():\n self.forward(image, im_info, None, None, mode='TEST')\n cls_score, cls_prob, bbox_pred, rois = self._predictions[\"cls_score\"].data.cpu().numpy(), \\\n self._predictions['cls_prob'].data.cpu().numpy(), \\\n self._predictions['bbox_pred'].data.cpu().numpy(), \\\n self._predictions['rois'].data.cpu().numpy()\n\n\n return cls_score, cls_prob, bbox_pred, rois\n\n def delete_intermediate_states(self):\n # Delete intermediate result to save memory\n for d in [self._losses, self._predictions, self._anchor_targets, self._proposal_targets]:\n for k in list(d):\n del d[k]\n\n def get_summary(self, blobs):\n self.eval()\n if cfg.TRAIN.IMS_PER_BATCH == 1:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], None)\n if cfg.TRAIN.IMS_PER_BATCH == 2:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], blobs['gt_boxes2'])\n self.train()\n summary = self._run_summary_op(True)\n\n return summary\n\n def train_step(self, blobs, train_op):\n if cfg.TRAIN.IMS_PER_BATCH == 1:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], None)\n if cfg.TRAIN.IMS_PER_BATCH == 2:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], blobs['gt_boxes2'])\n\n if cfg.loss_strategy == 'NOCHANGE' or cfg.loss_strategy == 'RCNN+RPN':\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss = 
self._losses[\"rpn_cross_entropy\"].item(), \\\n self._losses['rpn_loss_box'].item(), \\\n self._losses['cross_entropy'].item(), \\\n self._losses['loss_box'].item(), \\\n self._losses['total_loss'].item()\n if cfg.loss_strategy == 'RCNN_ONLY':\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss = -1, -1, \\\n self._losses['cross_entropy'].item(), \\\n self._losses['loss_box'].item(), \\\n self._losses['total_loss'].item()\n if cfg.loss_strategy == 'RPN_ONLY':\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss = self._losses[\"rpn_cross_entropy\"].item(), \\\n self._losses['rpn_loss_box'].item(), \\\n -1, -1,\\\n self._losses['total_loss'].item()\n #utils.timer.timer.tic('backward')\n train_op.zero_grad()\n self._losses['total_loss'].backward()\n #utils.timer.timer.toc('backward')\n train_op.step()\n\n self.delete_intermediate_states()\n\n\n return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss\n\n\n\n def train_step_with_summary(self, blobs, train_op):\n if cfg.TRAIN.IMS_PER_BATCH == 1:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], None)\n if cfg.TRAIN.IMS_PER_BATCH == 2:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], blobs['gt_boxes2'])\n if cfg.loss_strategy == 'NOCHANGE' or cfg.loss_strategy == 'RCNN+RPN':\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss = self._losses[\"rpn_cross_entropy\"].item(), \\\n self._losses['rpn_loss_box'].item(), \\\n self._losses['cross_entropy'].item(), \\\n self._losses['loss_box'].item(), \\\n self._losses['total_loss'].item()\n if cfg.loss_strategy == 'RCNN_ONLY':\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss = -1, -1, \\\n self._losses['cross_entropy'].item(), \\\n self._losses['loss_box'].item(), \\\n self._losses['total_loss'].item()\n if cfg.loss_strategy == 'RPN_ONLY':\n rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss = self._losses[\"rpn_cross_entropy\"].item(), \\\n self._losses['rpn_loss_box'].item(), \\\n -1, -1,\\\n self._losses['total_loss'].item()\n train_op.zero_grad()#\n self._losses['total_loss'].backward()\n train_op.step()\n summary = self._run_summary_op()\n\n self.delete_intermediate_states()\n\n return rpn_loss_cls, rpn_loss_box, loss_cls, loss_box, loss, summary\n\n\n\n def train_step_no_return(self, blobs, train_op):\n if cfg.TRAIN.IMS_PER_BATCH == 1:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], None)\n if cfg.TRAIN.IMS_PER_BATCH == 2:\n self.forward(blobs['data'], blobs['im_info'], blobs['gt_boxes'], blobs['gt_boxes2'])\n train_op.zero_grad()\n self._losses['total_loss'].backward()\n train_op.step()\n self.delete_intermediate_states()\n\n def load_state_dict(self, state_dict):\n \"\"\"\n Because we remove the definition of fc layer in resnet now, it will fail when loading \n the model trained before.\n To provide back compatibility, we overwrite the load_state_dict\n \"\"\"\n nn.Module.load_state_dict(self, {k: state_dict[k] for k in list(self.state_dict())})\n\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.Conv2d", "numpy.random.shuffle", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.utils.model_zoo.load_url" ], [ "torch.abs", "scipy.misc.imresize", "torch.nn.functional.softmax", "torch.max", "torch.nn.Module.__init__", "torch.cat", "torch.nn.Conv2d", "torch.nn.functional.cross_entropy", "torch.from_numpy", "numpy.random.shuffle", "torch.nn.Linear", "torch.no_grad", "torch.nn.functional.max_pool2d", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [] } ]
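Both files in the record above implement mixup on intermediate activations (pool5 in network.py, the fully connected stack in VGG16.py): each sample is blended with a randomly paired batch partner, and the permutation is kept so the corresponding losses can be mixed with the same weights. A self-contained sketch of the operation, assuming lam in (0, 1); torch.randperm stands in for the record's numpy shuffle:

import torch

def feature_mixup(features, lam):
    # features: (batch, ...) activations; lam: interpolation weight in (0, 1)
    index = torch.randperm(features.size(0))              # random pairing within the batch
    mixed = lam * features + (1 - lam) * features[index]  # convex combination of paired samples
    return mixed, index                                   # index is reused to mix the matching targets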
justinchuby/onnx
[ "805ae1e634697e37b43701e585c9c253a29ce076", "805ae1e634697e37b43701e585c9c253a29ce076", "805ae1e634697e37b43701e585c9c253a29ce076" ]
[ "onnx/backend/test/case/node/mean.py", "onnx/backend/test/case/node/depthtospace.py", "onnx/backend/test/case/node/blackmanwindow.py" ]
[ "# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass Mean(Base):\n\n @staticmethod\n def export() -> None:\n data_0 = np.array([3, 0, 2]).astype(np.float32)\n data_1 = np.array([1, 3, 4]).astype(np.float32)\n data_2 = np.array([2, 6, 6]).astype(np.float32)\n result = np.array([2, 3, 4]).astype(np.float32)\n node = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1', 'data_2'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1, data_2], outputs=[result],\n name='test_mean_example')\n\n node = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0], outputs=[data_0],\n name='test_mean_one_input')\n\n result = np.divide(np.add(data_0, data_1), 2.)\n node = onnx.helper.make_node(\n 'Mean',\n inputs=['data_0', 'data_1'],\n outputs=['result'],\n )\n expect(node, inputs=[data_0, data_1], outputs=[result],\n name='test_mean_two_inputs')\n", "# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . import expect\n\n\nclass DepthToSpace(Base):\n\n @staticmethod\n def export_default_mode_example() -> None:\n node = onnx.helper.make_node(\n 'DepthToSpace',\n inputs=['x'],\n outputs=['y'],\n blocksize=2,\n mode='DCR'\n )\n\n # (1, 8, 2, 3) input tensor\n x = np.array([[[[0., 1., 2.],\n [3., 4., 5.]],\n [[9., 10., 11.],\n [12., 13., 14.]],\n [[18., 19., 20.],\n [21., 22., 23.]],\n [[27., 28., 29.],\n [30., 31., 32.]],\n [[36., 37., 38.],\n [39., 40., 41.]],\n [[45., 46., 47.],\n [48., 49., 50.]],\n [[54., 55., 56.],\n [57., 58., 59.]],\n [[63., 64., 65.],\n [66., 67., 68.]]]]).astype(np.float32)\n\n # (1, 2, 4, 6) output tensor\n y = np.array([[[[0., 18., 1., 19., 2., 20.],\n [36., 54., 37., 55., 38., 56.],\n [3., 21., 4., 22., 5., 23.],\n [39., 57., 40., 58., 41., 59.]],\n [[9., 27., 10., 28., 11., 29.],\n [45., 63., 46., 64., 47., 65.],\n [12., 30., 13., 31., 14., 32.],\n [48., 66., 49., 67., 50., 68.]]]]).astype(np.float32)\n expect(node, inputs=[x], outputs=[y],\n name='test_depthtospace_example')\n\n @staticmethod\n def export_crd_mode_example() -> None:\n node = onnx.helper.make_node(\n 'DepthToSpace',\n inputs=['x'],\n outputs=['y'],\n blocksize=2,\n mode='CRD'\n )\n\n # (1, 8, 2, 3) input tensor\n x = np.array([[[[0., 1., 2.],\n [3., 4., 5.]],\n [[9., 10., 11.],\n [12., 13., 14.]],\n [[18., 19., 20.],\n [21., 22., 23.]],\n [[27., 28., 29.],\n [30., 31., 32.]],\n [[36., 37., 38.],\n [39., 40., 41.]],\n [[45., 46., 47.],\n [48., 49., 50.]],\n [[54., 55., 56.],\n [57., 58., 59.]],\n [[63., 64., 65.],\n [66., 67., 68.]]]]).astype(np.float32)\n\n # (1, 2, 4, 6) output tensor\n y = np.array([[[[0., 9., 1., 10., 2., 11.],\n [18., 27., 19., 28., 20., 29.],\n [3., 12., 4., 13., 5., 14.],\n [21., 30., 22., 31., 23., 32.]],\n [[36., 45., 37., 46., 38., 47.],\n [54., 63., 55., 64., 56., 65.],\n [39., 48., 40., 49., 41., 50.],\n [57., 66., 58., 67., 59., 68.]]]]).astype(np.float32)\n expect(node, inputs=[x], outputs=[y],\n name='test_depthtospace_crd_mode_example')\n", "# SPDX-License-Identifier: Apache-2.0\n\n\nimport numpy as np # type: ignore\n\nimport onnx\nfrom ..base import Base\nfrom . 
import expect\n\n\nclass BlackmanWindow(Base):\n\n @staticmethod\n def export() -> None:\n # Test periodic window\n node = onnx.helper.make_node(\n 'BlackmanWindow',\n inputs=['x'],\n outputs=['y'],\n )\n size = np.int32(10)\n a0 = .42\n a1 = -.5\n a2 = .08\n y = a0\n y += a1 * np.cos(2 * 3.1415 * np.arange(0, size, 1, dtype=np.float32) / size)\n y += a2 * np.cos(4 * 3.1415 * np.arange(0, size, 1, dtype=np.float32) / size)\n expect(node, inputs=[size], outputs=[y],\n name='test_blackmanwindow')\n\n # Test symmetric window\n node = onnx.helper.make_node(\n 'BlackmanWindow',\n inputs=['x'],\n outputs=['y'],\n periodic=0\n )\n size = np.int32(10)\n a0 = .42\n a1 = -.5\n a2 = .08\n y = a0\n y += a1 * np.cos(2 * 3.1415 * np.arange(0, size, 1, dtype=np.float32) / (size - 1))\n y += a2 * np.cos(4 * 3.1415 * np.arange(0, size, 1, dtype=np.float32) / (size - 1))\n expect(node, inputs=[size], outputs=[y],\n name='test_blackmanwindow_symmetric')\n" ]
[ [ "numpy.add", "numpy.array" ], [ "numpy.array" ], [ "numpy.arange", "numpy.int32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
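Each ONNX test case in the record above pairs an onnx.helper.make_node call with a NumPy reference output. A hedged sketch of the same pattern for Mean, checked directly here since the expect helper is internal to the ONNX test suite:

import numpy as np
import onnx

node = onnx.helper.make_node('Mean', inputs=['data_0', 'data_1'], outputs=['result'])
data_0 = np.array([3, 0, 2], dtype=np.float32)
data_1 = np.array([1, 3, 4], dtype=np.float32)
reference = np.divide(np.add(data_0, data_1), 2.)  # NumPy reference, as in the record
assert np.allclose(reference, np.array([2., 1.5, 3.], dtype=np.float32))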
DivyaKumarBaid/Live_Discord_music_bot
[ "5bda803978a5140acf82ea14a6478c677b1f2793" ]
[ "main.py" ]
[ "import os\nfrom keep_alive import keep_alive\nimport discord\nimport nacl\nfrom discord import FFmpegPCMAudio\nimport youtube_dl\nimport discord.utils\nfrom discord.utils import find\nfrom discord.ext import commands, tasks\nfrom itertools import cycle\nfrom youtubesearchpython import VideosSearch\nimport numpy as np\n\n#initializing variables\n\nmy_secret = os.environ['TOKEN']\n\nclient = commands.Bot(command_prefix='m.',help_command=None)\n\nlast_vol=[]\n\nsong_played=[]\n\nchvc=[]\n\nsong_url=[]\n\nvoice_channel_to_connect=[]\n\ncommand_dict = {\n \"m.add [url]/[name]\" : \"Adds given music to queue\",\n \"m.songs\" : \"Lists all the songs in the playlist\",\n \"m.skip\" : \"Skips the Currently Playing Song\",\n \"m.play [VoiceChannel]\" : \"This command plays music in the desired channel\",\n \"m.play_this [Name]/[URL]\" : \"Plays a particular song\",\n \"m.stop\" : \"Stops the music player\",\n \"m.remove [index]\" : \"Removes the particular song at that index\",\n \"m.clear_playlist\" : \"Removes every song from the playlist\",\n \"m.volume [integer value]\" : \"Sets the volume level\",\n \"m.playOn [VoiceChannel]\" : \"Sets the Voice Channel on which bot plays\"\n}\n\n#initializing variables\n\n\n# For accessing already created playlist if present\n\nif (os.path.exists('playlist.txt')):\n playlist_ini = np.loadtxt('playlist.txt' , dtype=str , delimiter = '\\n')\n playlist_ini = playlist_ini.tolist()\n\n if(type(playlist_ini)!=list):\n playlist = []\n playlist.append(playlist_ini)\n else:\n playlist = playlist_ini\n\nelse:\n f = open(\"playlist.txt\", \"x\")\n f.close()\n playlist = []\n\nfor i in playlist:\n song_url.append(i)\n# print(song_url)\n\n\n# For accessing already created playlist if present\n\n# ffmpeg\n#before running install pip install pynacl\n#for audio pip install ffmpeg\nFFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'} #locking options for ffmpeg\n\n\n#when bot is ready\[email protected]\nasync def on_ready():\n print(\"I am alive\")\n last_vol.append(100.0)\n await client.change_presence(\n status=discord.Status.online,\n activity=discord.Game('Music. 
To know more type m.help'))\n\n\n#when it is first added to a server\[email protected]\nasync def on_guild_join(guild):\n general = find(lambda x: x.name == 'general', guild.text_channels)\n if general and general.permissions_for(guild.me).send_messages:\n text = discord.Embed(\n title = f'Hello **{guild.name}**!\\n',\n url = \"https://github.com/DivyaKumarBaid/Discord_music_bot_V-2\",\n description = 'Nice to meet you all.\\nTo set up this bot, set the voice channel with m.playOn <channel_name>, add your songs with m.add <song_name>, and type m.play to play on your channel\\nFor more info type m.help',\n color= 53380,\n )\n text.set_author(name= \"Discord_music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n text.set_footer(text= \"m.help to know commands\")\n\n await general.send(embed=text)\n\n\n\n# removes any duplicate songs that are currently in the playlist\ndef duplicate():\n res = []\n for i in playlist:\n if i not in res:\n res.append(i)\n playlist.clear()\n for i in res:\n playlist.append(i)\n\n\n#infinite loop to play music 24X7 until closed/stopped\[email protected](seconds=5)\nasync def play_song(ctx, ch, channel,l):\n\n voice = discord.utils.get(client.voice_clients, guild=ctx.guild) \n \n # print(song_url)\n\n if len(song_url) == 0:\n duplicate()\n for i in playlist:\n song_url.append(i)\n song_played.clear()\n\n url=song_url[0]\n \n if not ch.is_playing() and voice is not None:\n try: \n ydl_opts = {'format': 'bestaudio/best'}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n info = ydl.extract_info(url, download=False)\n video_title = info.get('title', None)\n URL = info['formats'][0]['url']\n \n ch.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS)))\n voice.source.volume = last_vol[0]\n text = embedding(f\" Playing :{video_title}\")\n await ctx.send(embed=text, delete_after=60.0)\n song_played.append(song_url[0])\n song_url.pop(0)\n except:\n await ctx.send(\"Connection Error!!\")\n\n \n\n#sets the bot to play on a particular channel\[email protected]()\nasync def playOn(ctx,*,channel):\n channel = discord.utils.get(ctx.guild.voice_channels, name=channel)\n if(channel is not None):\n voice_channel_to_connect.clear()\n voice_channel_to_connect.append(channel)\n await ctx.send(f\"{channel} is now set to play it loud!\")
\n else:\n await ctx.send(\"**Couldn't find the channel**\")\n\n\n#skip a song\[email protected](help= \"Skip the current song\")\nasync def skip(ctx):\n ch=chvc[0]\n ch.stop()\n\n\n#sets volume to user defined value and this needs to be refined\[email protected]()\nasync def volume(ctx, x: int):\n if 0 <= x <= 100:\n y=x/100.0\n last_vol.pop(0)\n last_vol.append(y)\n vc = discord.utils.get(client.voice_clients, guild=ctx.guild)\n vc.source.volume = float(y)\n text = discord.Embed(\n title= \"**Volume Control**\",\n description = f\" Volume set to {int(x)} \",\n color= 53380,\n )\n text.set_author(name= \"Discord_music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n text.set_footer(text= \"m.help to know commands\")\n await ctx.send(embed=text)\n else:\n await ctx.send(\"Volume level must be between 0 and 100\")\n\n\n#play command to start an infinite loop\[email protected](help=\"Channel name is optional.\" , brief=\"This command plays songs from the available ones. Providing a channel name is optional; without it the bot plays on the channel set by m.playOn\")\nasync def play(ctx, channel = None):\n #joining the desired channel\n\n voice = discord.utils.get(client.voice_clients, guild=ctx.guild)\n\n channel = discord.utils.get(ctx.guild.voice_channels, name=channel)\n\n if channel is None:\n if(len(voice_channel_to_connect) == 0):\n await ctx.send(\"Couldn't find the channel to join. Please set one with m.playOn <channel_name> or pass it as an argument to this command\")\n return\n\n else:\n channel = voice_channel_to_connect[0]\n\n\n #checking if it is playing any audio\n if voice is None:\n await ctx.send(f\"Joined **{channel}**\")\n else:\n await ctx.voice_client.disconnect()\n ch = await channel.connect()\n if(len(chvc)!=0):\n chvc.pop(0)\n chvc.append(ch)\n await ctx.send(f\"Playing on **{channel}** Channel\")\n \n #get the number of songs and if none is present it will show up a message\n duplicate()\n n = len(song_url)\n if not n==0:\n n=n-1\n play_song.start(ctx, ch, channel,n)\n else:\n text = discord.Embed(\n title= \"**No Music**\",\n description = \"There is no music to play\\nUse m.add [url] to add a song\",\n color= 53380,\n )\n text.set_author(name= \"Discord_Music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n text.set_footer(text= \"m.help to know commands\")\n await ctx.send(embed=text)\n \n \n#plays a particular music\[email protected](help = \"This stops the loop of playing songs and plays the named song instead\")\nasync def play_this(ctx,channel = None,*,name):\n voice = discord.utils.get(client.voice_clients, guild=ctx.guild) \n channel = discord.utils.get(ctx.guild.voice_channels, name=channel)\n\n if channel is None:\n if(len(voice_channel_to_connect) == 0):\n await ctx.send(\"Couldn't find the channel to join. Please set one with m.playOn <channel_name> or pass it as an argument to this command\")\n return\n\n else:\n channel = voice_channel_to_connect[0]\n\n if len(chvc)==0 and voice is None:\n ch = await channel.connect()\n chvc.clear()\n chvc.append(ch)\n else :\n ch=chvc[0]\n ch.stop()\n play_song.stop()\n\n videosSearch = VideosSearch(name, limit = 1)\n result_song_list = videosSearch.result()\n\n title_song = result_song_list['result'][0]['title']\n urllink = result_song_list['result'][0]['link']\n \n try: \n ydl_opts = {'format': 'bestaudio/best'}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n info = ydl.extract_info(urllink, download=False)\n video_title = 
info.get('title', None)\n URL = info['formats'][0]['url']\n ch.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(URL, **FFMPEG_OPTIONS)))\n text = embedding(f\" Playing :{video_title}\")\n await ctx.send(embed=text, delete_after=60.0)\n \n except:\n await ctx.send(\"Connection Error !!! \",delete_after=60.0)\n\n\n\n#add music\[email protected](help='Song name or youtube link', brief='This adds a song to the playlist; it can be given as a name or a youtube url')\nasync def add(ctx, * ,searched_song):\n\n videosSearch = VideosSearch(searched_song, limit = 1)\n result_song_list = videosSearch.result()\n\n title_song = result_song_list['result'][0]['title']\n urllink = result_song_list['result'][0]['link']\n thumbnail = result_song_list['result'][0]['thumbnails'][0]['url']\n\n\n if(not urllink in playlist):\n playlist.append(urllink)\n song_url.append(urllink)\n\n\n text = discord.Embed(\n title= \"**Song Added**\",\n description = f\"{title_song} is added to the Queue\\nLink : {urllink}\",\n color= 53380,\n )\n text.set_image(url = thumbnail)\n text.set_author(name= \"Discord_music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n text.set_footer(text= \"m.help to know commands\")\n\n duplicate()\n np.savetxt('playlist.txt',playlist , fmt = '%s')\n content = np.loadtxt('playlist.txt' , dtype=str , delimiter = '\\n') \n content = content.tolist()\n \n await ctx.send(embed=text)\n \n\n#leave vc and stop playing\[email protected](help='This stops the loop' ,brief='This stops the music playing and the bot leaves the voice channel')\nasync def stop(ctx):\n voice = discord.utils.get(client.voice_clients, guild=ctx.guild) \n if voice is None:\n return\n await ctx.voice_client.disconnect()\n play_song.stop() \n song_url.clear()\n for i in playlist:\n song_url.append(i)\n song_played.clear()\n await ctx.send(\"Left the voice channel\")\n\n\n#lists song\[email protected](help=\"This shows the songs present in the playlist\" ,brief='This command lists all the songs available to play')\nasync def songs(ctx):\n l=len(playlist)\n if(l==0):\n await ctx.send(\"No music to play\")\n for i in range(0,l):\n videosSearch = VideosSearch(playlist[i], limit = 1)\n result_song_list = videosSearch.result()\n # print(result_song_list)\n title_song = result_song_list['result'][0]['title']\n text = discord.Embed(\n description = f\"{i+1}# Song Name : {title_song} \",\n color= 53380,\n )\n text.set_author(name= \"Discord_music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n await ctx.send(embed=text)\n\n#removes every song\[email protected](help='Clears the saved playlist file as well' , brief='This command removes every available song')\nasync def clear_playlist(ctx):\n song_url.clear()\n playlist.clear()\n\n np.savetxt('playlist.txt',playlist , fmt = '%s')\n \n text= discord.Embed(\n description=\"**Playlist cleared**\",\n color = 53380,\n )\n text.set_author(name= \"Discord_music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n text.set_footer(text= \"m.help to know commands\")\n await ctx.send(embed=text)\n\n\n#clear\[email protected](help='This command clears text messages', brief='This command clears the given number of messages and by default it clears the last 5 text messages')\nasync def clear(ctx, amount=5):\n await ctx.channel.purge(limit=amount)\n text = embedding(\"Cleared\")\n await ctx.send(embed=text)\n\n#remove a particular song \[email protected](help='Takes the 1-based index of the song to remove' , brief='This command 
removes the specified file')\nasync def remove(ctx,x: int):\n x=x-1\n name_of_song=song_url[x]\n videosSearch = VideosSearch(song_url[x], limit = 1)\n result_song_list = videosSearch.result()\n title_song = result_song_list['result'][0]['title']\n text= embedding(f\"{title_song} Removed\")\n await ctx.send(embed=text)\n pos=0\n for i in playlist:\n pos=pos+1\n if i == name_of_song:\n playlist.pop(pos-1)\n break;\n\n song_url.pop(x)\n\n np.savetxt('playlist.txt',playlist , fmt = '%s')\n\n\n#custom help command\[email protected](invoke_without_command=True)\nasync def help(ctx):\n text = discord.Embed(\n title= \"**HELP TAB**\",\n url= \"https://github.com/DivyaKumarBaid/Discord_Music_bot\",\n color= 53380,\n )\n for x in command_dict : \n des_cmd = command_dict[x]+'\\n'\n text.add_field(name = x , value = des_cmd,inline = True)\n # text.add_field(name = '\\n', value = \"\\n\")\n text.set_author(name= \"Discord_music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n text.set_footer(text= \"m.help to know commands\")\n await ctx.send(embed=text)\n \n#embeds text \ndef embedding(text: str):\n text= discord.Embed(\n description=f\"**{text}**\",\n color = 53380,\n )\n text.set_author(name= \"Discord_music_bot\",\n icon_url= \"https://img.icons8.com/color/48/000000/phonograph.png\")\n text.set_footer(text= \"m.help to know commands\")\n return(text)\n \n\n# checks for errors\[email protected]\nasync def on_command_error(ctx, error):\n if isinstance(error, commands.CommandNotFound):\n await ctx.send('Invalid Command Used. Type m.help to know the commands'\n )\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send(\n 'Give proper values to the command an argument is missing')\n\nkeep_alive() #this keeps the bot alive\n\n#runs bot\nclient.run(my_secret)\n" ]
[ [ "numpy.savetxt", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SAP-samples/security-research-differentially-private-generative-models
[ "c0eced81da3bc0064beb538557f042732cda459f" ]
[ "tflib/inception_score.py" ]
[ "# SPDX-FileCopyrightText: 2020 SAP SE\n#\n# SPDX-License-Identifier: Apache-2.0\n\n# From https://github.com/openai/improved-gan/blob/master/inception_score/model.py\n# Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\nimport glob\nimport scipy.misc\nimport math\nimport sys\n\nMODEL_DIR = '/tmp/imagenet'\nDATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\nsoftmax = None\n\n# Call this function with list of images. Each of elements should be a \n# numpy array with values ranging from 0 to 255.\ndef get_inception_score(images, splits=10):\n assert(type(images) == list)\n assert(type(images[0]) == np.ndarray)\n assert(len(images[0].shape) == 3)\n assert(np.max(images[0]) > 10)\n assert(np.min(images[0]) >= 0.0)\n inps = []\n for img in images:\n img = img.astype(np.float32)\n inps.append(np.expand_dims(img, 0))\n bs = 100\n with tf.Session() as sess:\n preds = []\n n_batches = int(math.ceil(float(len(inps)) / float(bs)))\n for i in range(n_batches):\n # sys.stdout.write(\".\")\n # sys.stdout.flush()\n inp = inps[(i * bs):min((i + 1) * bs, len(inps))]\n inp = np.concatenate(inp, 0)\n pred = sess.run(softmax, {'ExpandDims:0': inp})\n preds.append(pred)\n preds = np.concatenate(preds, 0)\n scores = []\n for i in range(splits):\n part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]\n kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))\n kl = np.mean(np.sum(kl, 1))\n scores.append(np.exp(kl))\n return np.mean(scores), np.std(scores)\n\n# This function is called automatically.\ndef _init_inception():\n global softmax\n if not os.path.exists(MODEL_DIR):\n os.makedirs(MODEL_DIR)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(MODEL_DIR, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)\n with tf.gfile.FastGFile(os.path.join(\n MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n _ = tf.import_graph_def(graph_def, name='')\n # Works with an arbitrary minibatch size.\n with tf.Session() as sess:\n pool3 = sess.graph.get_tensor_by_name('pool_3:0')\n ops = pool3.graph.get_operations()\n for op_idx, op in enumerate(ops):\n for o in op.outputs:\n shape = o.get_shape()\n shape = [s.value for s in shape]\n new_shape = []\n for j, s in enumerate(shape):\n if s == 1 and j == 0:\n new_shape.append(None)\n else:\n new_shape.append(s)\n o._shape = tf.TensorShape(new_shape)\n w = sess.graph.get_operation_by_name(\"softmax/logits/MatMul\").inputs[1]\n logits = tf.matmul(tf.squeeze(pool3), w)\n softmax = tf.nn.softmax(logits)\n\nif softmax is None:\n r = 5\n # _init_inception()\n" ]
[ [ "tensorflow.TensorShape", "tensorflow.import_graph_def", "tensorflow.nn.softmax", "numpy.expand_dims", "numpy.log", "numpy.min", "tensorflow.squeeze", "numpy.concatenate", "numpy.max", "numpy.std", "numpy.mean", "tensorflow.Session", "tensorflow.GraphDef", "numpy.exp", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AntoineSimoulin/pytree
[ "9408799c4b8d4d59b1103e2205ffe7b250614f92" ]
[ "pytree/models/n_ary/modeling_n_ary.py" ]
[ "import torch.nn as nn\nimport torch\nfrom typing import List, Tuple, Optional, overload, Union, cast\nfrom torch import Tensor\nfrom transformers import BertModel\nfrom pytree.data.packed_tree import PackedTree\n\n\nclass TreeEmbeddings(nn.Module):\n\n def __init__(self, config):\n super(TreeEmbeddings, self).__init__()\n self.use_bert = config.use_bert\n self.tune_bert = config.tune_bert\n self.normalize_bert_embeddings = config.normalize_bert_embeddings\n\n # embeddings\n if self.use_bert:\n self.bert = BertModel.from_pretrained(config.pretrained_model_name_or_path)\n for name, param in self.bert.named_parameters():\n param.requires_grad = self.tune_bert\n else:\n self.embeddings = nn.Embedding(config.vocab_size, config.embedding_size) # , sparse=True\n nn.init.xavier_uniform_(self.embeddings.weight.data, gain=1.0)\n self.embeddings.weight.requires_grad = True\n\n if config.xavier_init:\n self.xavier_init_weights()\n \n def load_pretrained_embeddings(self, embeddings_weights, requires_grad=False):\n self.embeddings = nn.Embedding.from_pretrained(embeddings_weights, sparse=True)\n self.embeddings.weight.requires_grad = requires_grad\n \n def forward(self, raw_inputs=None, packed_tree=None, bert_inputs=None):\n if self.use_bert:\n tokens_tensor, tokens_type_ids, attention_mask, sum_idx = bert_inputs\n if self.tune_bert:\n outputs = self.bert(input_ids=tokens_tensor,\n token_type_ids=tokens_type_ids,\n attention_mask=attention_mask)[0]\n else:\n with torch.no_grad():\n outputs = self.bert(input_ids=tokens_tensor,\n token_type_ids=tokens_type_ids,\n attention_mask=attention_mask)[0]\n if self.normalize_bert_embeddings:\n outputs = F.normalize(outputs, p=2, dim=2)\n cat_inputs = torch.reshape(outputs, (-1, outputs.shape[2]))\n embeds = torch.index_select(cat_inputs, 0, sum_idx.long())\n # embeds = torch.sigmoid(self.projection(embeds))\n else:\n # cat_inputs = torch.cat(raw_inputs)\n embeds = self.embeddings(raw_inputs)\n return embeds\n\n def xavier_init_weights(self):\n nn.init.xavier_uniform_(self.embeddings.weight.data, gain=1.0)\n\n\nclass NaryTree(nn.Module):\n def __init__(self, config):\n super(NaryTree, self).__init__()\n self.config = config\n self.embeddings = TreeEmbeddings(config)\n if config.cell_type == 'lstm':\n self.encoder = NaryTreeLSTMEncoder(config)\n elif config.cell_type == 'gru':\n self.encoder = NaryTreeGRUEncoder(config)\n\n def forward(self, inputs):\n embeds = self.embeddings(inputs['input_ids'])\n hidden, h_root = self.encoder(embeds, inputs['tree_ids'].to(embeds.device), inputs['tree_ids_r'].to(embeds.device), inputs['tree_ids_l'].to(embeds.device))\n return hidden, h_root\n\n\nclass TreeLSTM(nn.Module):\n \"\"\"[summary]\n\n Args:\n nn ([type]): [description]\n \"\"\"\n \n def __init__(self, config):\n super(TreeLSTM, self).__init__()\n self.hidden_size = config.hidden_size\n self.embedding_size = config.embedding_size\n self.vocab_size = config.vocab_size\n\n def xavier_init_weights(self):\n # nn.init.xavier_uniform_(self.embeddings.weight.data, gain=1.0)\n for name, param in self.named_parameters():\n if 'weight' in name:\n nn.init.xavier_uniform_(param.data, gain=1.0)\n if 'bias' in name:\n param.data.fill_(0)\n \n def forward(self,\n input_ids: Union[Tensor, PackedTree],\n tree_ids: Tensor = None,\n tree_ids_r: Tensor = None,\n tree_ids_l: Tensor = None,\n hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Union[Tensor, PackedTree], Tuple[Tensor, Tensor]]:\n # if isinstance(orig_input, PackedTrees):\n batch_size = input_ids.size(0) # if self.batch_first 
else input.size(1)\n n_steps = tree_ids.size(1)\n sequence_length = input_ids.size(1)\n # else:\n # batch_size = input.size(0) if self.batch_first else input.size(1)\n # n_steps = tree_ids.size(0)\n \n if hx is None:\n h_zeros = torch.zeros(batch_size, sequence_length, self.hidden_size,\n dtype=input_ids.dtype, device=input_ids.device)\n c_zeros = torch.zeros(batch_size, sequence_length, self.hidden_size,\n dtype=input_ids.dtype, device=input_ids.device)\n hx = (h_zeros, c_zeros)\n\n for step in range(n_steps):\n hx = self.tree_lstm_cell(input_ids, hx, tree_ids[:, step, :], tree_ids_r[:, step, :], tree_ids_l[:, step, :]) # .select(0, step)\n roots = tree_ids[:, -1, :].max(axis=1)[0]\n h_root = torch.gather(hx[0], 1, roots.unsqueeze(1).unsqueeze(2).repeat(1, 1, self.hidden_size)).squeeze()\n return hx, h_root\n\n\nclass NaryTreeLSTMCell(nn.Module):\n\n def __init__(self, config):\n super(NaryTreeLSTMCell, self).__init__()\n self.N = config.N\n self.ioux = nn.Linear(config.embedding_size, 3 * config.hidden_size, bias=True)\n self.iouh = nn.ModuleList([nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=False) for i in range(config.N)])\n self.fx = nn.Linear(config.embedding_size, config.hidden_size, bias=True)\n self.fh = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size, bias=False) for i in range(config.N * config.N)])\n self.hidden_size = config.hidden_size\n self.embedding_size = config.embedding_size\n\n def forward(self, x, hx, tree_ids_d, tree_ids_dr, tree_ids_dl):\n\n # import pdb; pdb.set_trace()\n index = tree_ids_d.unsqueeze(-1).repeat(1, 1, self.hidden_size)\n index_r = tree_ids_dr.unsqueeze(-1).repeat(1, 1, self.hidden_size)\n index_l = tree_ids_dl.unsqueeze(-1).repeat(1, 1, self.hidden_size)\n updated_nodes = torch.zeros_like(index).scatter_add_(1, index, torch.ones_like(index))\n updated_nodes[:, 0, :] = 0\n updated_nodes = updated_nodes.bool()\n\n # iou_x = self.ioux(x)\n iou = self.ioux(x)\n # print('shape ioux', iou_x.shape)\n iou_hr = self.iouh[0](hx[0])\n # print('iouhr shape', iou_hr.shape)\n iou_hl = self.iouh[1](hx[0])\n # iou = iou_x + \\\n # torch.zeros_like(iou_x).scatter_add_(1, index_r, iou_hr) + \\\n # torch.zeros_like(iou_x).scatter_add_(1, index_l, iou_hl)\n # print('index r shape', index_r.shape)\n # print('index_r', index_r)\n iou = torch.scatter_add(iou, 1, index_r.repeat(1, 1, 3), iou_hr)\n iou = torch.scatter_add(iou, 1, index_l.repeat(1, 1, 3), iou_hl)\n # iou = iou_x.scatter_add_(1, index_r, iou_hr)\n # iou = iou_x.scatter_add_(1, index_l, iou_hl)\n \n i, o, u = torch.split(iou, iou.size(-1) // 3, dim=-1)\n i, o, u = torch.sigmoid(i), torch.sigmoid(o), torch.tanh(u)\n \n f = self.fx(x).gather(1, index) + \\\n self.fh[0](hx[0]).gather(1, index_r) + \\\n self.fh[1](hx[0]).gather(1, index_r) + \\\n self.fh[2](hx[0]).gather(1, index_l) + \\\n self.fh[3](hx[0]).gather(1, index_l)\n f = torch.sigmoid(f)\n fc = torch.mul(f, hx[1])\n \n # c = torch.mul(i, u) + torch.zeros_like(fc).scatter_add_(1, index, fc)\n c = torch.mul(i, u)\n c = c.scatter_add_(1, index, fc)\n h = torch.mul(o, torch.tanh(c))\n \n # h = hx[0].masked_scatter_(index.bool(), h)\n h = torch.where(updated_nodes, h, hx[0]) # index.bool()\n # c = hx[1].masked_scatter_(index.bool(), c)\n c = torch.where(updated_nodes, c, hx[1]) # index.bool()\n\n return h, c\n\n\nclass NaryTreeLSTMEncoder(TreeLSTM):\n r\"\"\"\n .. 
math::\n :nowrap:\n\n \\begin{align}\n \\tilde{h}_j &= \\sum_{k \\in C(j)} h_k, \\\\\n i_j &=\\sigma \\left( W^{(i)} x_j + U^{(i)} \\tilde{h}_j + b^{(i)} \\right), \\\\\n f_{jk} &= \\sigma\\left( W^{(f)} x_j + U^{(f)} h_k + b^{(f)} \\right),\\\\\n o_j &= \\sigma \\left( W^{(o)} x_j + U^{(o)} \\tilde{h}_j + b^{(o)} \\right), \\\\\n u_j &= \\tanh\\left( W^{(u)} x_j + U^{(u)} \\tilde{h}_j + b^{(u)} \\right), \\\\\n c_j &= i_j \\odot u_j + \\sum_{k\\in C(j)} f_{jk} \\odot c_{k}, \\\\\n h_j &= o_j \\odot \\tanh(c_j),\n \\end{align}\n\n \"\"\"\n\n def __init__(self, config):\n \"\"\"\n Class attributes:\n - ``embedding_size``: `int`. Dimension of the embeddings.\n - ``hidden_size``: `int`. Dimension of the Tree LSTM hidden layer.\n - ``vocab_size``: `int`. Dimension of the vocabulary.\n - ``xavier_init``: `bool`, default 1. Whether to initialize network weights using the Glorot procedure.\n \"\"\"\n super(NaryTreeLSTMEncoder, self).__init__(config)\n self.tree_lstm_cell = NaryTreeLSTMCell(config)\n if config.xavier_init:\n self.xavier_init_weights()" ]
[ [ "torch.sigmoid", "torch.zeros", "torch.reshape", "torch.zeros_like", "torch.nn.Embedding", "torch.tanh", "torch.nn.Linear", "torch.mul", "torch.no_grad", "torch.nn.Embedding.from_pretrained", "torch.where", "torch.nn.init.xavier_uniform_", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
quynhkhanh96/mmaction2
[ "3bca76ac45ba9f28cc4c4d0565515e02bbd4d059" ]
[ "demo/demo_skeleton.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\nimport shutil\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv import DictAction\n\nfrom mmaction.apis import inference_recognizer, init_recognizer\n\ntry:\n from mmdet.apis import inference_detector, init_detector\nexcept (ImportError, ModuleNotFoundError):\n raise ImportError('Failed to import `inference_detector` and '\n '`init_detector` form `mmdet.apis`. These apis are '\n 'required in this demo! ')\n\ntry:\n from mmpose.apis import (inference_top_down_pose_model, init_pose_model,\n vis_pose_result)\nexcept (ImportError, ModuleNotFoundError):\n raise ImportError('Failed to import `inference_top_down_pose_model`, '\n '`init_pose_model`, and `vis_pose_result` form '\n '`mmpose.apis`. These apis are required in this demo! ')\n\ntry:\n import moviepy.editor as mpy\nexcept ImportError:\n raise ImportError('Please install moviepy to enable output file')\n\nFONTFACE = cv2.FONT_HERSHEY_DUPLEX\nFONTSCALE = 0.75\nFONTCOLOR = (255, 255, 255) # BGR, white\nTHICKNESS = 1\nLINETYPE = 1\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='MMAction2 demo')\n parser.add_argument('video', help='video file/url')\n parser.add_argument('out_filename', help='output filename')\n parser.add_argument(\n '--config',\n default=('configs/skeleton/posec3d/'\n 'slowonly_r50_u48_240e_ntu120_xsub_keypoint.py'),\n help='skeleton model config file path')\n parser.add_argument(\n '--checkpoint',\n default=('https://download.openmmlab.com/mmaction/skeleton/posec3d/'\n 'slowonly_r50_u48_240e_ntu120_xsub_keypoint/'\n 'slowonly_r50_u48_240e_ntu120_xsub_keypoint-6736b03f.pth'),\n help='skeleton model checkpoint file/url')\n parser.add_argument(\n '--det-config',\n default='demo/faster_rcnn_r50_fpn_2x_coco.py',\n help='human detection config file path (from mmdet)')\n parser.add_argument(\n '--det-checkpoint',\n default=('http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/'\n 'faster_rcnn_r50_fpn_2x_coco/'\n 'faster_rcnn_r50_fpn_2x_coco_'\n 'bbox_mAP-0.384_20200504_210434-a5d8aa15.pth'),\n help='human detection checkpoint file/url')\n parser.add_argument(\n '--pose-config',\n default='demo/hrnet_w32_coco_256x192.py',\n help='human pose estimation config file path (from mmpose)')\n parser.add_argument(\n '--pose-checkpoint',\n default=('https://download.openmmlab.com/mmpose/top_down/hrnet/'\n 'hrnet_w32_coco_256x192-c78dce93_20200708.pth'),\n help='human pose estimation checkpoint file/url')\n parser.add_argument(\n '--det-score-thr',\n type=float,\n default=0.9,\n help='the threshold of human detection score')\n parser.add_argument(\n '--label-map',\n default='tools/data/skeleton/label_map_ntu120.txt',\n help='label map file')\n parser.add_argument(\n '--device', type=str, default='cuda:0', help='CPU/CUDA device option')\n parser.add_argument(\n '--short-side',\n type=int,\n default=480,\n help='specify the short-side length of the image')\n parser.add_argument(\n '--cfg-options',\n nargs='+',\n action=DictAction,\n default={},\n help='override some settings in the used config, the key-value pair '\n 'in xxx=yyy format will be merged into config file. 
For example, '\n \"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'\")\n args = parser.parse_args()\n return args\n\n\ndef frame_extraction(video_path, short_side):\n \"\"\"Extract frames given video_path.\n\n Args:\n video_path (str): The video_path.\n \"\"\"\n # Load the video, extract frames into ./tmp/video_name\n target_dir = osp.join('./tmp', osp.basename(osp.splitext(video_path)[0]))\n os.makedirs(target_dir, exist_ok=True)\n # Should be able to handle videos up to several hours\n frame_tmpl = osp.join(target_dir, 'img_{:06d}.jpg')\n vid = cv2.VideoCapture(video_path)\n frames = []\n frame_paths = []\n flag, frame = vid.read()\n cnt = 0\n new_h, new_w = None, None\n while flag:\n if new_h is None:\n h, w, _ = frame.shape\n new_w, new_h = mmcv.rescale_size((w, h), (short_side, np.Inf))\n\n frame = mmcv.imresize(frame, (new_w, new_h))\n\n frames.append(frame)\n frame_path = frame_tmpl.format(cnt + 1)\n frame_paths.append(frame_path)\n\n cv2.imwrite(frame_path, frame)\n cnt += 1\n flag, frame = vid.read()\n\n return frame_paths, frames\n\n\ndef detection_inference(args, frame_paths):\n \"\"\"Detect human boxes given frame paths.\n\n Args:\n args (argparse.Namespace): The arguments.\n frame_paths (list[str]): The paths of frames to do detection inference.\n\n Returns:\n list[np.ndarray]: The human detection results.\n \"\"\"\n model = init_detector(args.det_config, args.det_checkpoint, args.device)\n assert model.CLASSES[0] == 'person', ('We require you to use a detector '\n 'trained on COCO')\n results = []\n print('Performing Human Detection for each frame')\n prog_bar = mmcv.ProgressBar(len(frame_paths))\n for frame_path in frame_paths:\n result = inference_detector(model, frame_path)\n # We only keep human detections with score larger than det_score_thr\n result = result[0][result[0][:, 4] >= args.det_score_thr]\n results.append(result)\n prog_bar.update()\n return results\n\n\ndef pose_inference(args, frame_paths, det_results):\n model = init_pose_model(args.pose_config, args.pose_checkpoint,\n args.device)\n ret = []\n print('Performing Human Pose Estimation for each frame')\n prog_bar = mmcv.ProgressBar(len(frame_paths))\n for f, d in zip(frame_paths, det_results):\n # Align input format\n d = [dict(bbox=x) for x in list(d)]\n pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]\n ret.append(pose)\n prog_bar.update()\n return ret\n\n\ndef main():\n args = parse_args()\n\n frame_paths, original_frames = frame_extraction(args.video,\n args.short_side)\n num_frame = len(frame_paths)\n h, w, _ = original_frames[0].shape\n\n # Get clip_len, frame_interval and calculate center index of each clip\n config = mmcv.Config.fromfile(args.config)\n config.merge_from_dict(args.cfg_options)\n for component in config.data.test.pipeline:\n if component['type'] == 'PoseNormalize':\n component['mean'] = (w // 2, h // 2, .5)\n component['max_value'] = (w, h, 1.)\n\n model = init_recognizer(config, args.checkpoint, args.device)\n\n # Load label_map\n label_map = [x.strip() for x in open(args.label_map).readlines()]\n\n # Get Human detection results\n det_results = detection_inference(args, frame_paths)\n torch.cuda.empty_cache()\n\n pose_results = pose_inference(args, frame_paths, det_results)\n torch.cuda.empty_cache()\n\n fake_anno = dict(\n frame_dir='',\n label=-1,\n img_shape=(h, w),\n original_shape=(h, w),\n start_index=0,\n modality='Pose',\n total_frames=num_frame)\n num_person = max([len(x) for x in pose_results])\n\n num_keypoint = 17\n keypoint = 
np.zeros((num_person, num_frame, num_keypoint, 2),\n dtype=np.float16)\n keypoint_score = np.zeros((num_person, num_frame, num_keypoint),\n dtype=np.float16)\n for i, poses in enumerate(pose_results):\n for j, pose in enumerate(poses):\n pose = pose['keypoints']\n keypoint[j, i] = pose[:, :2]\n keypoint_score[j, i] = pose[:, 2]\n fake_anno['keypoint'] = keypoint\n fake_anno['keypoint_score'] = keypoint_score\n\n results = inference_recognizer(model, fake_anno)\n\n action_label = label_map[results[0][0]]\n\n pose_model = init_pose_model(args.pose_config, args.pose_checkpoint,\n args.device)\n vis_frames = [\n vis_pose_result(pose_model, frame_paths[i], pose_results[i])\n for i in range(num_frame)\n ]\n for frame in vis_frames:\n cv2.putText(frame, action_label, (10, 30), FONTFACE, FONTSCALE,\n FONTCOLOR, THICKNESS, LINETYPE)\n\n vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames], fps=24)\n vid.write_videofile(args.out_filename, remove_temp=True)\n\n tmp_frame_dir = osp.dirname(frame_paths[0])\n shutil.rmtree(tmp_frame_dir)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.zeros", "torch.cuda.empty_cache" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AdaAlarm/tensorflow
[ "e0db063159751276a92d88a4ad6d481b1199318c", "3457a2b122e50b4d44ceaaed5a663d635e5c22df", "e0db063159751276a92d88a4ad6d481b1199318c", "3457a2b122e50b4d44ceaaed5a663d635e5c22df", "e0db063159751276a92d88a4ad6d481b1199318c" ]
[ "tensorflow/python/distribute/strategy_combinations.py", "tensorflow/python/data/experimental/kernel_tests/matching_files_dataset_test.py", "tensorflow/python/keras/engine/sequential.py", "tensorflow/python/data/kernel_tests/repeat_test.py", "tensorflow/compiler/mlir/tfr/python/tfr_gen_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Strategy combinations for combinations.combine().\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.distribute import central_storage_strategy\nfrom tensorflow.python.distribute import cluster_resolver\nfrom tensorflow.python.distribute import collective_all_reduce_strategy\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.distribute import mirrored_strategy as mirrored_lib\nfrom tensorflow.python.distribute import multi_process_runner\nfrom tensorflow.python.distribute import multi_worker_test_base\nfrom tensorflow.python.distribute import one_device_strategy as one_device_lib\nfrom tensorflow.python.distribute import parameter_server_strategy_v2\nfrom tensorflow.python.distribute import sharded_variable\nfrom tensorflow.python.distribute import test_util\nfrom tensorflow.python.distribute import tpu_strategy as tpu_lib\nfrom tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import remote\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.tpu import device_assignment as device_assignment_lib\nfrom tensorflow.python.tpu import tpu_strategy_util\nfrom tensorflow.python.training.server_lib import ClusterSpec\nfrom tensorflow.python.util.tf_export import tf_export\n\n_TF_INTERNAL_API_PREFIX = \"__internal__.distribute.combinations.\"\n\n_did_connect_to_cluster = False\n_topology = None\nCollectiveAllReduceExtended = (\n collective_all_reduce_strategy.CollectiveAllReduceExtended)\n\n\ndef _version_chooser(tf1_cls, tf2_cls):\n\n def creator(*args, **kwargs):\n if tf2.enabled():\n return tf2_cls(*args, **kwargs)\n return tf1_cls(*args, **kwargs)\n\n return creator\n\n\nMirroredStrategy = _version_chooser(mirrored_lib.MirroredStrategyV1,\n mirrored_lib.MirroredStrategy)\nCentralStorageStrategy = _version_chooser(\n central_storage_strategy.CentralStorageStrategyV1,\n central_storage_strategy.CentralStorageStrategy)\nOneDeviceStrategy = _version_chooser(one_device_lib.OneDeviceStrategyV1,\n one_device_lib.OneDeviceStrategy)\n# Only V2 CollectiveAllReduceStrategy combinations are supported.\nCollectiveAllReduceStrategy = (\n collective_all_reduce_strategy.CollectiveAllReduceStrategy)\n\n\n# pylint: disable=missing-docstring\ndef _get_tpu_strategy_creator(steps_per_run,\n use_single_core=False,\n enable_packed_variable=False,\n **kwargs):\n\n def _create_tpu_strategy():\n FLAGS = flags.FLAGS # pylint: disable=invalid-name\n global _did_connect_to_cluster\n global _topology\n\n try:\n # Attempt to locally discover the TPU. 
This will fail for Cloud TPU, in\n # which case we fall back to the values passed as flags.\n resolver = tpu_cluster_resolver.TPUClusterResolver()\n did_automatically_resolve = True\n except ValueError:\n did_automatically_resolve = False\n\n # These flags will be defined by tpu_test_wrapper.py.\n resolver = tpu_cluster_resolver.TPUClusterResolver(\n tpu=hasattr(FLAGS, \"tpu\") and FLAGS.tpu or \"\",\n zone=hasattr(FLAGS, \"zone\") and FLAGS.zone or None,\n project=hasattr(FLAGS, \"project\") and FLAGS.project or None,\n )\n\n # Only connect once per process, rather than per test method.\n if not _did_connect_to_cluster:\n if getattr(FLAGS, \"tpu\", \"\") or did_automatically_resolve:\n remote.connect_to_cluster(resolver)\n _did_connect_to_cluster = True\n _topology = tpu_strategy_util.initialize_tpu_system(resolver)\n\n device_assignment = None\n if use_single_core:\n device_assignment = device_assignment_lib.DeviceAssignment(\n _topology,\n core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)\n\n # Steps per run is only supported in TF 1.x\n if tf2.enabled():\n strategy = tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs)\n else:\n strategy = tpu_lib.TPUStrategyV1(resolver, steps_per_run,\n device_assignment, **kwargs)\n strategy._enable_packed_variable_in_eager_mode = enable_packed_variable # pylint: disable=protected-access\n return strategy\n\n return _create_tpu_strategy\n\n\ndef _mirrored_strategy_with_collective_key_base(devices):\n mirrored_lib.MirroredStrategyV1._collective_key_base += 100000\n mirrored_lib.MirroredStrategy._collective_key_base += 100000\n return MirroredStrategy(devices)\n\n\ndef _mirrored_strategy_with_no_merge_call(devices):\n mirrored_lib.MirroredStrategyV1._collective_key_base += 100000\n mirrored_lib.MirroredStrategy._collective_key_base += 100000\n out = MirroredStrategy(devices)\n # Stub out merge call usage.\n out.extended._use_merge_call = lambda: False # pylint: disable=protected-access\n return out\n\n\ndef _get_multi_worker_mirrored_creator(required_gpus, use_merge_call=True):\n\n def _create_multi_worker_mirrored():\n tf_config = cluster_resolver.TFConfigClusterResolver()\n master = tf_config.master()\n if tf_config.rpc_layer:\n # Strip off the rpc_layer suffix.\n master = master[len(\"%s://\" % tf_config.rpc_layer):]\n resolver = cluster_resolver.SimpleClusterResolver(\n cluster_spec=tf_config.cluster_spec(),\n task_type=tf_config.task_type,\n task_id=tf_config.task_id,\n master=master,\n environment=tf_config.environment,\n num_accelerators={\"GPU\": required_gpus},\n rpc_layer=tf_config.rpc_layer or \"grpc\",\n )\n # Disable health check. We don't have a reliable to shutdown the strategy\n # (and thus the health check) at the end of a test. Turning on health check\n # causes some flakiness since we re-create part of the server when creating\n # a strategy, and our tests are capable of handling failures.\n CollectiveAllReduceExtended._enable_check_health = False # pylint: disable=protected-access\n # Always create the strategy in eager mode so that it starts the server and\n # configures the eager context. 
The eager context can no longer be\n # configured after initialization.\n with context.eager_mode():\n strategy = CollectiveAllReduceStrategy(cluster_resolver=resolver)\n\n if not use_merge_call:\n strategy.extended._use_merge_call = lambda: False # pylint: disable=protected-access\n # TODO(b/152320929): Wait for the cluster before proceeding, otherwise\n # collectives may hang if any worker launches collectives before the chief\n # creates the strategy.\n try:\n multi_process_runner.get_barrier().wait()\n except ValueError:\n # If the creator is called in the main process,\n # multi_process_runner.get_barrier() raises ValueError, which is safe to\n # ignore.\n pass\n return strategy\n\n return _create_multi_worker_mirrored\n\n\ndef _get_ps_strategy_creator(num_workers, num_ps, required_gpus=0):\n\n def _create_parameter_server():\n\n cluster_def = multi_worker_test_base.create_in_process_cluster(\n num_workers=num_workers, num_ps=num_ps, rpc_layer=\"grpc\")\n resolver = cluster_resolver.SimpleClusterResolver(\n ClusterSpec(cluster_def),\n num_accelerators={\"GPU\": required_gpus},\n rpc_layer=\"grpc\")\n strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(\n resolver,\n variable_partitioner=sharded_variable.FixedShardsPartitioner(2))\n return strategy\n\n return _create_parameter_server\n\n\ndef _deferred_pool_runner(has_chief, num_workers, initializer=None):\n \"\"\"Returns a callable that returns the pool runner.\n\n It creates the pool runner only upon first invocation. This avoids creating it\n when this file is imported.\n\n Args:\n has_chief: whether there should be a chief.\n num_workers: the number of workers excluding the chief.\n initializer: initializer of each process.\n\n Returns:\n A callable that returns the runner.\n \"\"\"\n\n container = []\n\n def get_or_create():\n if not container:\n cluster_spec = multi_worker_test_base.create_cluster_spec(\n has_chief=has_chief,\n num_workers=num_workers,\n num_ps=0,\n has_eval=False)\n runner = multi_process_runner.MultiProcessPoolRunner(\n cluster_spec, initializer=initializer)\n container.append(runner)\n return container[0]\n\n return get_or_create\n\n\n# We need to create the strategy in the initializer to start the server before\n# any test runs.\n_two_worker_pool = _deferred_pool_runner(\n has_chief=True,\n num_workers=1,\n initializer=_get_multi_worker_mirrored_creator(required_gpus=0))\n_four_worker_pool = _deferred_pool_runner(\n has_chief=True,\n num_workers=3,\n initializer=_get_multi_worker_mirrored_creator(required_gpus=0))\n\n\n# pylint: disable=g-long-lambda\ndefault_strategy = combinations.NamedDistribution(\n \"Default\",\n distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access\n required_gpus=None)\none_device_strategy = combinations.NamedDistribution(\n \"OneDeviceCPU\", lambda: OneDeviceStrategy(\"/cpu:0\"), required_gpus=None)\none_device_strategy_gpu = combinations.NamedDistribution(\n \"OneDeviceGPU\", lambda: OneDeviceStrategy(\"/gpu:0\"), required_gpus=1)\none_device_strategy_on_worker_1 = combinations.NamedDistribution(\n \"OneDeviceOnWorker1CPU\",\n lambda: OneDeviceStrategy(\"/job:worker/replica:0/task:1/cpu:0\"),\n required_gpus=None)\none_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(\n \"OneDeviceOnWorker1GPU\",\n lambda: OneDeviceStrategy(\"/job:worker/replica:0/task:1/gpu:0\"),\n required_gpus=1)\ntpu_strategy = combinations.NamedDistribution(\n \"TPU\", _get_tpu_strategy_creator(steps_per_run=2), 
required_tpu=True)\ntpu_strategy_packed_var = combinations.NamedDistribution(\n \"TPUPackedVar\",\n _get_tpu_strategy_creator(steps_per_run=2, enable_packed_variable=True),\n required_tpu=True)\ntpu_strategy_one_step = combinations.NamedDistribution(\n \"TPUOneStep\", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)\ntpu_strategy_one_core = combinations.NamedDistribution(\n \"TPUOneCore\",\n _get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),\n required_tpu=True)\ntpu_strategy_one_step_one_core = combinations.NamedDistribution(\n \"TPUOneStepOneCore\",\n _get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),\n required_tpu=True)\ncloud_tpu_strategy = combinations.NamedDistribution(\n \"CloudTPU\",\n _get_tpu_strategy_creator(steps_per_run=2),\n required_tpu=True,\n use_cloud_tpu=True)\nmirrored_strategy_with_one_cpu = combinations.NamedDistribution(\n \"Mirrored1CPU\",\n lambda: _mirrored_strategy_with_collective_key_base([\"/cpu:0\"]))\nmirrored_strategy_with_one_gpu = combinations.NamedDistribution(\n \"Mirrored1GPU\",\n lambda: _mirrored_strategy_with_collective_key_base([\"/gpu:0\"]),\n required_gpus=1)\nmirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(\n \"MirroredCPUAndGPU\",\n lambda: _mirrored_strategy_with_collective_key_base([\"/gpu:0\", \"/cpu:0\"]),\n required_gpus=1)\nmirrored_strategy_with_two_gpus = combinations.NamedDistribution(\n \"Mirrored2GPUs\",\n lambda: _mirrored_strategy_with_collective_key_base([\"/gpu:0\", \"/gpu:1\"]),\n required_gpus=2)\nmirrored_strategy_with_two_gpus_no_merge_call = combinations.NamedDistribution(\n \"Mirrored2GPUsNoMergeCall\",\n lambda: _mirrored_strategy_with_no_merge_call([\"/gpu:0\", \"/gpu:1\"]),\n required_physical_gpus=2)\n# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.\nmirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(\n \"Mirrored2CPU\",\n lambda: _mirrored_strategy_with_collective_key_base([\"/cpu:1\", \"/cpu:2\"]))\nmirrored_strategy_with_cpu_1_and_2.__doc__ = (\n \"\"\"Mirrored strategy with 2 virtual CPUs.\n\n Should set up logical devices before use\n \"\"\")\ncentral_storage_strategy_with_two_gpus = combinations.NamedDistribution(\n \"CentralStorage2GPUs\",\n lambda: CentralStorageStrategy([\"/gpu:0\", \"/gpu:1\"]),\n required_gpus=2)\ncentral_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution(\n \"CentralStorageCPUAndGPU\",\n lambda: CentralStorageStrategy([\"/gpu:0\", \"/cpu:0\"]),\n required_gpus=1)\n# chief + 1 worker, with CPU.\nmulti_worker_mirrored_2x1_cpu = combinations.NamedDistribution(\n \"MultiWorkerMirrored2x1CPU\",\n _get_multi_worker_mirrored_creator(required_gpus=0),\n has_chief=True,\n num_workers=1,\n pool_runner_fn=_two_worker_pool,\n no_xla=True,\n)\n# chief + 1 worker, with 1 GPU each.\nmulti_worker_mirrored_2x1_gpu = combinations.NamedDistribution(\n \"MultiWorkerMirrored2x1GPU\",\n _get_multi_worker_mirrored_creator(required_gpus=1),\n has_chief=True,\n num_workers=1,\n required_gpus=1,\n pool_runner_fn=_two_worker_pool,\n no_xla=True,\n)\n# chief + 1 worker, with 2 GPU each.\nmulti_worker_mirrored_2x2_gpu = combinations.NamedDistribution(\n \"MultiWorkerMirrored2x2GPU\",\n _get_multi_worker_mirrored_creator(required_gpus=2),\n has_chief=True,\n num_workers=1,\n required_gpus=2,\n pool_runner_fn=_two_worker_pool,\n no_xla=True,\n)\nmulti_worker_mirrored_2x2_gpu_no_merge_call = combinations.NamedDistribution(\n \"MultiWorkerMirrored2x2GPUNoMergeCall\",\n 
_get_multi_worker_mirrored_creator(\n required_gpus=2, use_merge_call=False),\n has_chief=True,\n num_workers=1,\n required_physical_gpus=2,\n pool_runner_fn=_two_worker_pool,\n no_xla=True,\n)\n# chief + 3 workers, with CPU.\nmulti_worker_mirrored_4x1_cpu = combinations.NamedDistribution(\n \"MultiWorkerMirrored4x1CPU\",\n _get_multi_worker_mirrored_creator(required_gpus=0),\n has_chief=True,\n num_workers=3,\n pool_runner_fn=_four_worker_pool,\n no_xla=True,\n)\n\nparameter_server_strategy_3worker_2ps_cpu = combinations.NamedDistribution(\n \"ParameterServer3Worker2PSCPU\",\n _get_ps_strategy_creator(num_workers=3, num_ps=2),\n)\n\nparameter_server_strategy_1worker_2ps_cpu = combinations.NamedDistribution(\n \"ParameterServer1Worker2PSCPU\",\n _get_ps_strategy_creator(num_workers=1, num_ps=2),\n)\n\nparameter_server_strategy_3worker_2ps_1gpu = combinations.NamedDistribution(\n \"ParameterServer3Worker2PS1GPU\",\n _get_ps_strategy_creator(num_workers=3, num_ps=2, required_gpus=1),\n required_gpus=1,\n)\n\nparameter_server_strategy_1worker_2ps_1gpu = combinations.NamedDistribution(\n \"ParameterServer1Worker2PS1GPU\",\n _get_ps_strategy_creator(num_workers=1, num_ps=2, required_gpus=1),\n required_gpus=1,\n)\n\ngraph_and_eager_modes = [\"graph\", \"eager\"]\n\n\n# TODO(crccw): remove after tf-nightly picks up the new API.\ndef set_virtual_cpus_to_at_least(num_virtual_cpus):\n test_util.set_logical_devices_to_at_least(\"CPU\", num_virtual_cpus)\n\n\nstrategies_minus_tpu = [\n default_strategy,\n one_device_strategy,\n one_device_strategy_gpu,\n mirrored_strategy_with_gpu_and_cpu,\n mirrored_strategy_with_two_gpus,\n central_storage_strategy_with_gpu_and_cpu,\n]\n\nstrategies_minus_default_and_tpu = [\n one_device_strategy,\n one_device_strategy_gpu,\n mirrored_strategy_with_gpu_and_cpu,\n mirrored_strategy_with_two_gpus,\n]\n\ntpu_strategies = [\n tpu_strategy, # steps_per_run=2\n tpu_strategy_one_step,\n tpu_strategy_packed_var,\n cloud_tpu_strategy,\n]\n\nall_strategies_minus_default = strategies_minus_default_and_tpu + tpu_strategies\n\nall_strategies = strategies_minus_tpu + tpu_strategies\n\ntwo_replica_strategies = [\n mirrored_strategy_with_gpu_and_cpu,\n mirrored_strategy_with_two_gpus,\n multi_worker_mirrored_2x1_cpu,\n multi_worker_mirrored_2x1_gpu,\n tpu_strategy, # steps_per_run=2\n tpu_strategy_one_step,\n central_storage_strategy_with_gpu_and_cpu,\n]\n\nfour_replica_strategies = [\n multi_worker_mirrored_2x2_gpu,\n multi_worker_mirrored_4x1_cpu,\n]\n\n# TODO(b/159831907): replace with two_replica_strategies after the tests using\n# it work with MWMS.\nmultidevice_strategies = [\n mirrored_strategy_with_gpu_and_cpu,\n mirrored_strategy_with_two_gpus,\n tpu_strategy, # steps_per_run=2\n tpu_strategy_one_step\n]\n\nmultiworker_strategies = [\n multi_worker_mirrored_2x1_cpu, multi_worker_mirrored_2x1_gpu,\n multi_worker_mirrored_2x2_gpu\n]\n\n\ndef strategy_minus_tpu_combinations():\n return combinations.combine(\n distribution=strategies_minus_tpu, mode=[\"graph\", \"eager\"])\n\n\ndef tpu_strategy_combinations():\n return combinations.combine(distribution=tpu_strategies, mode=[\"graph\"])\n\n\ndef all_strategy_combinations():\n return strategy_minus_tpu_combinations() + tpu_strategy_combinations()\n\n\ndef all_strategy_minus_default_and_tpu_combinations():\n return combinations.combine(\n distribution=[\n one_device_strategy, one_device_strategy_gpu,\n mirrored_strategy_with_gpu_and_cpu, mirrored_strategy_with_two_gpus\n ],\n mode=[\"graph\", \"eager\"])\n\n\ndef 
all_strategy_combinations_minus_default():\n return (all_strategy_minus_default_and_tpu_combinations() +\n tpu_strategy_combinations())\n\n\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"central_storage_strategy_with_gpu_and_cpu\",\n v1=[]).export_constant(__name__,\n \"central_storage_strategy_with_gpu_and_cpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"central_storage_strategy_with_two_gpus\",\n v1=[]).export_constant(__name__, \"central_storage_strategy_with_two_gpus\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"cloud_tpu_strategy\",\n v1=[]).export_constant(__name__, \"cloud_tpu_strategy\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"default_strategy\",\n v1=[]).export_constant(__name__, \"default_strategy\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"mirrored_strategy_with_cpu_1_and_2\",\n v1=[]).export_constant(__name__, \"mirrored_strategy_with_cpu_1_and_2\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"mirrored_strategy_with_gpu_and_cpu\",\n v1=[]).export_constant(__name__, \"mirrored_strategy_with_gpu_and_cpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"mirrored_strategy_with_one_cpu\",\n v1=[]).export_constant(__name__, \"mirrored_strategy_with_one_cpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"mirrored_strategy_with_one_gpu\",\n v1=[]).export_constant(__name__, \"mirrored_strategy_with_one_gpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"mirrored_strategy_with_two_gpus\",\n v1=[]).export_constant(__name__, \"mirrored_strategy_with_two_gpus\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"mirrored_strategy_with_two_gpus_no_merge_call\",\n v1=[]).export_constant(__name__,\n \"mirrored_strategy_with_two_gpus_no_merge_call\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"multi_worker_mirrored_2x1_cpu\",\n v1=[]).export_constant(__name__, \"multi_worker_mirrored_2x1_cpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"multi_worker_mirrored_2x1_gpu\",\n v1=[]).export_constant(__name__, \"multi_worker_mirrored_2x1_gpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"multi_worker_mirrored_2x2_gpu\",\n v1=[]).export_constant(__name__, \"multi_worker_mirrored_2x2_gpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"multi_worker_mirrored_2x2_gpu_no_merge_call\",\n v1=[]).export_constant(__name__,\n \"multi_worker_mirrored_2x2_gpu_no_merge_call\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"one_device_strategy\",\n v1=[]).export_constant(__name__, \"one_device_strategy\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"one_device_strategy_gpu\",\n v1=[]).export_constant(__name__, \"one_device_strategy_gpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"tpu_strategy\",\n v1=[]).export_constant(__name__, \"tpu_strategy\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"parameter_server_strategy_3worker_2ps_cpu\",\n v1=[]).export_constant(__name__,\n \"parameter_server_strategy_3worker_2ps_cpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"parameter_server_strategy_1worker_2ps_cpu\",\n v1=[]).export_constant(__name__,\n \"parameter_server_strategy_1worker_2ps_cpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"parameter_server_strategy_3worker_2ps_1gpu\",\n v1=[]).export_constant(__name__,\n \"parameter_server_strategy_3worker_2ps_1gpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"parameter_server_strategy_1worker_2ps_1gpu\",\n v1=[]).export_constant(__name__,\n \"parameter_server_strategy_1worker_2ps_1gpu\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"tpu_strategy_one_core\",\n v1=[]).export_constant(__name__, \"tpu_strategy_one_core\")\ntf_export(\n _TF_INTERNAL_API_PREFIX + \"tpu_strategy_packed_var\",\n v1=[]).export_constant(__name__, 
\"tpu_strategy_packed_var\")\n", "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the private `MatchingFilesDataset`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport tempfile\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.data.experimental.ops import matching_files\nfrom tensorflow.python.data.kernel_tests import checkpoint_test_base\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.util import compat\n\n\nclass MatchingFilesDatasetTest(test_base.DatasetTestBase,\n parameterized.TestCase):\n\n def setUp(self):\n super(MatchingFilesDatasetTest, self).setUp()\n self.tmp_dir = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.tmp_dir, ignore_errors=True)\n super(MatchingFilesDatasetTest, self).tearDown()\n\n def _touchTempFiles(self, filenames):\n for filename in filenames:\n open(os.path.join(self.tmp_dir, filename), 'a').close()\n\n @combinations.generate(test_base.default_test_combinations())\n def testNonExistingDirectory(self):\n \"\"\"Test the MatchingFiles dataset with a non-existing directory.\"\"\"\n\n self.tmp_dir = os.path.join(self.tmp_dir, 'nonexistingdir')\n dataset = matching_files.MatchingFilesDataset(\n os.path.join(self.tmp_dir, '*'))\n self.assertDatasetProduces(\n dataset, expected_error=(errors.NotFoundError, ''))\n\n @combinations.generate(test_base.default_test_combinations())\n def testEmptyDirectory(self):\n \"\"\"Test the MatchingFiles dataset with an empty directory.\"\"\"\n\n dataset = matching_files.MatchingFilesDataset(\n os.path.join(self.tmp_dir, '*'))\n self.assertDatasetProduces(\n dataset, expected_error=(errors.NotFoundError, ''))\n\n @combinations.generate(test_base.default_test_combinations())\n def testSimpleDirectory(self):\n \"\"\"Test the MatchingFiles dataset with a simple directory.\"\"\"\n\n filenames = ['a', 'b', 'c']\n self._touchTempFiles(filenames)\n\n dataset = matching_files.MatchingFilesDataset(\n os.path.join(self.tmp_dir, '*'))\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(os.path.join(self.tmp_dir, filename))\n for filename in filenames\n ],\n assert_items_equal=True)\n\n @combinations.generate(test_base.default_test_combinations())\n def testFileSuffixes(self):\n \"\"\"Test the MatchingFiles dataset using the suffixes of filename.\"\"\"\n\n filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']\n self._touchTempFiles(filenames)\n\n dataset = matching_files.MatchingFilesDataset(\n os.path.join(self.tmp_dir, '*.py'))\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(os.path.join(self.tmp_dir, filename))\n for 
filename in filenames[1:-1]\n ],\n assert_items_equal=True)\n\n @combinations.generate(test_base.default_test_combinations())\n def testFileMiddles(self):\n \"\"\"Test the MatchingFiles dataset using the middles of filename.\"\"\"\n\n filenames = ['aa.txt', 'bb.py', 'bbc.pyc', 'cc.pyc']\n self._touchTempFiles(filenames)\n\n dataset = matching_files.MatchingFilesDataset(\n os.path.join(self.tmp_dir, 'b*.py*'))\n self.assertDatasetProduces(\n dataset,\n expected_output=[\n compat.as_bytes(os.path.join(self.tmp_dir, filename))\n for filename in filenames[1:3]\n ],\n assert_items_equal=True)\n\n @combinations.generate(test_base.default_test_combinations())\n def testNestedDirectories(self):\n \"\"\"Test the MatchingFiles dataset with nested directories.\"\"\"\n\n filenames = []\n width = 8\n depth = 4\n for i in range(width):\n for j in range(depth):\n new_base = os.path.join(self.tmp_dir, str(i),\n *[str(dir_name) for dir_name in range(j)])\n os.makedirs(new_base)\n child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']\n for f in child_files:\n filename = os.path.join(new_base, f)\n filenames.append(filename)\n open(filename, 'w').close()\n\n patterns = [\n os.path.join(self.tmp_dir, os.path.join(*['**' for _ in range(depth)]),\n suffix) for suffix in ['*.txt', '*.log']\n ]\n\n dataset = matching_files.MatchingFilesDataset(patterns)\n next_element = self.getNext(dataset)\n expected_filenames = [\n compat.as_bytes(filename)\n for filename in filenames\n if filename.endswith('.txt') or filename.endswith('.log')\n ]\n actual_filenames = []\n while True:\n try:\n actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))\n except errors.OutOfRangeError:\n break\n\n self.assertItemsEqual(expected_filenames, actual_filenames)\n\n\nclass MatchingFilesDatasetCheckpointTest(\n checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):\n\n def _build_iterator_graph(self, test_patterns):\n return matching_files.MatchingFilesDataset(test_patterns)\n\n @combinations.generate(test_base.default_test_combinations())\n def testMatchingFilesCore(self):\n tmp_dir = tempfile.mkdtemp()\n width = 16\n depth = 8\n for i in range(width):\n for j in range(depth):\n new_base = os.path.join(tmp_dir, str(i),\n *[str(dir_name) for dir_name in range(j)])\n if not os.path.exists(new_base):\n os.makedirs(new_base)\n child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']\n for f in child_files:\n filename = os.path.join(new_base, f)\n open(filename, 'w').close()\n\n patterns = [\n os.path.join(tmp_dir, os.path.join(*['**'\n for _ in range(depth)]), suffix)\n for suffix in ['*.txt', '*.log']\n ]\n\n num_outputs = width * len(patterns)\n self.run_core_tests(lambda: self._build_iterator_graph(patterns),\n num_outputs)\n\n shutil.rmtree(tmp_dir, ignore_errors=True)\n\n\nif __name__ == '__main__':\n test.main()\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Home of the `Sequential` model.\"\"\"\n\nimport copy\nimport warnings\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import layers as layer_module\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import functional\nfrom tensorflow.python.keras.engine import input_layer\nfrom tensorflow.python.keras.engine import training_utils\nfrom tensorflow.python.keras.saving.saved_model import model_serialization\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_inspect\nfrom tensorflow.python.keras.utils import tf_utils\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops.numpy_ops import np_arrays\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nSINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '\n 'a single output tensor. 
For multi-output '\n 'layers, use the functional API.')\n\n\n@keras_export('keras.Sequential', 'keras.models.Sequential')\nclass Sequential(functional.Functional):\n \"\"\"`Sequential` groups a linear stack of layers into a `tf.keras.Model`.\n\n `Sequential` provides training and inference features on this model.\n\n Examples:\n\n >>> # Optionally, the first layer can receive an `input_shape` argument:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))\n >>> # Afterwards, we do automatic shape inference:\n >>> model.add(tf.keras.layers.Dense(4))\n\n >>> # This is identical to the following:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.Input(shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(8))\n\n >>> # Note that you can also omit the `input_shape` argument.\n >>> # In that case the model doesn't have any weights until the first call\n >>> # to a training/evaluation method (since it isn't yet built):\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> # model.weights not created yet\n\n >>> # Whereas if you specify the input shape, the model gets built\n >>> # continuously as you are adding layers:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> len(model.weights)\n 4\n\n >>> # When using the delayed-build pattern (no input shape specified), you can\n >>> # choose to manually build your model by calling\n >>> # `build(batch_input_shape)`:\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Dense(8))\n >>> model.add(tf.keras.layers.Dense(4))\n >>> model.build((None, 16))\n >>> len(model.weights)\n 4\n\n ```python\n # Note that when using the delayed-build pattern (no input shape specified),\n # the model gets built the first time you call `fit`, `eval`, or `predict`,\n # or the first time you call the model on some input data.\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(8))\n model.add(tf.keras.layers.Dense(1))\n model.compile(optimizer='sgd', loss='mse')\n # This builds the model for the first time:\n model.fit(x, y, batch_size=32, epochs=10)\n ```\n \"\"\"\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self, layers=None, name=None):\n \"\"\"Creates a `Sequential` model instance.\n\n Args:\n layers: Optional list of layers to add to the model.\n name: Optional name for the model.\n \"\"\"\n # Skip the init in FunctionalModel since model doesn't have input/output yet\n super(functional.Functional, self).__init__( # pylint: disable=bad-super-call\n name=name, autocast=False)\n base_layer.keras_api_gauge.get_cell('Sequential').set(True)\n self.supports_masking = True\n self._compute_output_and_mask_jointly = True\n self._auto_track_sub_layers = False\n self._inferred_input_shape = None\n self._has_explicit_input_shape = False\n self._input_dtype = None\n self._layer_call_argspecs = {}\n self._created_nodes = set()\n # Flag that indicate whether the sequential network topology has been\n # created. It is false when there isn't any layer, or the layers doesn't\n # have input shape.\n self._graph_initialized = False\n\n # Unfortunately some Sequential models using custom layers or FeatureColumn\n # layers have multiple inputs. 
This is fundamentally incompatible with\n # most of the Sequential API, and we have to disable a number of features\n # for such models.\n self._use_legacy_deferred_behavior = False\n\n # Add to the model any layers passed to the constructor.\n if layers:\n if not isinstance(layers, (list, tuple)):\n layers = [layers]\n for layer in layers:\n self.add(layer)\n\n @property\n def layers(self):\n # Historically, `sequential.layers` only returns layers that were added\n # via `add`, and omits the auto-generated `InputLayer` that comes at the\n # bottom of the stack.\n # `Trackable` manages the `_layers` attributes and does filtering\n # over it.\n layers = super(Sequential, self).layers\n if layers and isinstance(layers[0], input_layer.InputLayer):\n return layers[1:]\n return layers[:]\n\n @trackable.no_automatic_dependency_tracking\n def add(self, layer):\n \"\"\"Adds a layer instance on top of the layer stack.\n\n Args:\n layer: layer instance.\n\n Raises:\n TypeError: If `layer` is not a layer instance.\n ValueError: In case the `layer` argument does not\n know its input shape.\n ValueError: In case the `layer` argument has\n multiple output tensors, or is already connected\n somewhere else (forbidden in `Sequential` models).\n \"\"\"\n # If we are passed a Keras tensor created by keras.Input(), we can extract\n # the input layer from its keras history and use that without any loss of\n # generality.\n if hasattr(layer, '_keras_history'):\n origin_layer = layer._keras_history[0]\n if isinstance(origin_layer, input_layer.InputLayer):\n layer = origin_layer\n logging.warning(\n 'Please add `keras.layers.InputLayer` instead of `keras.Input` to '\n 'Sequential model. `keras.Input` is intended to be used by '\n 'Functional model.')\n\n if isinstance(layer, module.Module):\n if not isinstance(layer, base_layer.Layer):\n layer = functional.ModuleWrapper(layer)\n else:\n raise TypeError('The added layer must be '\n 'an instance of class Layer. '\n 'Found: ' + str(layer))\n\n tf_utils.assert_no_legacy_layers([layer])\n if not self._is_layer_name_unique(layer):\n raise ValueError('All layers added to a Sequential model '\n 'should have unique names. Name \"%s\" is already the name'\n ' of a layer in this model. Update the `name` argument '\n 'to pass a unique name.' 
% (layer.name,))\n\n self.built = False\n set_inputs = False\n self._maybe_create_attribute('_self_tracked_trackables', [])\n if not self._self_tracked_trackables:\n if isinstance(layer, input_layer.InputLayer):\n # Case where the user passes an Input or InputLayer layer via `add`.\n set_inputs = True\n else:\n batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)\n if batch_shape:\n # Instantiate an input layer.\n x = input_layer.Input(\n batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')\n # This will build the current layer\n # and create the node connecting the current layer\n # to the input layer we just created.\n layer(x)\n set_inputs = True\n\n if set_inputs:\n outputs = nest.flatten(layer._inbound_nodes[-1].outputs)\n if len(outputs) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n self.outputs = outputs\n self.inputs = layer_utils.get_source_inputs(self.outputs[0])\n self.built = True\n self._has_explicit_input_shape = True\n\n elif self.outputs:\n # If the model is being built continuously on top of an input layer:\n # refresh its output.\n output_tensor = layer(self.outputs[0])\n if len(nest.flatten(output_tensor)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n self.outputs = [output_tensor]\n self.built = True\n\n if set_inputs or self._graph_initialized:\n self._init_graph_network(self.inputs, self.outputs)\n self._graph_initialized = True\n else:\n self._self_tracked_trackables.append(layer)\n self._handle_deferred_layer_dependencies([layer])\n\n self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)\n\n @trackable.no_automatic_dependency_tracking\n def pop(self):\n \"\"\"Removes the last layer in the model.\n\n Raises:\n TypeError: if there are no layers in the model.\n \"\"\"\n if not self.layers:\n raise TypeError('There are no layers in the model.')\n\n layer = self._self_tracked_trackables.pop()\n self._layer_call_argspecs.pop(layer)\n if not self.layers:\n self.outputs = None\n self.inputs = None\n self.built = False\n self._inferred_input_shape = None\n self._has_explicit_input_shape = False\n self._graph_initialized = False\n elif self._graph_initialized:\n self.layers[-1]._outbound_nodes = []\n self.outputs = [self.layers[-1].output]\n self._init_graph_network(self.inputs, self.outputs)\n self.built = True\n\n @trackable.no_automatic_dependency_tracking\n def _build_graph_network_for_inferred_shape(self,\n input_shape,\n input_dtype=None):\n if input_shape is None or not self.layers:\n return\n if not tf2.enabled() or not ops.executing_eagerly_outside_functions():\n # This behavior is disabled in V1 or when eager execution is disabled.\n return\n if (not self._has_explicit_input_shape and\n not self._use_legacy_deferred_behavior):\n # Determine whether the input shape is novel, i.e. whether the model\n # should be rebuilt.\n input_shape = tuple(input_shape)\n if self._inferred_input_shape is None:\n new_shape = input_shape\n else:\n new_shape = relax_input_shape(self._inferred_input_shape, input_shape)\n if (new_shape is not None and new_shape != self._inferred_input_shape):\n # A novel shape has been received: we need to rebuild the model.\n # In case we are inside a graph function, we step out of it.\n with ops.init_scope():\n inputs = input_layer.Input(\n batch_shape=new_shape,\n dtype=input_dtype,\n name=self.layers[0].name + '_input')\n layer_input = inputs\n created_nodes = set()\n for layer in self.layers:\n # Clear nodes previously created via this method. 
This prevents\n            # node accumulation and ensures that e.g. `layer.output` is\n            # always connected to `model.inputs`\n            # (this is important e.g. for the feature extraction use case).\n            # We don't just do `layer._inbound_nodes = []` in order\n            # not to break shared layers added to Sequential models (which is\n            # technically illegal as per the `add()` docstring,\n            # but wasn't previously disabled).\n            clear_previously_created_nodes(layer, self._created_nodes)\n            try:\n              # Create Functional API connection by calling the current layer\n              layer_output = layer(layer_input)\n            except:  # pylint:disable=bare-except\n              # Functional API calls may fail for a number of reasons:\n              # 1) The layer may be buggy. In this case it will be easier for\n              # the user to debug if we fail on the first call on concrete data,\n              # instead of our own call on a symbolic input.\n              # 2) The layer is dynamic (graph-incompatible) and hasn't\n              # overridden `compute_output_shape`. In this case, it is\n              # impossible to build a graph network.\n              # 3) The layer is otherwise incompatible with the Functional API\n              # (e.g. this is the case for some probabilistic layers that rely\n              # on hacks and that do not return tensors).\n              # In all these cases, we should avoid creating a graph network\n              # (or we simply can't).\n              self._use_legacy_deferred_behavior = True\n              return\n            if len(nest.flatten(layer_output)) != 1:\n              raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n            # Keep track of nodes just created above\n            track_nodes_created_by_last_call(layer, created_nodes)\n            layer_input = layer_output\n            outputs = layer_output\n          self._created_nodes = created_nodes\n          try:\n            # Initialize a graph Network. This call will never fail for\n            # a stack of valid Keras layers.\n            # However some users have layers that are fundamentally incompatible\n            # with the Functional API, which do not return tensors. In this\n            # case, we fall back to the legacy deferred behavior.\n            # TODO(fchollet): consider raising here, as we should not be\n            # supporting such layers.\n            self._init_graph_network(inputs, outputs)\n            self._graph_initialized = True\n          except:  # pylint:disable=bare-except\n            self._use_legacy_deferred_behavior = True\n      self._inferred_input_shape = new_shape\n\n  @generic_utils.default\n  def build(self, input_shape=None):\n    if self._graph_initialized:\n      self._init_graph_network(self.inputs, self.outputs)\n    else:\n      if input_shape is None:\n        raise ValueError('You must provide an `input_shape` argument.')\n      self._build_graph_network_for_inferred_shape(input_shape)\n      if not self.built:\n        input_shape = tuple(input_shape)\n        self._build_input_shape = input_shape\n        super(Sequential, self).build(input_shape)\n    self.built = True\n\n  def call(self, inputs, training=None, mask=None):  # pylint: disable=redefined-outer-name\n    # If applicable, update the static input shape of the model.\n    if not self._has_explicit_input_shape:\n      if not tensor_util.is_tf_type(inputs) and not isinstance(\n          inputs, np_arrays.ndarray):\n        # This is a Sequential with multiple inputs. This is technically an\n        # invalid use case of Sequential, but we tolerate it for backwards\n        # compatibility.\n        self._use_legacy_deferred_behavior = True\n        self._build_input_shape = nest.map_structure(_get_shape_tuple, inputs)\n        if tf2.enabled():\n          logging.warning('Layers in a Sequential model should only have a '\n                          'single input tensor, but we receive a %s input: %s'\n                          '\nConsider rewriting this model with the Functional '\n                          'API.' 
% (type(inputs), inputs))\n else:\n self._build_graph_network_for_inferred_shape(inputs.shape, inputs.dtype)\n\n if self._graph_initialized:\n if not self.built:\n self._init_graph_network(self.inputs, self.outputs)\n return super(Sequential, self).call(inputs, training=training, mask=mask)\n\n outputs = inputs # handle the corner case where self.layers is empty\n for layer in self.layers:\n # During each iteration, `inputs` are the inputs to `layer`, and `outputs`\n # are the outputs of `layer` applied to `inputs`. At the end of each\n # iteration `inputs` is set to `outputs` to prepare for the next layer.\n kwargs = {}\n argspec = self._layer_call_argspecs[layer].args\n if 'mask' in argspec:\n kwargs['mask'] = mask\n if 'training' in argspec:\n kwargs['training'] = training\n\n outputs = layer(inputs, **kwargs)\n\n if len(nest.flatten(outputs)) != 1:\n raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)\n # `outputs` will be the inputs to the next layer.\n inputs = outputs\n mask = getattr(outputs, '_keras_mask', None)\n return outputs\n\n def compute_output_shape(self, input_shape):\n shape = input_shape\n for layer in self.layers:\n shape = layer.compute_output_shape(shape)\n return shape\n\n def compute_mask(self, inputs, mask):\n # TODO(omalleyt): b/123540974 This function is not really safe to call\n # by itself because it will duplicate any updates and losses in graph\n # mode by `call`ing the Layers again.\n outputs = self.call(inputs, mask=mask)\n return getattr(outputs, '_keras_mask', None)\n\n def predict_proba(self, x, batch_size=32, verbose=0):\n \"\"\"Generates class probability predictions for the input samples.\n\n The input samples are processed batch by batch.\n\n Args:\n x: input data, as a Numpy array or list of Numpy arrays\n (if the model has multiple inputs).\n batch_size: integer.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n A Numpy array of probability predictions.\n \"\"\"\n warnings.warn('`model.predict_proba()` is deprecated and '\n 'will be removed after 2021-01-01. '\n 'Please use `model.predict()` instead.')\n preds = self.predict(x, batch_size, verbose)\n if preds.min() < 0. or preds.max() > 1.:\n logging.warning('Network returning invalid probability values. '\n 'The last layer might not normalize predictions '\n 'into probabilities '\n '(like softmax or sigmoid would).')\n return preds\n\n def predict_classes(self, x, batch_size=32, verbose=0):\n \"\"\"Generate class predictions for the input samples.\n\n The input samples are processed batch by batch.\n\n Args:\n x: input data, as a Numpy array or list of Numpy arrays\n (if the model has multiple inputs).\n batch_size: integer.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n A numpy array of class predictions.\n \"\"\"\n warnings.warn('`model.predict_classes()` is deprecated and '\n 'will be removed after 2021-01-01. '\n 'Please use instead:'\n '* `np.argmax(model.predict(x), axis=-1)`, '\n ' if your model does multi-class classification '\n ' (e.g. if it uses a `softmax` last-layer activation).'\n '* `(model.predict(x) > 0.5).astype(\"int32\")`, '\n ' if your model does binary classification '\n ' (e.g. 
if it uses a `sigmoid` last-layer activation).')\n    proba = self.predict(x, batch_size=batch_size, verbose=verbose)\n    if proba.shape[-1] > 1:\n      return proba.argmax(axis=-1)\n    else:\n      return (proba > 0.5).astype('int32')\n\n  def get_config(self):\n    layer_configs = []\n    for layer in super(Sequential, self).layers:\n      # `super().layers` include the InputLayer if available (it is filtered out\n      # of `self.layers`). Note that `self._self_tracked_trackables` is managed\n      # by the tracking infrastructure and should not be used.\n      layer_configs.append(generic_utils.serialize_keras_object(layer))\n    config = {\n        'name': self.name,\n        'layers': copy.deepcopy(layer_configs)\n    }\n    if not self._is_graph_network and self._build_input_shape is not None:\n      config['build_input_shape'] = self._build_input_shape\n    return config\n\n  @classmethod\n  def from_config(cls, config, custom_objects=None):\n    if 'name' in config:\n      name = config['name']\n      build_input_shape = config.get('build_input_shape')\n      layer_configs = config['layers']\n    else:\n      name = None\n      build_input_shape = None\n      layer_configs = config\n    model = cls(name=name)\n    for layer_config in layer_configs:\n      layer = layer_module.deserialize(layer_config,\n                                       custom_objects=custom_objects)\n      model.add(layer)\n    if (not model.inputs and build_input_shape and\n        isinstance(build_input_shape, (tuple, list))):\n      model.build(build_input_shape)\n    return model\n\n  @property\n  def input_spec(self):\n    if hasattr(self, '_manual_input_spec'):\n      return self._manual_input_spec\n    if self.layers and hasattr(self.layers[0], 'input_spec'):\n      return self.layers[0].input_spec\n    return None\n\n  @input_spec.setter\n  def input_spec(self, value):\n    self._manual_input_spec = value\n\n  @property\n  def _trackable_saved_model_saver(self):\n    return model_serialization.SequentialSavedModelSaver(self)\n\n  def _is_layer_name_unique(self, layer):\n    for ref_layer in self.layers:\n      if layer.name == ref_layer.name and ref_layer is not layer:\n        return False\n    return True\n\n  def _assert_weights_created(self):\n    if self._graph_initialized:\n      return\n    # When the graph has not been initialized, use the Model's implementation\n    # to check if the weights have been created.\n    super(functional.Functional, self)._assert_weights_created()  # pylint: disable=bad-super-call\n\n\ndef _get_shape_tuple(t):\n  if hasattr(t, 'shape'):\n    shape = t.shape\n    if isinstance(shape, tuple):\n      return shape\n    if shape.rank is not None:\n      return tuple(shape.as_list())\n    return None\n  return None\n\n\ndef relax_input_shape(shape_1, shape_2):\n  if shape_1 is None or shape_2 is None:\n    return None\n  if len(shape_1) != len(shape_2):\n    return None\n  return tuple(None if d1 != d2 else d1 for d1, d2 in zip(shape_1, shape_2))\n\n\ndef clear_previously_created_nodes(layer, created_nodes):\n  \"\"\"Remove nodes from `created_nodes` from the layer's inbound_nodes.\"\"\"\n  for node in layer._inbound_nodes:\n    prev_layers = node.inbound_layers\n    for prev_layer in nest.flatten(prev_layers):\n      prev_layer._outbound_nodes = [\n          n for n in prev_layer._outbound_nodes\n          if n not in created_nodes]\n  layer._inbound_nodes = [\n      n for n in layer._inbound_nodes if n not in created_nodes]\n\n\ndef track_nodes_created_by_last_call(layer, created_nodes):\n  \"\"\"Adds to `created_nodes` the nodes created by the last call to `layer`.\"\"\"\n  if not layer._inbound_nodes:\n    return\n  created_nodes.add(layer._inbound_nodes[-1])\n  prev_layers = layer._inbound_nodes[-1].inbound_layers\n  for prev_layer in nest.flatten(prev_layers):\n    if 
prev_layer._outbound_nodes:\n created_nodes.add(prev_layer._outbound_nodes[-1])\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tf.data.Dataset.repeat()`.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.data.kernel_tests import checkpoint_test_base\nfrom tensorflow.python.data.kernel_tests import test_base\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.framework import combinations\nfrom tensorflow.python.platform import test\n\n\nclass RepeatTest(test_base.DatasetTestBase, parameterized.TestCase):\n\n @combinations.generate(\n combinations.times(test_base.default_test_combinations(),\n combinations.combine(count=[0, 3, 7])))\n def testFiniteRepeat(self, count):\n \"\"\"Test a dataset that repeats its input multiple times.\"\"\"\n components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))\n dataset = dataset_ops.Dataset.from_tensors(components).repeat(count)\n self.assertEqual(\n [c.shape for c in components],\n [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])\n self.assertDatasetProduces(dataset, [components] * count)\n\n @combinations.generate(test_base.default_test_combinations())\n def testInfiniteRepeat(self):\n # NOTE(mrry): There's not a good way to test that the sequence is infinite.\n components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))\n dataset = dataset_ops.Dataset.from_tensors(components).repeat(-1)\n self.assertEqual(\n [c.shape for c in components],\n [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])\n get_next = self.getNext(dataset)\n for _ in range(17):\n results = self.evaluate(get_next())\n for component, result_component in zip(components, results):\n self.assertAllEqual(component, result_component)\n\n @combinations.generate(test_base.default_test_combinations())\n def testRepeatRepeat(self):\n \"\"\"Test the composition of repeat datasets.\"\"\"\n components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))\n inner_count, outer_count = 7, 14\n\n dataset = dataset_ops.Dataset.from_tensors(components).repeat(\n inner_count).repeat(outer_count)\n self.assertEqual(\n [c.shape for c in components],\n [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])\n self.assertDatasetProduces(dataset,\n [components] * (inner_count * outer_count))\n\n\nclass RepeatDatasetCheckpointTest(checkpoint_test_base.CheckpointTestBase,\n parameterized.TestCase):\n\n def _build_repeat_dataset(self, count, take_count=3):\n components = (np.arange(10),)\n return dataset_ops.Dataset.from_tensor_slices(components).take(\n take_count).repeat(count)\n\n @combinations.generate(test_base.default_test_combinations())\n def testFiniteRepeat(self):\n count = 10\n 
self.run_core_tests(lambda: self._build_repeat_dataset(count), 3 * count)\n\n @combinations.generate(test_base.default_test_combinations())\n def testEmptyRepeat(self):\n self.run_core_tests(lambda: self._build_repeat_dataset(0), 0)\n\n @combinations.generate(test_base.default_test_combinations())\n def testInfiniteRepeat(self):\n self.verify_unused_iterator(\n lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)\n self.verify_multiple_breaks(\n lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)\n self.verify_reset_restored_iterator(\n lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)\n\n # Test repeat empty dataset\n self.run_core_tests(lambda: self._build_repeat_dataset(-1, 0), 0)\n\n @combinations.generate(test_base.default_test_combinations())\n def testInvalidRepeat(self):\n with self.assertRaisesRegex(ValueError,\n \"Shape must be rank 0 but is rank 1\"):\n self.run_core_tests(lambda: self._build_repeat_dataset([1, 2], 0), 0)\n\n\nif __name__ == \"__main__\":\n test.main()\n", "# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for `tfr_gen` module.\"\"\"\n\n# pylint: disable=missing-function-docstring\n\nimport sys\n\nfrom tensorflow.compiler.mlir.python.mlir_wrapper import filecheck_wrapper as fw\nfrom tensorflow.compiler.mlir.tfr.python import composite\nfrom tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module as tfr_gen\nfrom tensorflow.compiler.mlir.tfr.resources import gen_test_ops as test_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import gen_array_ops as array_ops\nfrom tensorflow.python.ops import gen_math_ops as math_ops\nfrom tensorflow.python.platform import test\n\n\nComposite = composite.Composite\n\n#--- test fn for mlir location ---\n\n\n@Composite('TestInputNOp')\ndef _tfr_loc_test(x):\n n = 10\n x_sum = x[0]\n for i in range(1, n):\n x_sum = math_ops.Add(x_sum, x[i])\n return x_sum\n\n\n#--- test fn for tfr tensors ---\n\n\[email protected]('TestNoOp')\ndef _tfr_tensor_empty_arg():\n pass\n\n\[email protected]('TestIdentityOp')\ndef _tfr_tensor_tensor(x):\n return x\n\n\[email protected]('TestIdentityNOp')\ndef _tfr_tensor_tensor_list(x):\n return x\n\n\[email protected]('TestInputNOp')\ndef _tfr_tensor_tensor_list_get_elt(x):\n return x[1]\n\n\[email protected]('TestOutputNOp')\ndef _tfr_tensor_tensor_list_output(x):\n return [x, x]\n\n\[email protected]('TestTwoInputsOp')\ndef _tfr_tensor_tensor_list_split(x, y, pred):\n z, _ = array_ops.Split(axis=0, value=x, num_split=2)\n (y, pred) # pylint: disable=pointless-statement\n return z\n\n\[email protected]('TestTwoOutputsOp')\ndef _tfr_tensor_two_output(x):\n z = array_ops.Split(axis=0, value=x, num_split=2)\n return z[0], z[1]\n\n\[email protected]('TestNumAttrsOp')\ndef _tfr_tensor_tensor_with_cst(x1, y1, x2, y2):\n x = array_ops.OneHot(\n indices=[0, 2, -1, 
x1], depth=y1, on_value=True, off_value=False)\n (x, x2, y2) # pylint: disable=pointless-statement\n return\n\n#--- test fn for scf control flow ---\n\n\[email protected]('TestTwoInputsOp')\ndef _tfr_control_flow_if(x, y, pred):\n if pred:\n return x\n else:\n return y\n\n\[email protected]('TestThreeInputsOp')\ndef _tfr_control_flow_nested_if(x, y, z, select):\n if select == 'x':\n return x\n elif select == 'y':\n return y\n else:\n return z\n\n\[email protected]('TestInputNOp')\ndef _tfr_control_flow_range_for(x):\n # TODO(fengliuai): use len(x) instead\n n = 10\n x_sum = x[0]\n for i in range(1, n):\n x_sum = math_ops.Add(x_sum, x[i])\n return x_sum\n\n\[email protected]('TestInputNOp')\ndef _tfr_control_flow_tensor_list_size(ins):\n n = len(ins)\n if n == 0:\n return array_ops.Const(value=[[0, 1], [2, 3]], dtype=dtypes.int64)\n else:\n return math_ops.AddN(ins)\n\n\n#--- test fn for tf ops ---\n\n\[email protected]('TestComplexTFOp')\ndef _tfr_tf_ops_complex(lhs, rhs):\n left_padding, _ = array_ops.SplitV(\n value=lhs, size_splits=[rhs, -1], axis=0, num_split=2)\n _, right_padding = array_ops.SplitV(\n value=lhs, size_splits=[rhs, rhs], axis=1, num_split=2)\n return [left_padding, right_padding]\n\n\[email protected]('TestIdentityOp')\ndef _tfr_tf_ops_tensor(x):\n return array_ops.Identity(x)\n\n\[email protected]('TestTwoInputsOp')\ndef _tfr_tf_ops_tensors(x, y, pred):\n if pred:\n return math_ops.Add(x, y)\n else:\n return array_ops.Concat(0, [x, y])\n\n\[email protected]('TestInputNOp')\ndef _tfr_tf_ops_with_defaults(ins):\n return test_ops.TestTwoInputsOp(ins[0], ins[1])\n\n\n#--- test fn for tfr attributes ---\n\n\[email protected]('TestNumAttrsOp')\ndef _tfr_attrs_num_type(x, y, x1, y1):\n # int\n z0 = [x, y]\n z1 = x == y\n z2 = x < y\n z3 = x <= y\n z4 = x > y\n z5 = x >= y\n z6 = x != y\n z7 = x + y\n z8 = x - y\n z8 += x\n z8 += 1\n (z0, z1, z2, z3, z4, z5, z6, z7, z8) # pylint: disable=pointless-statement\n\n # float\n z9 = x1 > y1\n z10 = x1 + y1\n z11 = [x1, y1]\n (z9, z10, z11) # pylint: disable=pointless-statement\n return\n\n\[email protected]('TestNonNumAttrsOp')\ndef _tfr_attrs_tfr_type(x, y, z):\n z1 = x == y\n z2 = x == 'test'\n z3 = y == z\n (z1, z2, z3) # pylint: disable=pointless-statement\n return\n\n\n#--- test fn for shapes ---\n\n\[email protected]('TestIdentityOp')\ndef _tfr_shapes(x):\n s1 = x.shape\n s3 = x.shape.as_list()\n\n for i in range(len(s3)):\n s3[i] # pylint: disable=pointless-statement\n\n for i in range(1, len(s3), 2):\n s3[i] # pylint: disable=pointless-statement\n\n s5 = array_ops.Shape(x)\n (s1, s3, s5) # pylint: disable=pointless-statement\n return x\n\n\n#--- test fn for nested functions ---\n\n\[email protected]('TestIdentityNOp')\ndef _tfr_temp_op(x):\n return x\n\n\[email protected]('TestIdentityOp')\ndef _tfr_temp_use_op(x):\n y = _tfr_temp_op([x])\n return y[0]\n\n#--- test fn for quant built-ins ---\n\n\n# pylint: disable=undefined-variable\[email protected]('TestIdentityOp')\ndef _tfr_quant_test(x):\n y = _tfr_quant_raw_data(x)\n s, z = _tfr_quant_qparam(x)\n s = _tfr_quant_scale_factor(1.0, [s, s])\n s = _tfr_quant_scale_factor(1.0, [s])\n y = math_ops.Sub(y, z)\n qmin, qmax = _tfr_quant_act_range('RELU', 1.0, 0)\n (qmin, qmax) # pylint: disable=pointless-statement\n d = _tfr_quant_rescale(y, s, 0)\n e = math_ops.Cast(x=d, DstT=dtypes.int16)\n f = math_ops.Cast(x=e, DstT=dtypes.int8)\n return f\n\n\nclass TFRGenTestBase(test.TestCase):\n\n def _check_code(self, tfr_code, exp_tfr_code):\n return 
self.assertTrue(fw.check(str(tfr_code), exp_tfr_code), str(tfr_code))\n\n\nclass TFRGenTensorTest(TFRGenTestBase):\n \"\"\"MLIR Generation Tests for MLIR TFR Program.\"\"\"\n\n def test_tfr_loc(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_loc', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_input_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor) {\n CHECK-NEXT: %[[n:.*]] = constant 10 : i64\n CHECK-SAME loc(\"tfr_gen_test.py\":%{{.*}}:6)\n CHECK-NEXT: %[[cst:.*]] = constant 0 : index\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[sum_line:.*]]:10)\n CHECK-NEXT: %[[elt:.*]] = tfr.get_element %x[%[[cst]]] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[sum_line]]:10)\n CHECK-NEXT: %[[cst_1:.*]] = constant 1 : i64\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[for_line:.*]]:2)\n CHECK-NEXT: %[[begin:.*]] = index_cast %[[cst_1]] : i64 to index\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[for_line]]:2)\n CHECK-NEXT: %[[end:.*]] = index_cast %[[n]] : i64 to index\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[for_line]]:2)\n CHECK-NEXT: %[[step:.*]] = constant 1 : index\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[for_line]]:2)\n CHECK-NEXT: %[[for_stmt:.*]] = scf.for %[[itr_1:.*]] = %[[begin]] to %[[end]] step %[[step]]\n CHECK-SAME: iter_args(%[[it_arg:.*]] = %[[elt]]) -> (!tfr.tensor) {\n CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %x[%itr_1] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[add_line:.*]]:34)\n CHECK-NEXT: %[[Add:.*]] = tfr.call @tf__add(%[[it_arg]], %[[elt_1]]) : (!tfr.tensor, !tfr.tensor) -> (!tfr.tensor)\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[add_line]]:12)\n CHECK-NEXT: scf.yield %[[Add]] : !tfr.tensor\n CHECK-SAME loc(unknown)\n CHECK-NEXT: }\n CHECK-SAME loc(\"tfr_gen_test.py\":%[[for_line]]:2)\n CHECK-NEXT: %{{.*}} = constant true\n CHECK-SAME loc(unknown)\n CHECK-NEXT: tfr.return %[[for_stmt]] : !tfr.tensor\n CHECK-SAME loc(unknown)\n CHECK-NEXT: }\n CHECK-SAME loc(\"tfr_gen_test.py\":%{{def_line:.*}}:0)\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n def test_tfr_tensors(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_tensor', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_no_op() -> () {\n CHECK-NEXT: tfr.return\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {\n CHECK-NEXT: constant true\n CHECK-NEXT: tfr.return %x : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_identity_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor_list) {\n CHECK-NEXT: constant true\n CHECK-NEXT: tfr.return %x : !tfr.tensor_list\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_input_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor) {\n CHECK-NEXT: constant true\n CHECK-NEXT: %[[index:.*]] = constant 1 : index\n CHECK-NEXT: %[[sub:.*]] = tfr.get_element %x[%cst_1] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: tfr.return %[[sub]] : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_output_n_op(%x: !tfr.tensor) -> (!tfr.tensor_list) {\n CHECK-NEXT: constant true\n CHECK-NEXT: %[[list:.*]] = \"tfr.build_list\"(%x, %x) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list\n CHECK-NEXT: tfr.return %[[list]] : !tfr.tensor_list\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_two_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor, %pred: i1{tfr.name=\"pred\",tfr.default=false}) -> (!tfr.tensor) {\n CHECK-NEXT: %[[cst:.*]] = constant 0 : i64\n CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64\n CHECK-NEXT: %[[cst_2:.*]] = 
\"tfr.constant_tensor\"(%[[cst]]) : (i64) -> !tfr.tensor\n CHECK-NEXT: %[[Split:.*]] = tfr.call @tf__split(%[[cst_2]], %x, %[[cst_1]]) : (!tfr.tensor, !tfr.tensor, i64) -> (!tfr.tensor_list)\n CHECK-NEXT: %[[cst_4:.*]] = constant 0 : index\n CHECK-NEXT: %[[elt:.*]] = tfr.get_element %[[Split]][%idx] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[cst_5:.*]] = constant 1 : index\n CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %[[Split]][%idx_1] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: constant true\n CHECK-NEXT: tfr.return %[[elt]] : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_two_outputs_op(%x: !tfr.tensor) -> (!tfr.tensor, !tfr.tensor) {\n CHECK-NEXT: %[[cst:.*]] = constant 0 : i64\n CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64\n CHECK-NEXT: %[[cst_2:.*]] = \"tfr.constant_tensor\"(%[[cst]]) : (i64) -> !tfr.tensor\n CHECK-NEXT: %[[Split:.*]] = tfr.call @tf__split(%[[cst_2]], %x, %[[cst_1]]) : (!tfr.tensor, !tfr.tensor, i64) -> (!tfr.tensor_list)\n CHECK-NEXT: constant true\n CHECK-NEXT: %[[cst_4:.*]] = constant 0 : index\n CHECK-NEXT: %[[elt:.*]] = tfr.get_element %[[Split]][%cst_4] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[cst_5:.*]] = constant 1 : index\n CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %[[Split]][%cst_5] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: tfr.return %[[elt]], %[[elt_1]] : !tfr.tensor, !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_num_attrs_op(%x1: i64{tfr.name=\"x1\",tfr.default=-10}, %y1: i64{tfr.name=\"y1\",tfr.default=1}, %x2: f32{tfr.name=\"x2\",tfr.default=0.0}, %y2: f32{tfr.name=\"y2\",tfr.default=-3.0}) -> () {\n CHECK-NEXT: %[[cst:.*]] = constant 0 : i64\n CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64\n CHECK-NEXT: %[[cst_2:.*]] = constant 1 : i64\n CHECK-NEXT: %[[zero:.*]] = constant 0 : i64\n CHECK-NEXT: %[[cst_3:.*]] = subi %zero, %cst_2 : i64\n CHECK-NEXT: %[[list:.*]] = \"tfr.build_list\"(%[[cst]], %[[cst_1]], %[[cst_3]], %x1) : (i64, i64, i64, i64) -> !tfr.attr\n CHECK-NEXT: %[[cst_4:.*]] = constant true\n CHECK-NEXT: %[[cst_5:.*]] = constant false\n CHECK-NEXT: %[[cst_6:.*]] = \"tfr.constant_tensor\"(%[[list]]) : (!tfr.attr) -> !tfr.tensor\n CHECK-NEXT: %[[cst_7:.*]] = \"tfr.constant_tensor\"(%y1) : (i64) -> !tfr.tensor\n CHECK-NEXT: %[[cst_8:.*]] = \"tfr.constant_tensor\"(%[[cst_4]]) : (i1) -> !tfr.tensor\n CHECK-NEXT: %[[cst_9:.*]] = \"tfr.constant_tensor\"(%[[cst_5]]) : (i1) -> !tfr.tensor\n CHECK-NEXT: %[[cst_10:.*]] = constant -1 : i64\n CHECK-NEXT: %[[OneHot:.*]] = tfr.call @tf__one_hot(%[[cst_6]], %[[cst_7]], %[[cst_8]], %[[cst_9]], %[[cst_10]])\n CHECK-SAME: (!tfr.tensor, !tfr.tensor, !tfr.tensor, !tfr.tensor, i64) -> (!tfr.tensor)\n CHECK-NEXT: constant true\n CHECK-NEXT: tfr.return\n CHECK-NEXT: }\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n def test_tfr_control_flow(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_control_flow', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_two_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor,\n CHECK-SAME: %pred: i1{tfr.name=\"pred\",tfr.default=false}) -> (!tfr.tensor) {\n CHECK-NEXT: %[[if:.*]] = scf.if %pred -> (!tfr.tensor) {\n CHECK-NEXT: constant true\n CHECK-NEXT: scf.yield %x : !tfr.tensor\n CHECK-NEXT: } else {\n CHECK-NEXT: constant true\n CHECK-NEXT: scf.yield %y : !tfr.tensor\n CHECK-NEXT: }\n CHECK-NEXT: tfr.return %if_stmt : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_three_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor, %z: !tfr.tensor,\n 
CHECK-SAME: %select: !tfr.attr{tfr.name=\"act\",tfr.default=\"z\"}) -> (!tfr.tensor) {\n CHECK-NEXT: %[[cst:.*]] = tfr.constant \"x\" -> !tfr.attr\n CHECK-NEXT: %[[eq:.*]] = tfr.equal %select, %[[cst]] -> i1\n CHECK-NEXT: %[[if_stmt:.*]] = scf.if %[[eq]] -> (!tfr.tensor) {\n CHECK-NEXT: %[[cst_1:.*]] = constant true\n CHECK-NEXT: scf.yield %x : !tfr.tensor\n CHECK-NEXT: } else {\n CHECK-NEXT: %[[cst_2:.*]] = tfr.constant \"y\" -> !tfr.attr\n CHECK-NEXT: %[[eq_1:.*]] = tfr.equal %select, %[[cst_2]] -> i1\n CHECK-NEXT: %[[if_stmt1:.*]] = scf.if %[[eq_1]] -> (!tfr.tensor) {\n CHECK-NEXT: %[[cst_3:.*]] = constant true\n CHECK-NEXT: scf.yield %y : !tfr.tensor\n CHECK-NEXT: } else {\n CHECK-NEXT: %[[cst_4:.*]] = constant true\n CHECK-NEXT: scf.yield %z : !tfr.tensor\n CHECK-NEXT: }\n CHECK-NEXT: scf.yield %[[if_stmt1]] : !tfr.tensor\n CHECK-NEXT: }\n CHECK-NEXT: tfr.return %[[if_stmt]] : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_input_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor) {\n CHECK-NEXT: %[[n:.*]] = constant 10 : i64\n CHECK-NEXT: %[[cst:.*]] = constant 0 : index\n CHECK-NEXT: %[[elt:.*]] = tfr.get_element %x[%[[cst]]] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[cst_1:.*]] = constant 1 : i64\n CHECK-NEXT: %[[begin:.*]] = index_cast %[[cst_1]] : i64 to index\n CHECK-NEXT: %[[end:.*]] = index_cast %[[n]] : i64 to index\n CHECK-NEXT: %[[step:.*]] = constant 1 : index\n CHECK-NEXT: %[[for_stmt:.*]] = scf.for %[[itr_1:.*]] = %[[begin]] to %[[end]] step %[[step]]\n CHECK-SAME: iter_args(%[[it_arg:.*]] = %[[elt]]) -> (!tfr.tensor) {\n CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %x[%itr_1] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[Add:.*]] = tfr.call @tf__add(%[[it_arg]], %[[elt_1]]) : (!tfr.tensor, !tfr.tensor) -> (!tfr.tensor)\n CHECK-NEXT: scf.yield %[[Add]] : !tfr.tensor\n CHECK-NEXT: }\n CHECK-NEXT: %{{.*}} = constant true\n CHECK-NEXT: tfr.return %[[for_stmt]] : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_input_n_op(%ins: !tfr.tensor_list) -> (!tfr.tensor) {\n CHECK: %[[attr:.*]] = tfr.constant i64 -> !tfr.attr\n CHECK: %Const = tfr.call @tf__const(%{{.*}}, %[[attr]]) : (!tfr.attr, !tfr.attr) -> (!tfr.tensor)\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n def test_tfr_tf_ops(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_tf_ops', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_complex_tf_op(%lhs: !tfr.tensor, %rhs: !tfr.tensor) -> (!tfr.tensor_list) {\n CHECK-NEXT: %[[cst:.*]] = constant 1 : i64\n CHECK-NEXT: %[[zero:.*]] = constant 0 : i64\n CHECK-NEXT: %[[cst_1:.*]] = subi %[[zero]], %cst : i64\n CHECK-NEXT: %[[cst_2:.*]] = \"tfr.constant_tensor\"(%[[cst_1]]) : (i64) -> !tfr.tensor\n CHECK-NEXT: %[[list:.*]] = \"tfr.build_list\"(%rhs, %[[cst_2]]) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list\n CHECK-NEXT: %[[cst_3:.*]] = constant 0 : i64\n CHECK-NEXT: %[[cst_4:.*]] = constant 2 : i64\n CHECK-NEXT: %[[zero_1:.*]] = constant 0 : i64\n CHECK-NEXT: %[[pack:.*]] = tfr.call @tf__pack(%[[list]], %[[zero_1]]) : (!tfr.tensor_list, i64) -> !tfr.tensor\n CHECK-NEXT: %[[cst_5:.*]] = \"tfr.constant_tensor\"(%[[cst_3]]) : (i64) -> !tfr.tensor\n CHECK-NEXT: %[[SplitV:.*]] = tfr.call @tf__split_v(%lhs, %[[pack]], %[[cst_5]], %[[cst_4]])\n CHECK-NEXT: %[[idx:.*]] = constant 0 : index\n CHECK-NEXT: %[[elt:.*]] = tfr.get_element %SplitV[%idx] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[idx_1:.*]] = constant 1 : index\n CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element 
%SplitV[%idx_1] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[list_1:.*]] = \"tfr.build_list\"(%rhs, %rhs) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list\n CHECK-NEXT: %[[cst_6:.*]] = constant 1 : i64\n CHECK-NEXT: %[[cst_7:.*]] = constant 2 : i64\n CHECK-NEXT: %[[zero_2:.*]] = constant 0 : i64\n CHECK-NEXT: %[[pack_1:.*]] = tfr.call @tf__pack(%[[list_1]], %[[zero_2]]) : (!tfr.tensor_list, i64) -> !tfr.tensor\n CHECK-NEXT: %[[cst_8:.*]] = \"tfr.constant_tensor\"(%[[cst_6]]) : (i64) -> !tfr.tensor\n CHECK-NEXT: %[[SplitV_1:.*]] = tfr.call @tf__split_v(%lhs, %[[pack_1]], %[[cst_8]], %[[cst_7]])\n CHECK-NEXT: %[[idx_2:.*]] = constant 0 : index\n CHECK-NEXT: %[[elt_2:.*]] = tfr.get_element %SplitV_1[%idx_2] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[idx_3:.*]] = constant 1 : index\n CHECK-NEXT: %[[elt_3:.*]] = tfr.get_element %SplitV_1[%idx_3] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[cst_9:.*]] = constant true\n CHECK-NEXT: %[[list_2:.*]] = \"tfr.build_list\"(%[[elt]], %[[elt_3]]) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list\n CHECK-NEXT: tfr.return %[[list_2]] : !tfr.tensor_list\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {\n CHECK-NEXT: %cst = constant true\n CHECK-NEXT: %[[Id:.*]] = tfr.call @tf__identity(%x) : (!tfr.tensor) -> (!tfr.tensor)\n CHECK-NEXT: tfr.return %[[Id]] : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_two_inputs_op(%x: !tfr.tensor, %y: !tfr.tensor,\n CHECK-SAME: %pred: i1{tfr.name=\"pred\",tfr.default=false}) -> (!tfr.tensor) {\n CHECK-NEXT: %[[if_stmt:.*]] = scf.if %pred -> (!tfr.tensor) {\n CHECK-NEXT: %cst = constant true\n CHECK-NEXT: %[[Add:.*]] = tfr.call @tf__add(%x, %y) : (!tfr.tensor, !tfr.tensor) -> (!tfr.tensor)\n CHECK-NEXT: scf.yield %[[Add]] : !tfr.tensor\n CHECK-NEXT: } else {\n CHECK-NEXT: %cst_1 = constant true\n CHECK-NEXT: %[[cst_2:.*]] = constant 0 : i64\n CHECK-NEXT: %[[list:.*]] = \"tfr.build_list\"(%x, %y) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list\n CHECK-NEXT: %[[Concat:.*]] = tfr.call @tf__concat(%[[cst_2]], %[[list]]) : (i64, !tfr.tensor_list) -> (!tfr.tensor)\n CHECK-NEXT: scf.yield %[[Concat]] : !tfr.tensor\n CHECK-NEXT: }\n CHECK-NEXT: tfr.return %[[if_stmt]] : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_input_n_op(%ins: !tfr.tensor_list) -> (!tfr.tensor) {\n CHECK-NEXT: %cst = constant true\n CHECK-NEXT: %[[cst_1:.*]] = constant 0 : index\n CHECK-NEXT: %[[elt:.*]] = tfr.get_element %ins[%cst_1] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[cst_2:.*]] = constant 1 : index\n CHECK-NEXT: %[[elt_1:.*]] = tfr.get_element %ins[%cst_2] : (!tfr.tensor_list, index) -> !tfr.tensor\n CHECK-NEXT: %[[cst_3:.*]] = constant false\n CHECK-NEXT: %[[call:.*]] = tfr.call @tf__test_two_inputs_op(\n CHECK-SAME: %[[elt]], %[[elt_1]], %[[cst_3]]) : (!tfr.tensor, !tfr.tensor, i1) -> (!tfr.tensor)\n CHECK-NEXT: tfr.return %[[call]] : !tfr.tensor\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__add_(!tfr.tensor<T>,!tfr.tensor<T>) -> (!tfr.tensor<T>) attributes {T}\n\n CHECK-LABEL: tfr.func @tf__concat_(!tfr.tensor<i32_>,!tfr.tensor_list<N,T>) -> (!tfr.tensor<T>) attributes {N,T,i32_}\n\n CHECK-LABEL: tfr.func @tf__identity_(!tfr.tensor<T>) -> (!tfr.tensor<T>) attributes {T}\n\n CHECK-LABEL: tfr.func @tf__pack_(!tfr.tensor_list<N,T>,i64{tfr.name=\"axis\",tfr.type=\"int\"}) -> (!tfr.tensor<T>) attributes {N,T,axis}\n\n CHECK-LABEL: tfr.func 
@tf__split_v_(!tfr.tensor<T>,!tfr.tensor<Tlen>,!tfr.tensor<i32_>,i64{tfr.name=\"num_split\",tfr.type=\"int\"}) -> (!tfr.tensor_list<num_split,T>) attributes {T,Tlen,i32_,num_split}\n\n CHECK-LABEL: tfr.func @tf__test_two_inputs_op_(!tfr.tensor<T>,!tfr.tensor<T>,i1{tfr.name=\"pred\",tfr.type=\"bool\"}) -> (!tfr.tensor<T>) attributes {T,pred}\n\n CHECK-LABEL: tfr.func @tf__test_complex_tf_op_(!tfr.tensor<T>,!tfr.tensor<Tlen>,i64{tfr.name=\"N\",tfr.type=\"int\"}) -> (!tfr.tensor_list<N,T>) attributes {N,T,Tlen}\n\n CHECK-LABEL: tfr.func @tf__test_identity_op_(!tfr.tensor<T>) -> (!tfr.tensor<T>) attributes {T}\n\n CHECK-LABEL: tfr.func @tf__test_two_inputs_op_(!tfr.tensor<T>,!tfr.tensor<T>,i1{tfr.name=\"pred\",tfr.type=\"bool\"}) -> (!tfr.tensor<T>) attributes {T,pred}\n\n CHECK-LABEL: tfr.func @tf__test_input_n_op_(!tfr.tensor_list<N,T>) -> (!tfr.tensor<T>) attributes {N,T}\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n def test_tfr_attrs(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_attrs', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_num_attrs_op(\n CHECK-SAME: %x: i64{tfr.name=\"x1\",tfr.default=-10},\n CHECK-SAME: %y: i64{tfr.name=\"y1\",tfr.default=1},\n CHECK-SAME: %x1: f32{tfr.name=\"x2\",tfr.default=0.0},\n CHECK-SAME: %y1: f32{tfr.name=\"y2\",tfr.default=-3.0}) -> () {\n CHECK-NEXT: %{{.*}} = \"tfr.build_list\"(%x, %y) : (i64, i64) -> !tfr.attr\n CHECK-NEXT: %{{.*}} = cmpi \"eq\", %x, %y : i64\n CHECK-NEXT: %{{.*}} = cmpi \"ult\", %x, %y : i64\n CHECK-NEXT: %{{.*}} = cmpi \"ule\", %x, %y : i64\n CHECK-NEXT: %{{.*}} = cmpi \"ugt\", %x, %y : i64\n CHECK-NEXT: %{{.*}} = cmpi \"uge\", %x, %y : i64\n CHECK-NEXT: %{{.*}} = cmpi \"ne\", %x, %y : i64\n CHECK-NEXT: %{{.*}} = addi %x, %y : i64\n CHECK-NEXT: %{{.*}} = subi %x, %y : i64\n CHECK-NEXT: %[[add_1:.*]] = addi %sub, %x : i64\n CHECK-NEXT: %[[cst:.*]] = constant 1 : i64\n CHECK-NEXT: %{{.*}} = addi %[[add_1]], %[[cst]] : i64\n CHECK-NEXT: %{{.*}} = cmpf \"ugt\", %x1, %y1 : f32\n CHECK-NEXT: %{{.*}} = addf %x1, %y1 : f32\n CHECK-NEXT: %{{.*}} = \"tfr.build_list\"(%x1, %y1) : (f32, f32) -> !tfr.attr\n CHECK-NEXT: %{{.*}} = constant true\n CHECK-NEXT: tfr.return\n CHECK-NEXT: }\n\n CHECK-LABEL: tfr.func @tf__test_non_num_attrs_op(\n CHECK-SAME: %x: !tfr.attr{tfr.name=\"z\"},\n CHECK-SAME: %y: !tfr.attr{tfr.name=\"x\",tfr.default=\"hello\"},\n CHECK-SAME: %z: !tfr.attr{tfr.name=\"y\",tfr.default=f32}) -> () {\n CHECK-NEXT: %{{.*}} = tfr.equal %x, %y -> i1\n CHECK-NEXT: %[[cst:.*]] = tfr.constant \"test\" -> !tfr.attr\n CHECK-NEXT: %{{.*}} = tfr.equal %x, %[[cst]] -> i1\n CHECK-NEXT: %{{.*}} = tfr.equal %y, %z -> i1\n CHECK-NEXT: %{{.*}} = constant true\n CHECK-NEXT: tfr.return\n CHECK-NEXT: }\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n def test_tf_tensor_shape(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_shapes', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {\n CHECK-NEXT: %[[shape:.*]] = tfr.get_shape %x -> !shape.shape\n\n CHECK-NEXT: %[[shape_1:.*]] = tfr.get_shape %x -> !shape.shape\n CHECK-NEXT: %[[len:.*]] = shape.rank %[[shape_1]] : !shape.shape -> !shape.size\n CHECK-NEXT: %[[index:.*]] = shape.size_to_index %[[len]] : !shape.size\n CHECK-NEXT: %[[begin:.*]] = constant 0 : index\n CHECK-NEXT: %[[step:.*]] = constant 1 : index\n CHECK-NEXT: scf.for %[[itr_1:.*]] = %[[begin]] to %[[index]] step %[[step]] {\n CHECK-NEXT: %[[size:.*]] = shape.get_extent %[[shape_1]], %[[itr_1]]: 
!shape.shape, index -> !shape.size\n CHECK-NEXT: %[[elt:.*]] = shape.size_to_index %[[size]] : !shape.size\n CHECK-NEXT: scf.yield\n CHECK-NEXT: }\n\n CHECK-NEXT: %[[cst:.*]] = constant 1 : i64\n CHECK-NEXT: %[[len_1:.*]] = shape.rank %shape_1 : !shape.shape -> !shape.size\n CHECK-NEXT: %[[len_size_1:.*]] = shape.size_to_index %[[len_1]] : !shape.size\n CHECK-NEXT: %[[cst_1:.*]] = constant 2 : i64\n CHECK-NEXT: %[[begin_1:.*]] = index_cast %[[cst]] : i64 to index\n CHECK-NEXT: %[[step_1:.*]] = index_cast %[[cst_1]] : i64 to index\n CHECK-NEXT: scf.for %[[itr_3:.*]] = %[[begin_1]] to %[[len_size_1]] step %[[step_1]]\n\n CHECK: %[[cst:.*]] = tfr.constant i32 -> !tfr.attr\n CHECK-NEXT: %[[Shape:.*]] = tfr.call @tf__shape(%x, %[[cst]]) : (!tfr.tensor, !tfr.attr) -> (!tfr.tensor)\n CHECK-NEXT: %{{.*}} = constant true\n CHECK-NEXT: tfr.return %x : !tfr.tensor\n CHECK-NEXT: }\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n def test_temp_function(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_temp', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_identity_n_op(%x: !tfr.tensor_list) -> (!tfr.tensor_list)\n\n CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {\n CHECK-NEXT: %[[list:.*]] = \"tfr.build_list\"(%x) : (!tfr.tensor) -> !tfr.tensor_list\n CHECK-NEXT: %[[call:.*]] = tfr.call @tf__test_identity_n_op(%[[list]]) : (!tfr.tensor_list)\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n def test_quant_builtins(self):\n mlir_code = tfr_gen(sys.modules[__name__], '_tfr_quant', [test_ops])\n mlir_code_exp = r\"\"\"\n CHECK-LABEL: tfr.func @tf__test_identity_op(%x: !tfr.tensor) -> (!tfr.tensor) {\n CHECK-NEXT: %[[raw_data:.*]] = tfr.quant_raw_data(%x) : (!tfr.tensor) -> (!tfr.tensor)\n CHECK-NEXT: %[[qparam:.*]]:2 = tfr.quant_qparam(%x) : (!tfr.tensor) -> (!tfr.tensor, !tfr.tensor)\n CHECK: %[[list:.*]] = \"tfr.build_list\"(%[[qparam]]#0, %[[qparam]]#0) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor_list\n CHECK: %[[factor:.*]] = tfr.quant_scale_factor(%{{.*}}, %[[list]]) : (f32, !tfr.tensor_list) -> (!tfr.tensor)\n CHECK: %[[list1:.*]] = \"tfr.build_list\"(%[[factor]]) : (!tfr.tensor) -> !tfr.tensor_list\n CHECK: %[[factor1:.*]] = tfr.quant_scale_factor(%{{.*}}, %[[list1]]) : (f32, !tfr.tensor_list) -> (!tfr.tensor)\n CHECK-NEXT: %[[Sub:.*]] = tfr.call @tf__sub(%[[raw_data]], %[[qparam]]#1) : (!tfr.tensor, !tfr.tensor) -> (!tfr.tensor)\n CHECK: %[[act_range:.*]]:2 = tfr.quant_act_range(%{{.*}}, %{{.*}}, %{{.*}}) : (!tfr.attr, f32, i64) -> (!tfr.tensor, !tfr.tensor)\n CHECK: %[[rescale:.*]] = tfr.quant_rescale(%[[Sub]], %[[factor1]], %{{.*}}) : (!tfr.tensor, !tfr.tensor, i64) -> (!tfr.tensor)\n CHECK: %[[attr:.*]] = tfr.constant i16 -> !tfr.attr\n CHECK: %[[Cast:.*]] = tfr.call @tf__cast(%[[rescale]], %[[attr]], %{{.*}}) : (!tfr.tensor, !tfr.attr, i1) -> (!tfr.tensor)\n CHECK: %[[attr_1:.*]] = tfr.constant i8 -> !tfr.attr\n CHECK: tfr.call @tf__cast(%[[Cast]], %[[attr_1]], %{{.*}}) : (!tfr.tensor, !tfr.attr, i1) -> (!tfr.tensor)\n CHECK: }\n \"\"\"\n self._check_code(mlir_code, mlir_code_exp)\n\n\nif __name__ == '__main__':\n test.main()\n" ]
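The tfr_gen_test.py sources above close out this record's code. As an illustration of the `Composite`/`tfr_gen_from_module` workflow they exercise, here is a minimal, hypothetical sketch; it assumes a TensorFlow build that ships the TFR MLIR bindings and the `gen_test_ops` resources (the same imports the file itself uses), and `_my_add` is an invented name, not something from the source:

# Hedged sketch only: mirrors the call pattern used in tfr_gen_test.py above.
import sys

from tensorflow.compiler.mlir.tfr.python import composite
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.compiler.mlir.tfr.resources import gen_test_ops as test_ops
from tensorflow.python.ops import gen_math_ops as math_ops


# Register a composite lowering for an op, as the test module above does.
@composite.Composite('TestTwoInputsOp')
def _my_add(x, y, pred):
  (pred)  # pylint: disable=pointless-statement
  return math_ops.Add(x, y)


# Emit TFR MLIR for every composite in this module whose name starts with
# '_my' -- the same (module, method_prefix, op_libraries) call pattern the
# tests above pass to tfr_gen.
mlir_code = tfr_gen_from_module(sys.modules[__name__], '_my', [test_ops])
print(mlir_code)

Further up, the same record carries the Keras `Sequential` implementation; its `add()`/`pop()` and deferred-build paths are reachable through the public API. A short usage sketch, assuming a standard TensorFlow 2.x install:

import tensorflow as tf

model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(8, activation='relu'))
model.add(tf.keras.layers.Dense(1))

# No Input layer was added, so build() goes through the inferred-shape
# path (_build_graph_network_for_inferred_shape in the source above).
model.build(input_shape=(None, 4))
assert model.built

# pop() removes the top layer and re-initializes the graph network.
model.pop()
assert len(model.layers) == 1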
[ [ "tensorflow.python.distribute.cluster_resolver.tpu_cluster_resolver.TPUClusterResolver", "tensorflow.python.tpu.device_assignment.DeviceAssignment", "tensorflow.python.distribute.multi_process_runner.MultiProcessPoolRunner", "tensorflow.python.eager.remote.connect_to_cluster", "tensorflow.python.distribute.multi_worker_test_base.create_cluster_spec", "tensorflow.python.distribute.sharded_variable.FixedShardsPartitioner", "tensorflow.python.distribute.combinations.combine", "tensorflow.python.tpu.tpu_strategy_util.initialize_tpu_system", "tensorflow.python.util.tf_export.tf_export", "tensorflow.python.distribute.multi_process_runner.get_barrier", "tensorflow.python.eager.context.eager_mode", "tensorflow.python.training.server_lib.ClusterSpec", "tensorflow.python.distribute.multi_worker_test_base.create_in_process_cluster", "tensorflow.python.distribute.tpu_strategy.TPUStrategyV1", "tensorflow.python.distribute.tpu_strategy.TPUStrategy", "tensorflow.python.distribute.combinations.NamedDistribution", "tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver", "tensorflow.python.tf2.enabled", "tensorflow.python.distribute.test_util.set_logical_devices_to_at_least" ], [ "tensorflow.python.data.kernel_tests.test_base.default_test_combinations", "tensorflow.python.data.experimental.ops.matching_files.MatchingFilesDataset", "tensorflow.python.platform.test.main", "tensorflow.python.util.compat.as_bytes" ], [ "tensorflow.python.platform.tf_logging.warning", "tensorflow.python.keras.layers.deserialize", "tensorflow.python.keras.utils.tf_utils.assert_no_legacy_layers", "tensorflow.python.keras.engine.training_utils.get_input_shape_and_dtype", "tensorflow.python.keras.saving.saved_model.model_serialization.SequentialSavedModelSaver", "tensorflow.python.util.tf_export.keras_export", "tensorflow.python.keras.utils.layer_utils.get_source_inputs", "tensorflow.python.keras.engine.input_layer.Input", "tensorflow.python.keras.engine.functional.ModuleWrapper", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.framework.tensor_util.is_tf_type", "tensorflow.python.framework.ops.executing_eagerly_outside_functions", "tensorflow.python.tf2.enabled", "tensorflow.python.keras.engine.base_layer.keras_api_gauge.get_cell", "tensorflow.python.util.nest.map_structure", "tensorflow.python.keras.utils.generic_utils.serialize_keras_object", "tensorflow.python.util.nest.flatten", "tensorflow.python.keras.utils.tf_inspect.getfullargspec" ], [ "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.data.kernel_tests.test_base.default_test_combinations", "numpy.arange", "tensorflow.python.platform.test.main", "tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes", "numpy.array", "tensorflow.python.framework.combinations.combine" ], [ "tensorflow.python.ops.gen_array_ops.OneHot", "tensorflow.compiler.mlir.tfr.python.tfr_gen.tfr_gen_from_module", "tensorflow.python.ops.gen_array_ops.Split", "tensorflow.python.ops.gen_array_ops.SplitV", "tensorflow.compiler.mlir.tfr.python.composite.Composite", "tensorflow.python.ops.gen_math_ops.Sub", "tensorflow.python.ops.gen_math_ops.Cast", "tensorflow.python.ops.gen_array_ops.Const", "tensorflow.python.ops.gen_array_ops.Concat", "tensorflow.python.platform.test.main", "tensorflow.python.ops.gen_math_ops.AddN", "tensorflow.python.ops.gen_array_ops.Identity", "tensorflow.compiler.mlir.tfr.resources.gen_test_ops.TestTwoInputsOp", 
"tensorflow.python.ops.gen_array_ops.Shape", "tensorflow.python.ops.gen_math_ops.Add" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "2.7", "2.6", "2.4", "2.3", "2.9", "2.5", "2.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.9", "2.5", "2.8", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
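Each record closes, as above, with an `apis` list naming the fully qualified callables used per file and a `possible_versions` map. The dump does not say how these fields were produced; purely as an illustration, alias-level call targets can be pulled from a code string with Python's standard `ast` module (mapping aliases such as `np` back to canonical module paths, as the field does, would additionally require resolving the file's imports):

import ast


def extract_call_targets(source):
    """Return sorted dotted call targets (e.g. 'np.arange') found in source."""
    calls = set()
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Call):
            continue
        parts = []
        func = node.func
        # Unwind attribute chains such as np.testing.assert_array_equal.
        while isinstance(func, ast.Attribute):
            parts.append(func.attr)
            func = func.value
        if isinstance(func, ast.Name):
            parts.append(func.id)
            calls.add('.'.join(reversed(parts)))
    return sorted(calls)


print(extract_call_targets("import numpy as np\nx = np.arange(10)\n"))
# -> ['np.arange']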
mridullpandey/sunpy
[ "65bf70731a8147899b8c0fca8b3b1a386e47c010", "65bf70731a8147899b8c0fca8b3b1a386e47c010", "65bf70731a8147899b8c0fca8b3b1a386e47c010", "65bf70731a8147899b8c0fca8b3b1a386e47c010" ]
[ "sunpy/physics/tests/test_differential_rotation.py", "sunpy/instr/tests/test_lyra.py", "sunpy/instr/tests/test_rhessi.py", "examples/time_series/timeseries_convolution_filter.py" ]
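The first file listed above, test_differential_rotation.py, pins `diff_rot` to fixed reference values. Those numbers can be reproduced directly; a short sketch assuming sunpy and astropy are installed (the expected values are the ones asserted in the sources below):

import astropy.units as u
from sunpy.physics.differential_rotation import diff_rot

seconds_per_day = 24 * 60 * 60.0 * u.s

# test_single below expects ~136.8216 deg for 10 days at 30 deg latitude
# with the default ('howard') rotation model.
print(diff_rot(10 * seconds_per_day, 30 * u.deg))

# test_allen below expects ~136.9 deg from the 'allen' model.
print(diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='allen'))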
[ "import os\nimport pytest\n\nimport numpy as np\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.coordinates import Longitude\nfrom astropy.tests.helper import assert_quantity_allclose\nfrom astropy.time import TimeDelta\n\nfrom sunpy.coordinates import frames\nfrom sunpy.coordinates.ephemeris import get_earth\nfrom sunpy.map.maputils import map_edges\nfrom sunpy.physics.differential_rotation import (diff_rot, solar_rotate_coordinate,\n                                                 differential_rotate,\n                                                 _get_new_observer, _rotate_submap_edge,\n                                                 _get_extreme_position, _get_bounding_coordinates,\n                                                 _warp_sun_coordinates)\nimport sunpy.data.test\nimport sunpy.map\n\n# pylint: disable=C0103,R0904,W0201,W0212,W0232,E1103\n\n# Please note the numbers in these tests are not checked for physical\n# accuracy, only that they are the values the function was outputting upon\n# implementation. This is not a significant issue for the diff_rot function\n# since it is relatively simple and the values it produces can be easily\n# compared to other implementations of the same simple function. The same\n# cannot be said for the solar_rotate_coordinate function. This functionality\n# relies on accurate knowledge of the solar ephemeris in particular.\n# There is no reference implementation of the solar_rotate_coordinate function\n# of demonstrated trustworthiness at time of writing in any language. There\n# are no known independent values or tests that can be used to test the\n# veracity of the solar_rotate_coordinate function. This being the case, the\n# solar_rotate_coordinate function is tested against values that it generated.\n# Therefore these tests test for consistency, not accuracy. Note that when the\n# 0.8.0 branch was released, the solar ephemeris calculation was handed off to\n# the relevant Astropy code. The solar_rotate_coordinate tests were changed\n# for self-consistency. Note that the change in position comparing the results\n# of pre- and 0.8.0 sunpy solar coordinate rotation functionality (rot_hpc\n# and solar_rotate_coordinate respectively) was on the order of 0.5 arcseconds.\n# At time of writing, the difference between the rotation\n# calculated using the pre-0.8.0 rot_hpc function and the SSWIDL equivalent\n# rot_xy.pro for the tests given in pre-0.8.0 was on the order of hundredths\n# of an arcsecond. 
I suspect that the reason for the small differences is\n# because the sunpy's ephemeris and coordinate transformation infrastructure\n# was largely based on that in SSWIDL.\n\n\ntestpath = sunpy.data.test.rootdir\n\[email protected]\ndef aia171_test_map():\n return sunpy.map.Map(os.path.join(testpath, 'aia_171_level1.fits'))\n\n\[email protected]\ndef all_off_disk_map(aia171_test_map):\n return aia171_test_map.submap((1, 1)*u.pix, (11, 12)*u.pix)\n\n\[email protected]\ndef all_on_disk_map(aia171_test_map):\n return aia171_test_map.submap((30, 60)*u.pix, (50, 85)*u.pix)\n\n\[email protected]\ndef straddles_limb_map(aia171_test_map):\n return aia171_test_map.submap((64, 80)*u.pix, (120, 127)*u.pix)\n\n\[email protected]\ndef aia171_test_map_with_mask(aia171_test_map):\n shape = aia171_test_map.data.shape\n mask = np.zeros_like(aia171_test_map.data, dtype=bool)\n mask[0:shape[0]//2, 0:shape[1]//2] = True\n return sunpy.map.Map(np.ma.array(aia171_test_map.data, mask=mask), aia171_test_map.meta)\n\n\[email protected]\ndef aia171_test_submap(aia171_test_map):\n bl = SkyCoord(-512 * u.arcsec, 100 * u.arcsec, frame=aia171_test_map.coordinate_frame)\n ur = SkyCoord(-100 * u.arcsec, 400 * u.arcsec, frame=aia171_test_map.coordinate_frame)\n return aia171_test_map.submap(bl, ur)\n\n\[email protected]\ndef seconds_per_day():\n return 24 * 60 * 60.0 * u.s\n\n\ndef test_single(seconds_per_day):\n rot = diff_rot(10 * seconds_per_day, 30 * u.deg)\n assert_quantity_allclose(rot, 136.8216 * u.deg, rtol=1e-3)\n\n\ndef test_array(seconds_per_day):\n rot = diff_rot(10 * seconds_per_day, np.linspace(-70, 70, 2) * u.deg)\n assert_quantity_allclose(rot, Longitude(np.array([110.2725, 110.2725]) * u.deg), rtol=1e-3)\n\n\ndef test_synodic(seconds_per_day):\n rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard', frame_time='synodic')\n assert_quantity_allclose(rot, 126.9656 * u.deg, rtol=1e-3)\n\n\ndef test_sidereal(seconds_per_day):\n rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard', frame_time='sidereal')\n assert_quantity_allclose(rot, 136.8216 * u.deg, rtol=1e-3)\n\n\ndef test_howard(seconds_per_day):\n rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard')\n assert_quantity_allclose(rot, 136.8216 * u.deg, rtol=1e-3)\n\n\ndef test_allen(seconds_per_day):\n rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='allen')\n assert_quantity_allclose(rot, 136.9 * u.deg, rtol=1e-3)\n\n\ndef test_snodgrass(seconds_per_day):\n rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='snodgrass')\n assert_quantity_allclose(rot, 135.4232 * u.deg, rtol=1e-3)\n\n\ndef test_fail(seconds_per_day):\n with pytest.raises(ValueError):\n rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='garbage')\n\n\ndef test_solar_rotate_coordinate():\n # Testing along the Sun-Earth line, observer is on the Earth\n obs_time = '2010-09-10 12:34:56'\n observer = get_earth(obs_time)\n c = SkyCoord(-570*u.arcsec, 120*u.arcsec, obstime=obs_time, observer=observer, frame=frames.Helioprojective)\n new_time = '2010-09-11 12:34:56'\n new_observer = get_earth(new_time)\n\n # Test that when both the observer and the time are specified, an error is raised.\n with pytest.raises(ValueError):\n d = solar_rotate_coordinate(c, observer=observer, time=new_time)\n\n # Test that the code properly filters the observer keyword\n with pytest.raises(ValueError):\n d = solar_rotate_coordinate(c, observer='earth')\n\n # Test that the code properly filters the time keyword\n with pytest.raises(ValueError):\n with 
pytest.warns(UserWarning, match=\"Using 'time' assumes an Earth-based observer\"):\n d = solar_rotate_coordinate(c, time='noon')\n\n # Test that the code gives the same output for multiple different inputs\n # that define the same observer location and time.\n for i, definition in enumerate((1 * u.day, TimeDelta(1*u.day), new_time, new_observer)):\n if i in (0, 1, 2):\n with pytest.warns(UserWarning, match=\"Using 'time' assumes an Earth-based observer\"):\n d = solar_rotate_coordinate(c, time=definition)\n else:\n d = solar_rotate_coordinate(c, observer=definition)\n\n # Test that a SkyCoordinate is created\n assert isinstance(d, SkyCoord)\n\n # Test the coordinate\n np.testing.assert_almost_equal(d.Tx.to(u.arcsec).value, -371.8885208634674, decimal=1)\n np.testing.assert_almost_equal(d.Ty.to(u.arcsec).value, 105.35006656251727, decimal=1)\n np.testing.assert_allclose(d.distance.to(u.km).value, 1.499642e+08, rtol=1e-5)\n\n # Test that the SkyCoordinate is Helioprojective\n assert isinstance(d.frame, frames.Helioprojective)\n\n\ndef test_differential_rotate(aia171_test_map, all_off_disk_map, all_on_disk_map, straddles_limb_map):\n\n # Test a map that is entirely off the disk of the Sun\n # Should report an error\n with pytest.raises(ValueError):\n dmap = differential_rotate(all_off_disk_map)\n\n # Test a full disk map\n new_observer = get_earth(aia171_test_map.date + 6*u.hr)\n dmap = differential_rotate(aia171_test_map, observer=new_observer)\n assert dmap.data.shape == aia171_test_map.data.shape\n\n # Test a map that is entirely on disk - triggers sub full disk branches\n # Rotated map should have a smaller extent in the x - direction\n new_observer = get_earth(all_on_disk_map.date - 48*u.hr)\n dmap = differential_rotate(all_on_disk_map, observer=new_observer)\n assert dmap.data.shape[1] < all_on_disk_map.data.shape[1]\n\n # This rotated map should have a larger extent in the x direction\n new_observer = get_earth(all_on_disk_map.date + 48*u.hr)\n dmap = differential_rotate(all_on_disk_map, observer=new_observer)\n assert dmap.data.shape[1] > all_on_disk_map.data.shape[1]\n\n # Test a map that straddles the limb - triggers sub full disk branches\n # Rotated map should have a smaller extent in the x - direction\n new_observer = get_earth(straddles_limb_map.date + 48*u.hr)\n dmap = differential_rotate(straddles_limb_map, observer=new_observer)\n assert dmap.data.shape[1] < straddles_limb_map.data.shape[1]\n\n # The output map should have the positional properties of the observer\n assert dmap.date == new_observer.obstime\n assert dmap.heliographic_latitude == new_observer.lat\n assert dmap.heliographic_longitude == new_observer.lon\n\n\n# Tests of the helper functions\ndef test_get_new_observer(aia171_test_map):\n initial_obstime = aia171_test_map.date\n rotation_interval = 2 * u.day\n new_time = initial_obstime + rotation_interval\n time_delta = new_time - initial_obstime\n observer = get_earth(initial_obstime + rotation_interval)\n\n # The observer time is set along with other definitions of time\n for time in (rotation_interval, new_time, time_delta):\n with pytest.raises(ValueError):\n new_observer = _get_new_observer(initial_obstime, observer, time)\n\n # Obstime property is present but the value is None\n observer_obstime_is_none = SkyCoord(12*u.deg, 46*u.deg, frame=frames.HeliographicStonyhurst)\n with pytest.raises(ValueError):\n new_observer = _get_new_observer(None, observer_obstime_is_none, None)\n\n # When the observer is set, it gets passed back out\n new_observer = 
_get_new_observer(initial_obstime, observer, None)\n assert isinstance(new_observer, SkyCoord)\n np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value,\n observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value, decimal=3)\n np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value,\n observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value, decimal=3)\n np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value,\n observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value, decimal=3)\n\n # When the time is set, a coordinate for Earth comes back out\n for time in (rotation_interval, new_time, time_delta):\n with pytest.warns(UserWarning, match=\"Using 'time' assumes an Earth-based observer\"):\n new_observer = _get_new_observer(initial_obstime, None, time)\n assert isinstance(new_observer, SkyCoord)\n\n np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value,\n observer.transform_to(frames.HeliographicStonyhurst).lon.to(u.deg).value, decimal=3)\n np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value,\n observer.transform_to(frames.HeliographicStonyhurst).lat.to(u.deg).value, decimal=3)\n np.testing.assert_almost_equal(new_observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value,\n observer.transform_to(frames.HeliographicStonyhurst).radius.to(u.au).value, decimal=3)\n\n # The observer and the time cannot both be None\n with pytest.raises(ValueError):\n new_observer = _get_new_observer(initial_obstime, None, None)\n\n\ndef test_rotate_submap_edge(aia171_test_map, all_off_disk_map, all_on_disk_map, straddles_limb_map):\n\n observer = get_earth(aia171_test_map.date + 2*u.day)\n\n # For a map that has all the edges off disk, the function should\n # return just the edges of the map - no solar rotation applied.\n for this_map in (aia171_test_map, all_off_disk_map):\n edges = map_edges(this_map)\n for this_edge in range(0, 4):\n pixels = edges[this_edge]\n res = _rotate_submap_edge(this_map, pixels, observer)\n assert all(res.Tx == (this_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)\n assert all(res.Ty == (this_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)\n\n # For an on disk map, all the edges should change\n edges = map_edges(all_on_disk_map)\n for this_edge in range(0, 4):\n pixels = edges[this_edge]\n res = _rotate_submap_edge(all_on_disk_map, pixels, observer)\n assert all(res.Tx != (all_on_disk_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)\n assert all(res.Ty != (all_on_disk_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)\n\n # For the limb map, two of the edges move and two do not\n edges = map_edges(straddles_limb_map)\n for this_edge in (0, 3): # Top and right edges do not move\n pixels = edges[this_edge]\n res = _rotate_submap_edge(straddles_limb_map, pixels, observer)\n assert all(res.Tx == (straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)\n assert all(res.Ty == (straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)\n\n for this_edge in (1, 2): # Bottom and left edges do move\n pixels = edges[this_edge]\n res = _rotate_submap_edge(straddles_limb_map, pixels, observer)\n assert all(res.Tx != (straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Tx)\n assert all(res.Ty != 
(straddles_limb_map.pixel_to_world(pixels[:, 0], pixels[:, 1])).Ty)\n\n\ndef test_get_extreme_position():\n coords = SkyCoord([-1, 0, 1, np.nan]*u.arcsec, [-2, 0, 2, -np.nan]*u.arcsec, frame=frames.Helioprojective)\n\n with pytest.warns(RuntimeWarning, match='All-NaN axis encountered'):\n assert _get_extreme_position(coords, 'Tx', operator=np.nanmin) == -1\n assert _get_extreme_position(coords, 'Ty', operator=np.nanmin) == -2\n\n assert _get_extreme_position(coords, 'Tx', operator=np.nanmax) == 1\n assert _get_extreme_position(coords, 'Ty', operator=np.nanmax) == 2\n\n with pytest.raises(ValueError):\n _get_extreme_position(coords, 'lon', operator=np.nanmax)\n\n\ndef test_get_bounding_coordinates():\n coords = SkyCoord([-1, 0, 1] * u.arcsec, [-2, 0, 2] * u.arcsec, frame=frames.Helioprojective,\n observer=get_earth(\"1999-09-13 00:00:00\"))\n bl, tr = _get_bounding_coordinates(coords)\n\n assert bl.Tx == -1*u.arcsec\n assert bl.Ty == -2*u.arcsec\n assert bl.observer == coords[0].observer\n\n assert tr.Tx == 1*u.arcsec\n assert tr.Ty == 2*u.arcsec\n assert tr.observer == coords[0].observer\n\n\ndef test_warp_sun_coordinates(all_on_disk_map):\n # Define an observer\n new_observer = get_earth(all_on_disk_map.date + 6*u.hr)\n\n dummy_array = np.zeros((500, 2))\n\n # Call the warp\n xy2 = _warp_sun_coordinates(dummy_array, all_on_disk_map, new_observer)\n\n # Test the properties of the output\n assert xy2.shape == dummy_array.shape\n assert isinstance(xy2, np.ndarray)\n\n # Test the values - values are not independently found\n # We are passing in 500 pairs of (0,0) so all the output pixels should be the same\n np.testing.assert_almost_equal(xy2[:, 0], -2.08384686, decimal=2)\n np.testing.assert_almost_equal(xy2[:, 1], -0.23927568, decimal=2)\n\n\[email protected]_compare\ndef test_differential_rotation(aia171_test_map):\n with pytest.warns(UserWarning, match=\"Using 'time' assumes an Earth-based observer\"):\n rot_map = differential_rotate(aia171_test_map, time=2*u.day)\n return rot_map.data\n", "import os.path\nimport datetime\nimport tempfile\n\nimport numpy as np\nimport pandas\nimport pytest\n\nimport astropy.units as u\nfrom astropy.time import TimeDelta\n\nfrom sunpy import timeseries\nfrom sunpy.data.test import rootdir\nfrom sunpy.instr import lyra\nfrom sunpy.time import is_time_equal, parse_time\n\n# Define location for test LYTAF database files\nTEST_DATA_PATH = rootdir\n\n# Define some test data for test_remove_lytaf_events()\nTIME = parse_time(np.array([datetime.datetime(2013, 2, 1) + datetime.timedelta(minutes=i)\n for i in range(120)]))\nCHANNELS = [np.zeros(len(TIME)) + 0.4, np.zeros(len(TIME)) + 0.1]\nEMPTY_LYTAF = np.empty((0,), dtype=[(\"insertion_time\", object),\n (\"begin_time\", object),\n (\"reference_time\", object),\n (\"end_time\", object),\n (\"event_type\", object),\n (\"event_definition\", object)])\nLYTAF_TEST = np.append(\n EMPTY_LYTAF,\n np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371459961)),\n parse_time(datetime.datetime.utcfromtimestamp(1359677220)),\n parse_time(datetime.datetime.utcfromtimestamp(1359677250)),\n parse_time(datetime.datetime.utcfromtimestamp(1359677400)),\n \"LAR\", \"Large Angle Rotation.\")],\n dtype=EMPTY_LYTAF.dtype))\nLYTAF_TEST = np.append(\n LYTAF_TEST,\n np.array([(parse_time(datetime.datetime.utcfromtimestamp(1371460063)),\n parse_time(datetime.datetime.utcfromtimestamp(1359681764)),\n parse_time(datetime.datetime.utcfromtimestamp(1359682450)),\n parse_time(datetime.datetime.utcfromtimestamp(1359683136)),\n \"UV 
occ.\", \"Occultation in the UV spectrum.\")],\n dtype=LYTAF_TEST.dtype))\n\n\[email protected]_data\ndef test_split_series_using_lytaf():\n \"\"\"\n test the downloading of the LYTAF file and subsequent queries.\n \"\"\"\n # test split_series_using_lytaf\n # construct a dummy signal for testing purposes\n basetime = parse_time('2010-06-13 02:00')\n seconds = 3600\n dummy_time = [basetime + TimeDelta(s*u.second) for s in range(seconds)]\n dummy_data = np.random.random(seconds)\n\n lytaf_tmp = lyra.get_lytaf_events('2010-06-13 02:00', '2010-06-13 06:00',\n combine_files=[\"ppt\"])\n split = lyra.split_series_using_lytaf(dummy_time, dummy_data, lytaf_tmp)\n assert type(split) == list\n assert len(split) == 4\n assert is_time_equal(split[0]['subtimes'][0], parse_time((2010, 6, 13, 2, 0)))\n assert is_time_equal(split[0]['subtimes'][-1], parse_time((2010, 6, 13, 2, 7, 2)))\n assert is_time_equal(split[3]['subtimes'][0], parse_time((2010, 6, 13, 2, 59, 42)))\n assert is_time_equal(split[3]['subtimes'][-1], parse_time((2010, 6, 13, 2, 59, 58)))\n\n # Test case when no LYTAF events found in time series.\n split_no_lytaf = lyra.split_series_using_lytaf(dummy_time,\n dummy_data, LYTAF_TEST)\n assert type(split_no_lytaf) == list\n assert type(split_no_lytaf[0]) == dict\n assert not set(split_no_lytaf[0].keys()).symmetric_difference({'subtimes', 'subdata'})\n assert split_no_lytaf[0][\"subtimes\"] == dummy_time\n assert split_no_lytaf[0][\"subdata\"].all() == dummy_data.all()\n\n\[email protected]\ndef lyra_ts():\n # Create sample TimeSeries\n lyrats = timeseries.TimeSeries(\n os.path.join(rootdir, 'lyra_20150101-000000_lev3_std_truncated.fits.gz'),\n source='LYRA')\n lyrats.data = pandas.DataFrame(index=TIME,\n data={\"CHANNEL1\": CHANNELS[0],\n \"CHANNEL2\": CHANNELS[1],\n \"CHANNEL3\": CHANNELS[0],\n \"CHANNEL4\": CHANNELS[1]})\n return lyrats\n\n\[email protected]_data\ndef test_remove_lytaf_events_from_timeseries(lyra_ts):\n \"\"\"\n Test if artifact are correctly removed from a TimeSeries.\n \"\"\"\n # Check correct errors are raised due to bad input\n with pytest.raises(AttributeError):\n ts_test = lyra.remove_lytaf_events_from_timeseries(\n [], force_use_local_lytaf=True)\n\n # Run remove_artifacts_from_timeseries, returning artifact\n # status\n ts_test, artifact_status_test = \\\n lyra.remove_lytaf_events_from_timeseries(\n lyra_ts, artifacts=[\"LAR\", \"Offpoint\"], return_artifacts=True,\n force_use_local_lytaf=True)\n # Generate expected data by calling _remove_lytaf_events and\n # constructing expected dataframe manually.\n time, channels, artifact_status_expected = lyra._remove_lytaf_events(\n lyra_ts.data.index, channels=[np.asanyarray(lyra_ts.data[\"CHANNEL1\"]),\n np.asanyarray(lyra_ts.data[\"CHANNEL2\"]),\n np.asanyarray(lyra_ts.data[\"CHANNEL3\"]),\n np.asanyarray(lyra_ts.data[\"CHANNEL4\"])],\n artifacts=[\"LAR\", \"Offpoint\"], return_artifacts=True,\n force_use_local_lytaf=True)\n dataframe_expected = pandas.DataFrame(index=time,\n data={\"CHANNEL1\": channels[0],\n \"CHANNEL2\": channels[1],\n \"CHANNEL3\": channels[2],\n \"CHANNEL4\": channels[3]})\n # Assert expected result is returned\n pandas.util.testing.assert_frame_equal(ts_test.data, dataframe_expected)\n assert artifact_status_test.keys() == artifact_status_expected.keys()\n np.testing.assert_array_equal(artifact_status_test[\"lytaf\"],\n artifact_status_expected[\"lytaf\"])\n np.testing.assert_array_equal(artifact_status_test[\"removed\"],\n artifact_status_expected[\"removed\"])\n 
np.testing.assert_array_equal(artifact_status_test[\"not_removed\"],\n artifact_status_expected[\"not_removed\"])\n assert artifact_status_test[\"not_found\"] == \\\n artifact_status_expected[\"not_found\"]\n\n # Run remove_artifacts_from_timeseries, without returning\n # artifact status\n ts_test = \\\n lyra.remove_lytaf_events_from_timeseries(\n lyra_ts, artifacts=[\"LAR\", \"Offpoint\"],\n force_use_local_lytaf=True)\n # Assert expected result is returned\n pandas.util.testing.assert_frame_equal(ts_test.data, dataframe_expected)\n\n\[email protected]()\ndef local_cache(sunpy_cache):\n sunpy_cache = sunpy_cache('sunpy.instr.lyra.cache')\n sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_lyra.db',\n os.path.join(TEST_DATA_PATH, 'annotation_lyra.db'))\n sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_manual.db',\n os.path.join(TEST_DATA_PATH, 'annotation_manual.db'))\n sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_ppt.db',\n os.path.join(TEST_DATA_PATH, 'annotation_ppt.db'))\n sunpy_cache.add('http://proba2.oma.be/lyra/data/lytaf/annotation_science.db',\n os.path.join(TEST_DATA_PATH, 'annotation_science.db'))\n\n\ndef test_remove_lytaf_events_1(local_cache):\n \"\"\"\n Test _remove_lytaf_events() with some artifacts found and others not.\n \"\"\"\n # Run _remove_lytaf_events\n time_test, channels_test, artifacts_status_test = \\\n lyra._remove_lytaf_events(\n TIME, channels=CHANNELS, artifacts=[\"LAR\", \"Offpoint\"],\n return_artifacts=True, force_use_local_lytaf=True)\n # Generated expected result\n bad_indices = np.logical_and(TIME >= LYTAF_TEST[\"begin_time\"][0],\n TIME <= LYTAF_TEST[\"end_time\"][0])\n bad_indices = np.arange(len(TIME))[bad_indices]\n time_expected = np.delete(TIME, bad_indices)\n channels_expected = [np.delete(CHANNELS[0], bad_indices),\n np.delete(CHANNELS[1], bad_indices)]\n artifacts_status_expected = {\"lytaf\": LYTAF_TEST, \"removed\": LYTAF_TEST[0],\n \"not_removed\": LYTAF_TEST[1],\n \"not_found\": [\"Offpoint\"]}\n # Assert test values are same as expected\n assert time_test.all() == time_expected.all()\n assert (channels_test[0]).all() == (channels_expected[0]).all()\n assert (channels_test[1]).all() == (channels_expected[1]).all()\n assert artifacts_status_test.keys() == artifacts_status_expected.keys()\n np.testing.assert_array_equal(artifacts_status_test[\"lytaf\"],\n artifacts_status_expected[\"lytaf\"])\n np.testing.assert_array_equal(artifacts_status_test[\"removed\"],\n artifacts_status_expected[\"removed\"])\n np.testing.assert_array_equal(artifacts_status_test[\"not_removed\"],\n artifacts_status_expected[\"not_removed\"])\n assert artifacts_status_test[\"not_found\"] == \\\n artifacts_status_expected[\"not_found\"]\n\n # Test that correct values are returned when channels kwarg not\n # supplied.\n # Run _remove_lytaf_events\n time_test, artifacts_status_test = \\\n lyra._remove_lytaf_events(\n TIME, artifacts=[\"LAR\", \"Offpoint\"],\n return_artifacts=True, force_use_local_lytaf=True)\n # Assert test values are same as expected\n assert time_test.all() == time_expected.all()\n assert artifacts_status_test.keys() == artifacts_status_expected.keys()\n np.testing.assert_array_equal(artifacts_status_test[\"lytaf\"],\n artifacts_status_expected[\"lytaf\"])\n np.testing.assert_array_equal(artifacts_status_test[\"removed\"],\n artifacts_status_expected[\"removed\"])\n np.testing.assert_array_equal(artifacts_status_test[\"not_removed\"],\n artifacts_status_expected[\"not_removed\"])\n assert 
artifacts_status_test[\"not_found\"] == \\\n artifacts_status_expected[\"not_found\"]\n\n\ndef test_remove_lytaf_events_2(local_cache):\n \"\"\"\n Test _remove_lytaf_events() with no user artifacts found.\n \"\"\"\n # Run _remove_lytaf_events\n with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):\n time_test, channels_test, artifacts_status_test = \\\n lyra._remove_lytaf_events(\n TIME, channels=CHANNELS, artifacts=\"Offpoint\",\n return_artifacts=True, force_use_local_lytaf=True)\n # Generated expected result\n time_expected = TIME\n channels_expected = CHANNELS\n artifacts_status_expected = {\"lytaf\": LYTAF_TEST, \"removed\": EMPTY_LYTAF,\n \"not_removed\": LYTAF_TEST,\n \"not_found\": [\"Offpoint\"]}\n # Assert test values are same as expected\n assert np.all(time_test == time_expected)\n assert (channels_test[0]).all() == (channels_expected[0]).all()\n assert (channels_test[1]).all() == (channels_expected[1]).all()\n assert artifacts_status_test.keys() == artifacts_status_expected.keys()\n np.testing.assert_array_equal(artifacts_status_test[\"lytaf\"],\n artifacts_status_expected[\"lytaf\"])\n np.testing.assert_array_equal(artifacts_status_test[\"removed\"],\n artifacts_status_expected[\"removed\"])\n np.testing.assert_array_equal(artifacts_status_test[\"not_removed\"],\n artifacts_status_expected[\"not_removed\"])\n assert artifacts_status_test[\"not_found\"] == \\\n artifacts_status_expected[\"not_found\"]\n\n # Test correct values are returned when return_artifacts kwarg not\n # supplied.\n # Case 1: channels kwarg is True\n # Run _remove_lytaf_events\n with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):\n time_test, channels_test = lyra._remove_lytaf_events(\n TIME, channels=CHANNELS, artifacts=[\"Offpoint\"], force_use_local_lytaf=True)\n assert np.all(time_test == time_expected)\n assert (channels_test[0]).all() == (channels_expected[0]).all()\n assert (channels_test[1]).all() == (channels_expected[1]).all()\n # Case 2: channels kwarg is False\n # Run _remove_lytaf_events\n with pytest.warns(UserWarning, match='None of user supplied artifacts were found.'):\n time_test = lyra._remove_lytaf_events(\n TIME, artifacts=[\"Offpoint\"], force_use_local_lytaf=True)\n assert np.all(time_test == time_expected)\n\n\ndef test_remove_lytaf_events_3(local_cache):\n \"\"\"\n Test if correct errors are raised by _remove_lytaf_events().\n \"\"\"\n with pytest.raises(TypeError):\n lyra._remove_lytaf_events(TIME, channels=6, artifacts=[\"LAR\"],\n force_use_local_lytaf=True)\n with pytest.raises(ValueError):\n lyra._remove_lytaf_events(TIME,\n force_use_local_lytaf=True)\n with pytest.raises(TypeError):\n lyra._remove_lytaf_events(TIME, artifacts=[6],\n force_use_local_lytaf=True)\n with pytest.raises(ValueError):\n lyra._remove_lytaf_events(TIME,\n artifacts=[\"LAR\", \"incorrect artifact type\"],\n force_use_local_lytaf=True)\n\n\ndef test_get_lytaf_events(local_cache):\n \"\"\"\n Test if LYTAF events are correctly downloaded and read in.\n \"\"\"\n # Run get_lytaf_events\n lytaf_test = lyra.get_lytaf_events(\"2008-01-01\", \"2014-01-01\",\n force_use_local_lytaf=True)\n # Form expected result of extract_combined_lytaf\n insertion_time = [datetime.datetime.utcfromtimestamp(1371459961),\n datetime.datetime.utcfromtimestamp(1371460063),\n datetime.datetime.utcfromtimestamp(1371460411),\n datetime.datetime.utcfromtimestamp(1371460493),\n datetime.datetime.utcfromtimestamp(1371460403),\n 
datetime.datetime.utcfromtimestamp(1371470988),\n datetime.datetime.utcfromtimestamp(1371211791),\n datetime.datetime.utcfromtimestamp(1371212303)]\n begin_time = [datetime.datetime.utcfromtimestamp(1359677220),\n datetime.datetime.utcfromtimestamp(1359681764),\n datetime.datetime.utcfromtimestamp(1360748513),\n datetime.datetime.utcfromtimestamp(1361115900),\n datetime.datetime.utcfromtimestamp(1361980964),\n datetime.datetime.utcfromtimestamp(1368581100),\n datetime.datetime.utcfromtimestamp(1371032084),\n datetime.datetime.utcfromtimestamp(1371158167)]\n reference_time = [datetime.datetime.utcfromtimestamp(1359677250),\n datetime.datetime.utcfromtimestamp(1359682450),\n datetime.datetime.utcfromtimestamp(1360751528),\n datetime.datetime.utcfromtimestamp(1361116200),\n datetime.datetime.utcfromtimestamp(1361983979),\n datetime.datetime.utcfromtimestamp(1368582480),\n datetime.datetime.utcfromtimestamp(1371045475),\n datetime.datetime.utcfromtimestamp(1371162600)]\n end_time = [datetime.datetime.utcfromtimestamp(1359677400),\n datetime.datetime.utcfromtimestamp(1359683136),\n datetime.datetime.utcfromtimestamp(1360754543),\n datetime.datetime.utcfromtimestamp(1361116320),\n datetime.datetime.utcfromtimestamp(1361986994),\n datetime.datetime.utcfromtimestamp(1368583080),\n datetime.datetime.utcfromtimestamp(1371050025),\n datetime.datetime.utcfromtimestamp(1371167100)]\n event_type = [\"LAR\", \"UV occ.\", \"Vis LED on\", \"M Flare\", \"UV LED on\",\n \"X Flare\", \"Off-limb event\", \"Unexplained feature\"]\n event_description = [\"Large Angle Rotation.\",\n \"Occultation in the UV spectrum.\",\n \"Visual LED is turned on.\",\n \"M class solar flare.\",\n \"UV LED is turned on.\",\n \"X class solar flare.\",\n \"Off-limb event in SWAP.\",\n \"Unexplained feature.\"]\n lytaf_expected = np.empty((8,), dtype=[(\"insertion_time\", object),\n (\"begin_time\", object),\n (\"reference_time\", object),\n (\"end_time\", object),\n (\"event_type\", object),\n (\"event_definition\", object)])\n lytaf_expected[\"insertion_time\"] = insertion_time\n lytaf_expected[\"begin_time\"] = begin_time\n lytaf_expected[\"reference_time\"] = reference_time\n lytaf_expected[\"end_time\"] = end_time\n lytaf_expected[\"event_type\"] = event_type\n lytaf_expected[\"event_definition\"] = event_description\n # Assert that extract_combined_lytaf gives the right result\n np.testing.assert_array_equal(lytaf_test, lytaf_expected)\n\n # Check correct error is raised if names of different lytaf files\n # are incorrectly input.\n with pytest.raises(ValueError):\n lytaf_test = lyra.get_lytaf_events(\"2008-01-01\", \"2014-01-01\",\n combine_files=[\"gigo\"],\n force_use_local_lytaf=True)\n\n\ndef test_get_lytaf_event_types(local_cache):\n \"\"\"\n Test that LYTAF event types are printed.\n \"\"\"\n lyra.get_lytaf_event_types()\n\n\ndef test_lytaf_event2string():\n \"\"\"\n Test _lytaf_event2string() associates correct numbers and events.\n \"\"\"\n out_test = lyra._lytaf_event2string(list(range(12)))\n assert out_test == ['LAR', 'N/A', 'UV occult.', 'Vis. 
occult.', 'Offpoint',\n 'SAA', 'Auroral zone', 'Moon in LYRA', 'Moon in SWAP',\n 'Venus in LYRA', 'Venus in SWAP']\n out_test_single = lyra._lytaf_event2string(1)\n assert out_test_single == ['LAR']\n\n\ndef test_prep_columns():\n \"\"\"\n Test whether _prep_columns correctly prepares data.\n \"\"\"\n # Generate simple input data\n time_input = TIME[0:2]\n time_input.precision = 9\n channels_input = [CHANNELS[0][0:2], CHANNELS[1][0:2]]\n filecolumns_input = [\"time\", \"channel0\", \"channel1\"]\n\n # Test case when channels and filecolumns are supplied by user.\n string_time_test, filecolumns_test = lyra._prep_columns(\n time_input, channels_input, filecolumns_input)\n # Generate expected output and verify _prep_columns() works\n string_time_expected = np.array(time_input.isot)\n filecolumns_expected = [\"time\", \"channel0\", \"channel1\"]\n np.testing.assert_array_equal(string_time_test, string_time_expected)\n assert filecolumns_test == filecolumns_expected\n\n # Test case when channels supplied by user by not filecolumns\n string_time_test, filecolumns_test = lyra._prep_columns(time_input,\n channels_input)\n np.testing.assert_array_equal(string_time_test, string_time_expected)\n assert filecolumns_test == filecolumns_expected\n\n # Test case when neither channels nor filecolumns supplied by user\n string_time_test, filecolumns_test = lyra._prep_columns(time_input)\n np.testing.assert_array_equal(string_time_test, string_time_expected)\n assert filecolumns_test == [\"time\"]\n\n # Test correct exceptions are raised\n with pytest.raises(TypeError):\n string_time_test, filecolumns_test = lyra._prep_columns(\n time_input, channels_input, [\"channel0\", 1])\n with pytest.raises(ValueError):\n string_time_test = lyra._prep_columns(time_input,\n filecolumns=filecolumns_input)\n", "import platform\nimport textwrap\nfrom unittest import mock\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pytest\n\nimport sunpy.instr.rhessi as rhessi\nimport sunpy.io\nimport sunpy.map\nfrom sunpy.data.test import get_test_filepath\nfrom sunpy.time import is_time_equal, parse_time\n\n\[email protected]\ndef cross_month_timerange():\n \"\"\"\n Time range which crosses a month boundary.\n\n Dbase files are monthly therefore this is to make sure that two\n dbase files are returned.\n \"\"\"\n return sunpy.time.TimeRange((\"2016/01/25\", \"2016/02/05\"))\n\n\ndef test_backprojection():\n \"\"\"\n Test that backprojection returns a map with the expected time.\n \"\"\"\n test_filename = 'hsi_calib_ev_20020220_1106_20020220_1106_25_40.fits'\n amap = rhessi.backprojection(get_test_filepath(test_filename))\n assert isinstance(amap, sunpy.map.GenericMap)\n assert is_time_equal(amap.date, parse_time((2002, 2, 20, 11, 6, 21)))\n\n\ndef test_parse_obssum_dbase_file():\n fname = get_test_filepath(\"hsi_obssumm_filedb_201104.txt\")\n obssum = rhessi.parse_observing_summary_dbase_file(fname)\n assert obssum['filename'][0] == 'hsi_obssumm_20110401_043.fit'\n assert obssum['filename'][-1] == 'hsi_obssumm_20110430_029.fit'\n\n assert obssum['orb_st'][0] == 0\n assert obssum['orb_st'][-1] == 0\n\n assert obssum['orb_end'][0] == 0\n assert obssum['orb_end'][-1] == 0\n\n assert obssum['start_time'][0] == parse_time((2011, 4, 1, 0, 0, 0))\n assert obssum['start_time'][-1] == parse_time((2011, 4, 30, 0, 0, 0))\n\n assert obssum['end_time'][0] == parse_time((2011, 4, 2, 0, 0, 0))\n assert obssum['end_time'][-1] == parse_time((2011, 5, 1, 0, 0, 0))\n\n assert obssum['status_flag'][0] == 0\n assert 
obssum['status_flag'][-1] == 0\n\n assert obssum['npackets'][0] == 0\n assert obssum['npackets'][-1] == 0\n\n\ndef test_parse_observing_summary_dbase_file():\n \"\"\"\n Test that we get the observing summary database file with the content we\n expect.\n \"\"\"\n obssum = rhessi.parse_observing_summary_dbase_file(get_test_filepath(\"hsi_obssumm_filedb_201104.txt\"))\n\n assert obssum['filename'][0][0:20] == 'hsi_obssumm_20110401'\n assert obssum['filename'][1][0:20] == 'hsi_obssumm_20110402'\n\n assert obssum['orb_st'][0] == 0\n assert obssum['orb_st'][-1] == 0\n\n assert obssum['orb_end'][0] == 0\n assert obssum['orb_end'][-1] == 0\n\n assert obssum['start_time'][0] == parse_time((2011, 4, 1, 0, 0, 0))\n assert obssum['start_time'][-1] == parse_time((2011, 4, 30, 0, 0, 0))\n\n assert obssum['end_time'][0] == parse_time((2011, 4, 2, 0, 0, 0))\n assert obssum['end_time'][-1] == parse_time((2011, 5, 1, 0, 0, 0))\n\n assert obssum['status_flag'][0] == 0\n assert obssum['status_flag'][-1] == 0\n\n assert obssum['npackets'][0] == 0\n assert obssum['npackets'][-1] == 0\n\n\ndef test_get_parse_obssum_hdulist():\n hdulist = sunpy.io.read_file(get_test_filepath('hsi_obssumm_20110404_042.fits.gz'))\n header, _data = rhessi.parse_observing_summary_hdulist(hdulist)\n assert header.get('DATE_OBS') == '2011-04-04T00:00:00.000'\n assert header.get('DATE_END') == '2011-04-05T00:00:00.000'\n assert header.get('TELESCOP') == 'HESSI'\n\n\ndef test_uncompress_countrate():\n \"\"\"\n Test that function fails if given uncompressed counts out of range.\n \"\"\"\n # Should only accept bytearr (uncompressed counts must be 0 - 255)\n with pytest.raises(ValueError):\n rhessi.uncompress_countrate(np.array([-1, 300]))\n\n counts = rhessi.uncompress_countrate(np.array([0, 128, 255]))\n\n # Valid min, max\n assert counts[0] == 0\n assert counts[2] == 1015792\n\n # Random test value\n assert counts[1] == 4080\n\n\n# Test `rhessi.parse_obssumm_dbase_file(...)`\n\n\ndef hessi_data():\n return textwrap.dedent(\"\"\"\\\n HESSI Filedb File:\n Created: 1972-04-14T12:41:26.000\n Number of Files: 2\n Filename Orb_st Orb_end Start_time End_time Status_flag Npackets Drift_start Drift_end Data source\n hsi_obssumm_19721101_139.fit 7 8 01-Nov-72 00:00:00 02-Nov-72 00:00:00 3 2 0.000 0.000\n hsi_obssumm_19721102_144.fit 9 10 02-Nov-72 00:00:00 03-Nov-72 00:00:00 4 1 0.000 0.000\n \"\"\")\n\n\ndef test_parse_observing_summary_dbase_file_mock():\n \"\"\"\n Ensure that all required data are extracted from the RHESSI observing\n summary database file mocked in ``hessi_data()``.\n \"\"\"\n # We need to mock this test differently for <= 3.7.0 and below.\n if LooseVersion(platform.python_version()) <= LooseVersion(\"3.7.0\"):\n mock_file = mock.mock_open()\n mock_file.return_value.__iter__.return_value = hessi_data().splitlines()\n else:\n mock_file = mock.mock_open(read_data=hessi_data())\n\n dbase_data = {}\n with mock.patch('sunpy.instr.rhessi.open', mock_file, create=True):\n dbase_data = rhessi.parse_observing_summary_dbase_file(None)\n\n assert len(dbase_data.keys()) == 7\n\n # verify each of the 7 fields\n assert dbase_data['filename'] == ['hsi_obssumm_19721101_139.fit',\n 'hsi_obssumm_19721102_144.fit']\n assert dbase_data['orb_st'] == [7, 9]\n assert dbase_data['orb_end'] == [8, 10]\n assert dbase_data['start_time'] == [parse_time((1972, 11, 1, 0, 0)),\n parse_time((1972, 11, 2, 0, 0))]\n assert dbase_data['end_time'] == [parse_time((1972, 11, 2, 0, 0)),\n parse_time((1972, 11, 3, 0, 0))]\n assert dbase_data['status_flag'] == [3, 
4]\n assert dbase_data['npackets'] == [2, 1]\n\n\n# Test `rhessi._build_energy_bands(...)`\n\[email protected]\ndef raw_bands():\n \"\"\"\n The RHESSI summary data standard energy bands.\n \"\"\"\n return ['3 - 6', '6 - 12', '12 - 25', '25 - 50', '50 - 100', '100 - 300',\n '300 - 800', '800 - 7000', '7000 - 20000']\n\n\ndef test_build_energy_bands_no_match(raw_bands):\n \"\"\"\n If an energy unit cannot be found in the ``label`` then raise a\n `ValueError`\n \"\"\"\n with pytest.raises(ValueError):\n rhessi._build_energy_bands(label='Energy bands GHz', bands=raw_bands)\n\n\ndef test_build_energy_bands(raw_bands):\n \"\"\"\n Success case.\n \"\"\"\n built_ranges = rhessi._build_energy_bands(label='Energy bands (keV)',\n bands=raw_bands)\n\n assert built_ranges == ['3 - 6 keV', '6 - 12 keV', '12 - 25 keV',\n '25 - 50 keV', '50 - 100 keV', '100 - 300 keV',\n '300 - 800 keV', '800 - 7000 keV',\n '7000 - 20000 keV']\n", "\"\"\"\n======================================================\nSmoothing of timeSeries data using convolution filters\n======================================================\n\nHow to smooth a TimeSeries using a convolution filter\nkernel from `~astropy.convolution` and `~astropy.convolution.convolve`\nfunction.\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom astropy.convolution import convolve, Box1DKernel\n\nfrom sunpy.timeseries import TimeSeries\nfrom sunpy.data.sample import NOAAINDICES_TIMESERIES as noaa_ind\n\n###############################################################################\n# Let's first create a TimeSeries from sample data\nts_noaa_ind = TimeSeries(noaa_ind, source='NOAAIndices')\n\n###############################################################################\n# Now we will extract data values from the TimeSeries and apply a BoxCar filter\n# to get smooth data. Boxcar smoothing is equivalent to taking our signal and\n# using it to make a new signal where each element is the average of w adjacent\n# elements. Here we will use AstroPy’s convolve function with a “boxcar” kernel\n# of width w = 10.\nts_noaa_ind = ts_noaa_ind.add_column(\n 'sunspot SWO Smoothed',\n convolve(ts_noaa_ind.quantity('sunspot SWO'), kernel=Box1DKernel(10))\n)\n\n###############################################################################\n# Plotting original and smoothed timeseries\nplt.ylabel('Sunspot Number')\nplt.xlabel('Time')\nplt.title('Smoothing of Time Series')\nplt.plot(ts_noaa_ind.quantity('sunspot SWO'), label='original data')\nplt.plot(ts_noaa_ind.quantity('sunspot SWO Smoothed'), label='smoothed')\nplt.legend()\nplt.show()\n" ]
[ [ "numpy.linspace", "numpy.testing.assert_almost_equal", "numpy.zeros_like", "numpy.ma.array", "numpy.array", "numpy.zeros" ], [ "numpy.random.random", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "numpy.testing.assert_array_equal", "numpy.delete", "numpy.all", "numpy.asanyarray", "numpy.array", "numpy.logical_and", "numpy.empty" ], [ "numpy.array" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pawelzell/cbtool
[ "8557cca276ccc2d975c66dfd8f0d0e5e7c368d68" ]
[ "myscripts/analyze_interference.py" ]
[ "import datetime\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error\nfrom exp_data import *\nfrom exp_data_utils import RegressionRecord\nfrom read_data.utils import dfTypePair, toSingleRowDF\nimport global_config\n\n\ndef computeInterferenceRegression(exp_series, t1, t2, inverse_throughput_y, fit_intercept, task_limit=None):\n subtract_xs = subtract_ys = 0.\n if not fit_intercept:\n subtract_xs = subtract_ys = 1.\n xs, ys, ys_error = getTrainingData(exp_series, t1, t2, \"tasks\",\n inverse_throughput_y=inverse_throughput_y,\n tasks_limit=task_limit,\n subtract_xs=subtract_xs, subtract_ys=subtract_ys)\n reg = linear_model.LinearRegression(fit_intercept=fit_intercept)\n reg.fit(xs, ys)\n error = mean_squared_error(ys, reg.predict(xs))\n coefs = np.array([reg.intercept_, reg.coef_[0]])\n exp_series.type_pair_to_regression[(t1, t2)] = RegressionRecord(t1, t2, xs, ys, ys_error, reg, coefs, error)\n return coefs\n\n\ndef computeInterferenceRegressionGrid(exp_series, inverse_throughput_y, fit_intercept, task_pair_to_task_limit={}):\n tasks = exp_series.tasks\n n = len(tasks)\n results = np.zeros((n, n))\n df = exp_series.df\n for i, t1 in enumerate(tasks):\n for j, t2 in enumerate(tasks):\n task_limit = task_pair_to_task_limit.get((t1, t2), None)\n if df.loc[dfTypePair(df, t1, t2)].empty:\n results[i, j] = 0\n else:\n coefs = computeInterferenceRegression(exp_series, t1, t2, inverse_throughput_y, fit_intercept,\n task_limit)\n results[i, j] = coefs[1]\n exp_series.interference_matrix = results\n return results\n\n\ndef printInterferenceGridMultipleSeries(exp_series_list, skip_tasks=(), savefig=False):\n tasks = exp_series_list[0].tasks\n n = len(tasks)\n\n def formatLegend(ax, t1, t2, metric, i, j):\n if not i:\n ax.set_title(f\"influence of {t2}\")\n if i == n - 1:\n ax.set_xlabel('number of tasks')\n if not j:\n ax.set_ylabel(f\"{t1} - {metric}\")\n\n fig, axes = plt.subplots(n, n, figsize=(15., 15.))\n for i, t1 in enumerate(tasks):\n for j, t2 in enumerate(tasks):\n ax = axes[i, j]\n metric = exp_series_list[0].getPerfMetricsForTypeShort(t1)\n formatLegend(ax, t1, t2, metric, i, j)\n for k, exp_series in enumerate(exp_series_list):\n if (t1, t2) in skip_tasks:\n raise KeyError(\"Skip task\")\n try:\n regression = exp_series.type_pair_to_regression[(t1, t2)]\n except KeyError:\n print(f\"WARNING: No experiment regression record for {t1} {t2}\")\n continue\n regression.plot(ax)\n if savefig:\n file_name = f\"interference_grid_{exp_series_list[0].name}\"\n file_name = os.path.join(global_config.PLOTS_DIR, file_name)\n plt.savefig(file_name)\n print(f\"Figure saved to {file_name}\")\n else:\n plt.show()\n\n\ndef printInterferenceGrid(exp_series_list, skip_tasks=(), savefig=False):\n printInterferenceGridMultipleSeries([exp_series_list], skip_tasks, savefig)\n\n\ndef analyzeInterferenceGridMultipleSeries(exp_series_list, skip_tasks, inverse_throughput_y, fit_intercept,\n task_pair_to_task_limit, savefig):\n for exp_series in exp_series_list:\n computeInterferenceRegressionGrid(exp_series, inverse_throughput_y, fit_intercept, task_pair_to_task_limit)\n printInterferenceGridMultipleSeries(exp_series_list, skip_tasks, savefig)\n\n\ndef analyzeInterferenceGrid(exp_series, skip_tasks=(), inverse_throughput_y=True, fit_intercept=False,\n tasks_limit=None, savefig=False):\n types = exp_series.tasks\n task_pair_to_task_limit = {(t1, t2): tasks_limit for t1 in types for t2 in types}\n return 
analyzeInterferenceGridMultipleSeries([exp_series], skip_tasks, inverse_throughput_y, fit_intercept,\n task_pair_to_task_limit, savefig)\n\n\ndef computeExpectedCost(loads, coeffs):\n loads = np.array(loads)\n n = loads.size\n result = []\n for i in range(n):\n cost = 0.\n if loads[i] > 0:\n cost = 1.\n loads[i] -= 1.\n for j in range(n):\n cost += loads[j] * coeffs[i][j]\n loads[i] += 1.\n result.append(cost)\n return result\n\n\n# DF cols: tasks, ai_no, type, expected_cost\ndef computeExpectedCostDf(type_list, ai_types, interference_matrix):\n loads = np.zeros(len(ai_types))\n type_to_id = {t: i for i, t in enumerate(ai_types)}\n df = pd.DataFrame()\n d = dict()\n for n_tasks, t in enumerate(type_list, start=1):\n d[\"tasks\"] = n_tasks\n t_id = type_to_id[t]\n loads[t_id] += 1.\n cost_vector = computeExpectedCost(loads, interference_matrix)\n for ai_no in range(1, n_tasks+1):\n d[\"ai_no\"] = ai_no\n d[\"type\"] = type_list[ai_no-1]\n t_id2 = type_to_id[d[\"type\"]]\n d[\"expected_cost\"] = cost_vector[t_id2]\n df = df.append(toSingleRowDF(d), ignore_index=True)\n return df\n\n\ndef plotInterferenceActualVsExpected(exp_series, exp, interference_matrix):\n types_list = exp.trace.types\n cost_df = computeExpectedCostDf(types_list, exp_series.tasks, interference_matrix)\n t = types_list[0]\n select_rows = cost_df[\"ai_no\"] == 1\n expected_df = cost_df.loc[select_rows, [\"tasks\", \"expected_cost\"]]\n\n df = exp_series.df\n select_rows = (df[\"t1\"] == exp.t1) & (df[\"t2\"] == exp.t2) & (df[\"ai_no\"] == 1)\n actual_cost_row = ai_info.getPerfColName(t)\n actual_df = df.loc[select_rows, [\"tasks\", actual_cost_row]]\n\n fig, ax = plt.subplots()\n ax.set_title(\"Performance cost\")\n ax.set_xlabel(\"Number of tasks running\")\n ax.scatter(expected_df[\"tasks\"].values, expected_df[\"expected_cost\"].values, label=\"predicted\")\n ax.scatter(actual_df[\"tasks\"].values, actual_df[actual_cost_row].values, label=\"observed\")\n plt.legend()\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.subplots", "pandas.DataFrame", "matplotlib.pyplot.savefig", "sklearn.linear_model.LinearRegression", "numpy.array", "numpy.zeros", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
adaj/geohunter
[ "fbbb7492f79fa838c2080c90d5e8ac2066d29568" ]
[ "geohunter/osm.py" ]
[ "\"\"\"geohunter.osm\n\nThis module wraps requests to OpenStreetMap's Overpass API with an interface for\nthe GeoPandas data structures. The OpenStreetMap has a data model based on nodes,\nways and relations. The geometric data structures available in geopandas are points,\nlines and polygons (but also multipoints, multilines and multipolygons).\n\nFor a complete list of data categories available (\"map features\"), please\nlook the OpenStreetMap.\n\"\"\"\n\nfrom time import time, sleep\nfrom pandas import DataFrame, json_normalize\nfrom geopandas import GeoDataFrame, sjoin\nfrom shapely.ops import polygonize, linemerge\nfrom shapely.geometry import Point, Polygon, LineString\nfrom shapely.geometry import MultiPolygon, MultiPoint\nfrom requests import Session\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\n\nimport geohunter.util\n\nMAP_FEATURES_KEYS = ['aerialway', 'aeroway', 'amenity', 'barrier', 'boundary',\n 'admin_level', 'building', 'craft', 'emergency',\n 'geological', 'highway', 'sidewalk', 'cycleway', 'busway',\n 'bicycle_road', 'service', 'historic', 'landuse',\n 'leisure', 'man_made', 'military', 'natural', 'office',\n 'place', 'power', 'public_transport', 'railway', 'route',\n 'shop', 'sport', 'telecom', 'tourism', 'waterway',\n 'water', 'name', 'healthcare']\n\n\ndef timelog(func):\n def wrapper(data_func, *args, **kwargs):\n t0 = time()\n result = func(data_func, *args, **kwargs)\n tf = time()\n print(f\"Geohunter: [TIMELOG] {func.__name__} -- {kwargs} -- Completed in {round(tf - t0, 4)}s\")\n return result\n return wrapper\n\n\nclass Eagle:\n \"\"\"\n `Eagle` is the facade for requesting data given the map\n keys available with the `request_overpass()` method. This class also\n implements a `get()` method which return the data required in a single\n pandas DataFrame that has a geometric attribute that makes it a geopandas\n GeoDataFrame (please consult geopandas documentation for more details).\n \"\"\"\n\n def __init__(self):\n self.session = requests_retry_session()\n\n def __enter__(self):\n return self\n\n @timelog\n def get(self, bbox, as_points=False,\n largest_geom=False, sjoin_op='intersects',\n **map_features):\n \"\"\"Returns points-of-interest data from OpenStreetMap.\n as geopandas.GeoDataFrame with a set of points-of-interest\n requested. For a list of complete map features keys\n and elements available on the API, please consult documentation\n https://wiki.openstreetmap.org/wiki/Map_Features.\n\n Parameters\n ----------\n bbox : str or geopandas.GeoDataFrame\n If str, follow the structure (south_lat,west_lon,north_lat,east_lon),\n but if you prefer to pass a geopandas.GeoDataFrame, the bbox will be\n defined as the maximum and minimum values delimited by the geometry.\n\n **map_features : **kwargs\n requested described in map features.\n Example: amenity=['hospital', 'police'].\n\n Returns\n -------\n geopandas.GeoDataFrame\n GeoDataFrame with all points-of-interest requested.\n\n Example\n -------\n >>> df = Eagle().get(bbox='(-5.91,-35.29,-5.70,-35.15)',\n amenity=['hospital' , 'police'], natural='*')\n \"\"\"\n for map_feature in map_features:\n if map_feature not in MAP_FEATURES_KEYS:\n raise Exception(f\"{map_feature} is not a valid map feature. 
Please \" \\\n + \"consult https://wiki.openstreetmap.org/wiki/Map_Features.\")\n poi_data = DataFrame()\n for mf_key in map_features:\n if isinstance(map_features[mf_key], list):\n pass\n elif isinstance(map_features[mf_key], str):\n map_features[mf_key] = [map_features[mf_key]]\n elif isinstance(map_features[mf_key], int):\n map_features[mf_key] = [str(map_features[mf_key])]\n else:\n raise Exception(f'Map feature {mf_key}={map_features[mf_key]}. ' \\\n + 'Please consult https://wiki.openstreetmap.org/wiki/Map_Features.')\n if mf_key == 'admin_level' and sjoin_op == 'intersects':\n if not isinstance(bbox, GeoDataFrame):\n raise ValueError(\"To get admin_level geometries, it's \" \\\n + 'required to have bbox as a GeoDataframe.')\n else:\n # forcing 'within' to get admin_level inside a geometry,\n # intersection could get undesired neighbor regions\n sjoin_op = 'within'\n for mf_item in map_features[mf_key]:\n print(f'Requesting {mf_key}={mf_item}')\n result = self.request_overpass(bbox,\n map_feature_key=mf_key,\n map_feature_item=mf_item)\n print('Done. Wait for 15s to start the next request.')\n sleep(15)\n result_gdf = overpass_result_to_geodf(result, as_points)\n result_gdf['key'] = mf_key\n poi_data = poi_data.append(result_gdf)\n poi_data['item'] = poi_data.apply(lambda x: x['tags'][x['key']], axis=1)\n poi_data = poi_data.reset_index(drop=True)\n poi_data['name'] = json_normalize(poi_data['tags'])['name']\n poi_data = GeoDataFrame(poi_data)\n if isinstance(bbox, GeoDataFrame):\n poi_ix = sjoin(poi_data, bbox, op=sjoin_op).index.unique()\n poi_data = poi_data.loc[poi_ix]\n if largest_geom:\n return poi_data.iloc[[poi_data['geometry'].area.argmax()]]\n return poi_data\n\n def request_overpass(self, bbox, map_feature_key, map_feature_item):\n \"\"\"\n Return the json resulted from *a single* request on Overpass API.\n\n It generates the Overpass QL query from the map features\n defined, including nodes, ways and relations, and request\n data from the API. Please consult OpenStreetMap documentation\n (https://wiki.openstreetmap.org/wiki/Map_Features) for a full\n list of map features available.\n\n Parameters\n ----------\n bbox : str or geopandas.GeoDataFrame\n If str, follow the structure (south_lat,west_lon,north_lat,east_lon),\n but if you prefer to pass a geopandas.GeoDataFrame, the bbox will be\n defined as the maximum and minimum values delimited by the geometry.\n\n map_feature_key: str\n Map key item from OpenStreetMap, such as \"amenity\", \"highway\" etc.\n\n map_feature_item: str\n\n Returns\n -------\n dict\n Data requested in output format of Overpass API.\n \"\"\"\n bbox = geohunter.util.parse_bbox(bbox)\n query_string = ''\n for i in ['node', 'way', 'relation']:\n if map_feature_item == '*':\n query_string += f'{i}[\"{map_feature_key}\"]{bbox};'\n else:\n query_string += f'{i}[\"{map_feature_key}\"=\"{map_feature_item}\"]{bbox};'\n query_string = f'[out:json];({query_string});out+geom;'\n result = self.session.get(\n f'http://overpass-api.de/api/interpreter?data={query_string}')\n if result.status_code != 200:\n if result.status_code == 429:\n raise Exception('Too many requests. 
Please wait a couple minutes to retry.')\n raise Exception(f\"HTTP {result.status_code}, error.\")\n result = result.json()\n if len(result['elements']) == 0:\n print(query_string)\n raise Exception('Request made with no data returned , ' \\\n + 'please check try with other parameters.')\n return result\n\n def debug__find_geom_not_being_successfully_parsed(self, bbox, key, item):\n failed = self.request_overpass(bbox,\n map_feature_key=key,\n map_feature_item=item)\n elements_df = DataFrame(failed['elements'])\n for i in elements_df.iterrows():\n try:\n parse_geometry(i[1])\n except:\n print(f'{key}={item} id#{i[0]}')\n return i[1]\n\n def close(self):\n self.session.close()\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n\ndef requests_retry_session(retries=3, backoff_factor=0.5, session=None,\n status_forcelist=(500, 503, 502, 504)):\n session = session or Session()\n retry = Retry(total=retries, read=retries, connect=retries,\n backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session\n\n\ndef overpass_result_to_geodf(result, as_points=False):\n \"\"\"\n Transforms the result from Overpass request to GeoDataFrame.\n \"\"\"\n elements_df = DataFrame(result['elements'])\n elements_df['geometry'] = elements_df.apply(parse_geometry, axis=1)\n elements_df = GeoDataFrame(elements_df, crs={'init': 'epsg:4326'})\n if as_points:\n elements_df['geometry'] = elements_df['geometry'].centroid\n return elements_df[['type', 'id', 'tags', 'geometry']]\n\n\ndef parse_geometry(x_elements_df):\n \"\"\"\n Transforms coordinates into shapely objects.\n \"\"\"\n if x_elements_df['type'] == 'node':\n geom = Point([x_elements_df['lon'], x_elements_df['lat']])\n elif x_elements_df['type'] == 'way':\n line = [(i['lon'], i['lat']) for i in x_elements_df['geometry']]\n if line[0] == line[-1]:\n geom = Polygon(line)\n else:\n geom = LineString(line)\n else: # relation\n geom = parse_relation(x_elements_df['members'])\n return geom\n\n\ndef parse_relation(x_members):\n \"\"\"\n Transforms coordinates of 'relation' objects into shapely objects.\n \"\"\"\n if not isinstance(x_members, list):\n return x_members\n shell, holes, lines, points = [], [], [], []\n # Iterating through all geometries inside an element of the\n # Overpass relation ouput, which often are composed by\n # many internal geometries. 
For example, some polygons are formed with\n # sets of lines, sometimes unordered.\n for x_m in x_members:\n line = [(p['lon'], p['lat']) for p in x_m.get(\"geometry\", [])]\n if not line: # empty geometry or it's a node\n if x_m.get('type', None) == 'node':\n points.append((x_m['lon'], x_m['lat']))\n elif line[0] == line[-1]: # explicit polygons\n if x_m['role'] == 'outer':\n shell.append(line)\n elif x_m['role'] == 'inner':\n holes.append(line)\n else: # these may be lines or a polygon formed by lines\n lines.append(LineString(line))\n # We chose to return in order of priority (1) members that\n # have both shell and lines, then those that have only\n # shell, then members that are formed by lines, and if\n # there aren't shells or lines and the member is formed\n # by a node/point, then it's returned.\n if shell and lines:\n polygons = shell + list(polygonize(lines))\n final_geom = MultiPolygon([[s, []] for s in polygons])\n elif shell:\n if len(shell) > 1:\n # Here we don't treat multipolygons with multiholes.\n # Who want this, please implement for us :D\n final_geom = MultiPolygon([[s, []] for s in shell])\n else:\n final_geom = Polygon(shell[0], holes)\n elif lines:\n if len(lines) < 3:\n # Two lines or less doesn't form a polygon. If\n # there are two lines, these are merged.\n final_geom = linemerge(lines)\n else:\n # Lines may not be sequentially organized,\n # so one cannot simply Polygon(lines). Luckily,\n # shapely saved us with shapely.ops.polygonize.\n polygon = list(polygonize(lines))\n if len(polygon) > 1:\n final_geom = MultiPolygon([s for s in polygon])\n else:\n final_geom = polygon[0]\n elif points:\n if len(points) > 1:\n final_geom = MultiPoint(points)\n else:\n final_geom = Point(points[0])\n else:\n print(x_members)\n print('Relation not correctly parsed. Report this in error in the repository.')\n return Point([0,0])\n return final_geom\n" ]
[ [ "pandas.json_normalize", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0" ], "scipy": [], "tensorflow": [] } ]
sbaktha/covid19_pocus_ultrasound
[ "876558a118c7afbe7a520efcc07581af6f8ffbb2" ]
[ "pocovidnet/pocovidnet/evaluate_covid19.py" ]
[ "\"\"\"\nEvaluation class that performs forward pass through trained models\n\"\"\"\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom pocovidnet import MODEL_FACTORY\n\nDIR_PATH = os.path.dirname(os.path.realpath(__file__))\nNUM_FOLDS = 5\n\n\nclass Evaluator(object):\n\n def __init__(self, ensemble=True, split=None, model_id=None):\n \"\"\"\n Constructor of COVID model evaluator class.\n \n Arguments:\n ensemble {str} -- Whether the model ensemble is used.\n \"\"\"\n self.root = os.path.join('/', *DIR_PATH.split('/')[:-1])\n self.split = split\n self.ensemble = ensemble\n if model_id is None:\n self.model_id = 'vgg_base'\n elif model_id not in MODEL_FACTORY.keys():\n raise ValueError(\n f'Wrong model {model_id}. Options are:{MODEL_FACTORY.keys()}'\n )\n else:\n self.model_id = model_id\n\n if ensemble:\n # retores 5 weight paths\n self.weights_paths = [\n os.path.join(\n self.root, 'trained_models', 'fold_' + str(fold),\n \"variables\", \"variables\"\n ) for fold in range(NUM_FOLDS)\n ]\n else:\n if split is None or split < 0 or split > 4:\n raise ValueError(f'Provide split between 0 and 4, not {split}')\n fold = split\n self.weights_paths = [\n os.path.join(\n self.root, 'trained_models', 'fold_' + str(fold),\n \"variables\", \"variables\"\n )\n ]\n\n self.class_mappings = ['covid', 'pneunomia', 'regular']\n self.models = [\n MODEL_FACTORY[self.model_id]()\n for _ in range(len(self.weights_paths))\n ]\n\n # restore weights\n try:\n for model, path in zip(self.models, self.weights_paths):\n model.load_weights(path)\n except Exception:\n raise Exception('Error in model restoring.')\n\n print(f'Model restored. Class mappings are {self.class_mappings}')\n\n def __call__(self, image):\n \"\"\"Performs a forward pass through the restored model\n\n Arguments:\n image {np.array} -- Input image on which prediction is performed.\n No size requirements, but the image will be reshaped to 224 x\n 224 pixels (aspec ratio is *not* preserved, so quadratic images\n are preferred).\n\n Returns:\n logits {list} -- Length 3 num_classes). Class probabilities.\n \"\"\"\n\n image = self.preprocess(image)\n predictions = np.squeeze(\n np.stack([model.predict(image) for model in self.models]), axis=1\n )\n return list(np.mean(predictions, axis=0, keepdims=False))\n\n def preprocess(self, image):\n \"\"\"Apply image preprocessing pipeline\n\n Arguments:\n image {np.array} -- Arbitrary shape, quadratic preferred\n\n Returns:\n np.array -- Shape 224,224. Normalized to [0, 1].\n \"\"\"\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(image, (224, 224))\n image = np.expand_dims(np.array(image), 0) / 255.0\n return image\n" ]
[ [ "numpy.array", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Dzinushi/models_1_4
[ "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c", "d7e72793a68c1667d403b1542c205d1cd9b1d17c" ]
[ "research/object_detection/models/faster_rcnn_inception_v2_feature_extractor.py", "research/object_detection/models/ssd_mobilenet_v1_feature_extractor.py", "research/syntaxnet/dragnn/python/wrapped_units.py", "research/compression/entropy_coder/progressive/progressive.py", "research/gan/pix2pix/train.py", "research/adversarial_text/graphs_test.py", "research/object_detection/core/prefetcher_test.py", "research/compression/entropy_coder/core/entropy_coder_single.py", "research/compression/entropy_coder/lib/blocks_operator_test.py", "research/tcn/estimators/svtcn_loss.py", "research/differential_privacy/dp_sgd/dp_optimizer/dp_optimizer.py", "tutorials/image/alexnet/alexnet_benchmark.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Inception V2 Faster R-CNN implementation.\n\nSee \"Rethinking the Inception Architecture for Computer Vision\"\nhttps://arxiv.org/abs/1512.00567\n\"\"\"\nimport tensorflow as tf\n\nfrom object_detection.meta_architectures import faster_rcnn_meta_arch\nfrom nets import inception_v2\n\nslim = tf.contrib.slim\n\n\ndef _batch_norm_arg_scope(list_ops,\n use_batch_norm=True,\n batch_norm_decay=0.9997,\n batch_norm_epsilon=0.001,\n batch_norm_scale=False,\n train_batch_norm=False):\n \"\"\"Slim arg scope for InceptionV2 batch norm.\"\"\"\n if use_batch_norm:\n batch_norm_params = {\n 'is_training': train_batch_norm,\n 'scale': batch_norm_scale,\n 'decay': batch_norm_decay,\n 'epsilon': batch_norm_epsilon\n }\n normalizer_fn = slim.batch_norm\n else:\n normalizer_fn = None\n batch_norm_params = None\n\n return slim.arg_scope(list_ops,\n normalizer_fn=normalizer_fn,\n normalizer_params=batch_norm_params)\n\n\nclass FasterRCNNInceptionV2FeatureExtractor(\n faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):\n \"\"\"Faster R-CNN Inception V2 feature extractor implementation.\"\"\"\n\n def __init__(self,\n is_training,\n first_stage_features_stride,\n batch_norm_trainable=False,\n reuse_weights=None,\n weight_decay=0.0,\n depth_multiplier=1.0,\n min_depth=16):\n \"\"\"Constructor.\n\n Args:\n is_training: See base class.\n first_stage_features_stride: See base class.\n batch_norm_trainable: See base class.\n reuse_weights: See base class.\n weight_decay: See base class.\n depth_multiplier: float depth multiplier for feature extractor.\n min_depth: minimum feature extractor depth.\n\n Raises:\n ValueError: If `first_stage_features_stride` is not 8 or 16.\n \"\"\"\n if first_stage_features_stride != 8 and first_stage_features_stride != 16:\n raise ValueError('`first_stage_features_stride` must be 8 or 16.')\n self._depth_multiplier = depth_multiplier\n self._min_depth = min_depth\n super(FasterRCNNInceptionV2FeatureExtractor, self).__init__(\n is_training, first_stage_features_stride, batch_norm_trainable,\n reuse_weights, weight_decay)\n\n def preprocess(self, resized_inputs):\n \"\"\"Faster R-CNN Inception V2 preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def _extract_proposal_features(self, preprocessed_inputs, scope):\n \"\"\"Extracts first stage RPN features.\n\n Args:\n preprocessed_inputs: A [batch, height, width, channels] float32 tensor\n representing a batch of images.\n scope: A scope name.\n\n Returns:\n rpn_feature_map: A tensor with shape [batch, height, width, depth]\n Raises:\n 
InvalidArgumentError: If the spatial size of `preprocessed_inputs`\n (height or width) is less than 33.\n ValueError: If the created network is missing the required activation.\n \"\"\"\n\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n with tf.control_dependencies([shape_assert]):\n with tf.variable_scope('InceptionV2', reuse=self._reuse_weights):\n # with slim.arg_scope(inception_v2.inception_v2_arg_scope()) as scope:\n with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],\n batch_norm_scale=True,\n train_batch_norm=self._train_batch_norm):\n _, activations = inception_v2.inception_v2_base(\n preprocessed_inputs,\n final_endpoint='Mixed_4e',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n\n return activations['Mixed_4e']\n\n def _extract_box_classifier_features(self, proposal_feature_maps, scope):\n \"\"\"Extracts second stage box classifier features.\n\n Args:\n proposal_feature_maps: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, crop_height, crop_width, depth]\n representing the feature map cropped to each proposal.\n scope: A scope name (unused).\n\n Returns:\n proposal_classifier_features: A 4-D float tensor with shape\n [batch_size * self.max_num_proposals, height, width, depth]\n representing box classifier features for each proposal.\n \"\"\"\n net = proposal_feature_maps\n\n depth = lambda d: max(int(d * self._depth_multiplier), self._min_depth)\n trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)\n\n data_format = 'NHWC'\n concat_dim = 3 if data_format == 'NHWC' else 1\n\n with tf.variable_scope('InceptionV2', reuse=self._reuse_weights):\n # with tf.variable_scope(inception_v2.inception_v2_arg_scope()):\n with slim.arg_scope(\n [slim.conv2d, slim.max_pool2d, slim.avg_pool2d],\n stride=1,\n padding='SAME',\n data_format=data_format):\n with _batch_norm_arg_scope([slim.conv2d, slim.separable_conv2d],\n batch_norm_scale=True,\n train_batch_norm=self._train_batch_norm):\n with tf.variable_scope('Mixed_5a'):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(\n net, depth(128), [1, 1],\n weights_initializer=trunc_normal(0.09),\n scope='Conv2d_0a_1x1')\n branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(\n net, depth(192), [1, 1],\n weights_initializer=trunc_normal(0.09),\n scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],\n scope='Conv2d_0b_3x3')\n branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,\n scope='Conv2d_1a_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.max_pool2d(net, [3, 3], stride=2,\n scope='MaxPool_1a_3x3')\n net = tf.concat([branch_0, branch_1, branch_2], concat_dim)\n\n with tf.variable_scope('Mixed_5b'):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(352), [1, 1],\n scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(\n net, depth(192), [1, 1],\n weights_initializer=trunc_normal(0.09),\n scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],\n scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(\n net, depth(160), [1, 1],\n 
weights_initializer=trunc_normal(0.09),\n scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],\n scope='Conv2d_0b_3x3')\n branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],\n scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')\n branch_3 = slim.conv2d(\n branch_3, depth(128), [1, 1],\n weights_initializer=trunc_normal(0.1),\n scope='Conv2d_0b_1x1')\n net = tf.concat([branch_0, branch_1, branch_2, branch_3],\n concat_dim)\n\n with tf.variable_scope('Mixed_5c'):\n with tf.variable_scope('Branch_0'):\n branch_0 = slim.conv2d(net, depth(352), [1, 1],\n scope='Conv2d_0a_1x1')\n with tf.variable_scope('Branch_1'):\n branch_1 = slim.conv2d(\n net, depth(192), [1, 1],\n weights_initializer=trunc_normal(0.09),\n scope='Conv2d_0a_1x1')\n branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],\n scope='Conv2d_0b_3x3')\n with tf.variable_scope('Branch_2'):\n branch_2 = slim.conv2d(\n net, depth(192), [1, 1],\n weights_initializer=trunc_normal(0.09),\n scope='Conv2d_0a_1x1')\n branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],\n scope='Conv2d_0b_3x3')\n branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],\n scope='Conv2d_0c_3x3')\n with tf.variable_scope('Branch_3'):\n branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')\n branch_3 = slim.conv2d(\n branch_3, depth(128), [1, 1],\n weights_initializer=trunc_normal(0.1),\n scope='Conv2d_0b_1x1')\n proposal_classifier_features = tf.concat(\n [branch_0, branch_1, branch_2, branch_3], concat_dim)\n\n return proposal_classifier_features\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"SSDFeatureExtractor for MobilenetV1 features.\"\"\"\n\nimport tensorflow as tf\n\nfrom object_detection.meta_architectures import ssd_meta_arch\nfrom object_detection.models import feature_map_generators\nfrom object_detection.utils import ops\nfrom nets import mobilenet_v1\n\nslim = tf.contrib.slim\n\n\nclass SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):\n \"\"\"SSD Feature Extractor using MobilenetV1 features.\"\"\"\n\n def __init__(self,\n is_training,\n depth_multiplier,\n min_depth,\n pad_to_multiple,\n conv_hyperparams,\n batch_norm_trainable=True,\n reuse_weights=None):\n \"\"\"MobileNetV1 Feature Extractor for SSD Models.\n\n Args:\n is_training: whether the network is in training mode.\n depth_multiplier: float depth multiplier for feature extractor.\n min_depth: minimum feature extractor depth.\n pad_to_multiple: the nearest multiple to zero pad the input height and\n width dimensions to.\n conv_hyperparams: tf slim arg_scope for conv2d and separable_conv2d ops.\n batch_norm_trainable: Whether to update batch norm parameters during\n training or not. When training with a small batch size\n (e.g. 
1), it is desirable to disable batch norm update and use\n pretrained batch norm params.\n reuse_weights: Whether to reuse variables. Default is None.\n \"\"\"\n super(SSDMobileNetV1FeatureExtractor, self).__init__(\n is_training, depth_multiplier, min_depth, pad_to_multiple,\n conv_hyperparams, batch_norm_trainable, reuse_weights)\n\n def preprocess(self, resized_inputs):\n \"\"\"SSD preprocessing.\n\n Maps pixel values to the range [-1, 1].\n\n Args:\n resized_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n \"\"\"\n return (2.0 / 255.0) * resized_inputs - 1.0\n\n def extract_features(self, preprocessed_inputs):\n \"\"\"Extract features from preprocessed inputs.\n\n Args:\n preprocessed_inputs: a [batch, height, width, channels] float tensor\n representing a batch of images.\n\n Returns:\n feature_maps: a list of tensors where the ith tensor has shape\n [batch, height_i, width_i, depth_i]\n \"\"\"\n preprocessed_inputs.get_shape().assert_has_rank(4)\n shape_assert = tf.Assert(\n tf.logical_and(tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),\n tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),\n ['image size must at least be 33 in both height and width.'])\n\n feature_map_layout = {\n 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '',\n '', ''],\n 'layer_depth': [-1, -1, 512, 256, 256, 128],\n }\n\n with tf.control_dependencies([shape_assert]):\n with slim.arg_scope(self._conv_hyperparams):\n with slim.arg_scope([slim.batch_norm], fused=False):\n with tf.variable_scope('MobilenetV1',\n reuse=self._reuse_weights) as scope:\n _, image_features = mobilenet_v1.mobilenet_v1_base(\n ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),\n final_endpoint='Conv2d_13_pointwise',\n min_depth=self._min_depth,\n depth_multiplier=self._depth_multiplier,\n scope=scope)\n feature_maps = feature_map_generators.multi_resolution_feature_maps(\n feature_map_layout=feature_map_layout,\n depth_multiplier=self._depth_multiplier,\n min_depth=self._min_depth,\n insert_1x1_conv=True,\n image_features=image_features)\n\n return feature_maps.values()\n", "# Copyright 2017 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Network units wrapping TensorFlow's tf.contrib.rnn cells.\n\nPlease put all wrapping logic for tf.contrib.rnn in this module; this will help\ncollect common subroutines that prove useful.\n\"\"\"\n\nimport abc\n\nimport tensorflow as tf\n\nfrom dragnn.python import network_units as dragnn\nfrom syntaxnet.util import check\n\n\ndef capture_variables(function, scope_name):\n \"\"\"Captures and returns variables created by a function.\n\n Runs |function| in a scope of name |scope_name| and returns the list of\n variables created by |function|.\n\n Args:\n function: Function whose variables should be captured. The function should\n take one argument, its enclosing variable scope.\n scope_name: Variable scope in which the |function| is evaluated.\n\n Returns:\n List of created variables.\n \"\"\"\n # Use a dict to dedupe captured variables.\n created_vars = {}\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Calls the real getter and captures its result in |created_vars|.\"\"\"\n real_variable = getter(*args, **kwargs)\n created_vars[real_variable.name] = real_variable\n return real_variable\n\n with tf.variable_scope(\n scope_name, reuse=None, custom_getter=_custom_getter) as scope:\n function(scope)\n return created_vars.values()\n\n\ndef apply_with_captured_variables(function, scope_name, component):\n \"\"\"Applies a function using previously-captured variables.\n\n The counterpart to capture_variables(); invokes |function| in a scope of name\n |scope_name|, extracting captured variables from the |component|.\n\n Args:\n function: Function to apply using captured variables. The function should\n take one argument, its enclosing variable scope.\n scope_name: Variable scope in which the |function| is evaluated. 
Must match\n the scope passed to capture_variables().\n component: Component from which to extract captured variables.\n\n Returns:\n Results of function application.\n \"\"\"\n\n def _custom_getter(getter, *args, **kwargs):\n \"\"\"Retrieves the normal or moving-average variables.\"\"\"\n return component.get_variable(var_params=getter(*args, **kwargs))\n\n with tf.variable_scope(\n scope_name, reuse=True, custom_getter=_custom_getter) as scope:\n return function(scope)\n\n\nclass BaseLSTMNetwork(dragnn.NetworkUnitInterface):\n \"\"\"Base class for wrapped LSTM networks.\n\n This LSTM network unit supports multiple layers with layer normalization.\n Because it is imported from tf.contrib.rnn, we need to capture the created\n variables during initialization time.\n\n Layers:\n ...subclass-specific layers...\n last_layer: Alias for the activations of the last hidden layer.\n logits: Logits associated with component actions.\n \"\"\"\n\n def __init__(self, component, additional_attr_defaults=None):\n \"\"\"Initializes the LSTM base class.\n\n Parameters used:\n hidden_layer_sizes: Comma-delimited number of hidden units for each layer.\n input_dropout_rate (-1.0): Input dropout rate for each layer. If < 0.0,\n use the global |dropout_rate| hyperparameter.\n recurrent_dropout_rate (0.8): Recurrent dropout rate. If < 0.0, use the\n global |recurrent_dropout_rate| hyperparameter.\n layer_norm (True): Whether or not to use layer norm.\n\n Hyperparameters used:\n dropout_rate: Input dropout rate.\n recurrent_dropout_rate: Recurrent dropout rate.\n\n Args:\n component: parent ComponentBuilderBase object.\n additional_attr_defaults: Additional attributes for use by derived class.\n \"\"\"\n attr_defaults = additional_attr_defaults or {}\n attr_defaults.update({\n 'layer_norm': True,\n 'input_dropout_rate': -1.0,\n 'recurrent_dropout_rate': 0.8,\n 'hidden_layer_sizes': '256',\n })\n self._attrs = dragnn.get_attrs_with_defaults(\n component.spec.network_unit.parameters,\n defaults=attr_defaults)\n\n self._hidden_layer_sizes = map(int,\n self._attrs['hidden_layer_sizes'].split(','))\n\n self._input_dropout_rate = self._attrs['input_dropout_rate']\n if self._input_dropout_rate < 0.0:\n self._input_dropout_rate = component.master.hyperparams.dropout_rate\n\n self._recurrent_dropout_rate = self._attrs['recurrent_dropout_rate']\n if self._recurrent_dropout_rate < 0.0:\n self._recurrent_dropout_rate = (\n component.master.hyperparams.recurrent_dropout_rate)\n if self._recurrent_dropout_rate < 0.0:\n self._recurrent_dropout_rate = component.master.hyperparams.dropout_rate\n\n tf.logging.info('[%s] input_dropout_rate=%s recurrent_dropout_rate=%s',\n component.name, self._input_dropout_rate,\n self._recurrent_dropout_rate)\n\n layers, context_layers = self.create_hidden_layers(component,\n self._hidden_layer_sizes)\n last_layer_dim = layers[-1].dim\n layers.append(\n dragnn.Layer(component, name='last_layer', dim=last_layer_dim))\n layers.append(\n dragnn.Layer(component, name='logits', dim=component.num_actions))\n\n # Provide initial layers and context layers, so the base class constructor\n # can safely use accessors like get_layer_size().\n super(BaseLSTMNetwork, self).__init__(\n component, init_layers=layers, init_context_layers=context_layers)\n\n # Allocate parameters for the softmax.\n self._params.append(\n tf.get_variable(\n 'weights_softmax', [last_layer_dim, component.num_actions],\n initializer=tf.random_normal_initializer(stddev=1e-4)))\n self._params.append(\n tf.get_variable(\n 'bias_softmax', 
[component.num_actions],\n initializer=tf.zeros_initializer()))\n\n def get_logits(self, network_tensors):\n \"\"\"Returns the logits for prediction.\"\"\"\n return network_tensors[self.get_layer_index('logits')]\n\n @abc.abstractmethod\n def create_hidden_layers(self, component, hidden_layer_sizes):\n \"\"\"Creates hidden network layers.\n\n Args:\n component: Parent ComponentBuilderBase object.\n hidden_layer_sizes: List of requested hidden layer activation sizes.\n\n Returns:\n layers: List of layers created by this network.\n context_layers: List of context layers created by this network.\n \"\"\"\n pass\n\n def _append_base_layers(self, hidden_layers):\n \"\"\"Appends layers defined by the base class to the |hidden_layers|.\"\"\"\n last_layer = hidden_layers[-1]\n\n logits = tf.nn.xw_plus_b(last_layer,\n self._component.get_variable('weights_softmax'),\n self._component.get_variable('bias_softmax'))\n return hidden_layers + [last_layer, logits]\n\n def _create_cell(self, num_units, during_training):\n \"\"\"Creates a single LSTM cell, possibly with dropout.\n\n Requires that BaseLSTMNetwork.__init__() was called.\n\n Args:\n num_units: Number of hidden units in the cell.\n during_training: Whether to create a cell for training (vs inference).\n\n Returns:\n A RNNCell of the requested size, possibly with dropout.\n \"\"\"\n # No dropout in inference mode.\n if not during_training:\n return tf.contrib.rnn.LayerNormBasicLSTMCell(\n num_units, layer_norm=self._attrs['layer_norm'], reuse=True)\n\n # Otherwise, apply dropout to inputs and recurrences.\n cell = tf.contrib.rnn.LayerNormBasicLSTMCell(\n num_units,\n dropout_keep_prob=self._recurrent_dropout_rate,\n layer_norm=self._attrs['layer_norm'])\n cell = tf.contrib.rnn.DropoutWrapper(\n cell, input_keep_prob=self._input_dropout_rate)\n return cell\n\n def _create_train_cells(self):\n \"\"\"Creates a list of LSTM cells for training.\"\"\"\n return [\n self._create_cell(num_units, during_training=True)\n for num_units in self._hidden_layer_sizes\n ]\n\n def _create_inference_cells(self):\n \"\"\"Creates a list of LSTM cells for inference.\"\"\"\n return [\n self._create_cell(num_units, during_training=False)\n for num_units in self._hidden_layer_sizes\n ]\n\n def _capture_variables_as_params(self, function):\n \"\"\"Captures variables created by a function in |self._params|.\"\"\"\n self._params.extend(capture_variables(function, 'cell'))\n\n def _apply_with_captured_variables(self, function):\n \"\"\"Applies a function using previously-captured variables.\"\"\"\n return apply_with_captured_variables(function, 'cell', self._component)\n\n\nclass LayerNormBasicLSTMNetwork(BaseLSTMNetwork):\n \"\"\"Wrapper around tf.contrib.rnn.LayerNormBasicLSTMCell.\n\n Features:\n All inputs are concatenated.\n\n Subclass-specific layers:\n state_c_<n>: Cell states for the <n>'th LSTM layer (0-origin).\n state_h_<n>: Hidden states for the <n>'th LSTM layer (0-origin).\n \"\"\"\n\n def __init__(self, component):\n \"\"\"Sets up context and output layers, as well as a final softmax.\"\"\"\n super(LayerNormBasicLSTMNetwork, self).__init__(component)\n\n # Wrap lists of training and inference sub-cells into multi-layer RNN cells.\n # Note that a |MultiRNNCell| state is a tuple of per-layer sub-states.\n self._train_cell = tf.contrib.rnn.MultiRNNCell(self._create_train_cells())\n self._inference_cell = tf.contrib.rnn.MultiRNNCell(\n self._create_inference_cells())\n\n def _cell_closure(scope):\n \"\"\"Applies the LSTM cell to placeholder inputs and 
state.\"\"\"\n placeholder_inputs = tf.placeholder(\n dtype=tf.float32, shape=(1, self._concatenated_input_dim))\n\n placeholder_substates = []\n for num_units in self._hidden_layer_sizes:\n placeholder_substate = tf.contrib.rnn.LSTMStateTuple(\n tf.placeholder(dtype=tf.float32, shape=(1, num_units)),\n tf.placeholder(dtype=tf.float32, shape=(1, num_units)))\n placeholder_substates.append(placeholder_substate)\n placeholder_state = tuple(placeholder_substates)\n\n self._train_cell(\n inputs=placeholder_inputs, state=placeholder_state, scope=scope)\n\n self._capture_variables_as_params(_cell_closure)\n\n def create_hidden_layers(self, component, hidden_layer_sizes):\n \"\"\"See base class.\"\"\"\n # Construct the layer meta info for the DRAGNN builder. Note that the order\n # of h and c are reversed compared to the vanilla DRAGNN LSTM cell, as\n # this is the standard in tf.contrib.rnn.\n #\n # NB: The h activations of the last LSTM must be the last layer, in order\n # for _append_base_layers() to work.\n layers = []\n for index, num_units in enumerate(hidden_layer_sizes):\n layers.append(\n dragnn.Layer(component, name='state_c_%d' % index, dim=num_units))\n layers.append(\n dragnn.Layer(component, name='state_h_%d' % index, dim=num_units))\n context_layers = list(layers) # copy |layers|, don't alias it\n return layers, context_layers\n\n def create(self,\n fixed_embeddings,\n linked_embeddings,\n context_tensor_arrays,\n attention_tensor,\n during_training,\n stride=None):\n \"\"\"See base class.\"\"\"\n # NB: This cell pulls the lstm's h and c vectors from context_tensor_arrays\n # instead of through linked features.\n check.Eq(\n len(context_tensor_arrays), 2 * len(self._hidden_layer_sizes),\n 'require two context tensors per hidden layer')\n\n # Rearrange the context tensors into a tuple of LSTM sub-states.\n length = context_tensor_arrays[0].size()\n substates = []\n for index, num_units in enumerate(self._hidden_layer_sizes):\n state_c = context_tensor_arrays[2 * index].read(length - 1)\n state_h = context_tensor_arrays[2 * index + 1].read(length - 1)\n\n # Fix shapes that for some reason are not set properly for an unknown\n # reason. 
TODO(googleuser): Why are the shapes not set?\n state_c.set_shape([tf.Dimension(None), num_units])\n state_h.set_shape([tf.Dimension(None), num_units])\n substates.append(tf.contrib.rnn.LSTMStateTuple(state_c, state_h))\n state = tuple(substates)\n\n input_tensor = dragnn.get_input_tensor(fixed_embeddings, linked_embeddings)\n cell = self._train_cell if during_training else self._inference_cell\n\n def _cell_closure(scope):\n \"\"\"Applies the LSTM cell to the current inputs and state.\"\"\"\n return cell(input_tensor, state, scope)\n\n unused_h, state = self._apply_with_captured_variables(_cell_closure)\n\n # Return tensors to be put into the tensor arrays / used to compute\n # objective.\n output_tensors = []\n for new_substate in state:\n new_c, new_h = new_substate\n output_tensors.append(new_c)\n output_tensors.append(new_h)\n return self._append_base_layers(output_tensors)\n\n\nclass BulkBiLSTMNetwork(BaseLSTMNetwork):\n \"\"\"Bulk wrapper around tf.contrib.rnn.stack_bidirectional_dynamic_rnn().\n\n Features:\n lengths: [stride, 1] sequence lengths per batch item.\n All other features are concatenated into input activations.\n\n Subclass-specific layers:\n outputs: [stride * num_steps, self._output_dim] bi-LSTM activations.\n \"\"\"\n\n def __init__(self, component):\n \"\"\"Initializes the bulk bi-LSTM.\n\n Parameters used:\n parallel_iterations (1): Parallelism of the underlying tf.while_loop().\n Defaults to 1 thread to encourage deterministic behavior, but can be\n increased to trade memory for speed.\n\n Args:\n component: parent ComponentBuilderBase object.\n \"\"\"\n super(BulkBiLSTMNetwork, self).__init__(\n component, additional_attr_defaults={'parallel_iterations': 1})\n\n check.In('lengths', self._linked_feature_dims,\n 'Missing required linked feature')\n check.Eq(self._linked_feature_dims['lengths'], 1,\n 'Wrong dimension for \"lengths\" feature')\n self._input_dim = self._concatenated_input_dim - 1 # exclude 'lengths'\n self._output_dim = self.get_layer_size('outputs')\n tf.logging.info('[%s] Bulk bi-LSTM with input_dim=%d output_dim=%d',\n component.name, self._input_dim, self._output_dim)\n\n # Create one training and inference cell per layer and direction.\n self._train_cells_forward = self._create_train_cells()\n self._train_cells_backward = self._create_train_cells()\n self._inference_cells_forward = self._create_inference_cells()\n self._inference_cells_backward = self._create_inference_cells()\n\n def _bilstm_closure(scope):\n \"\"\"Applies the bi-LSTM to placeholder inputs and lengths.\"\"\"\n # Use singleton |stride| and |steps| because their values don't affect the\n # weight variables.\n stride, steps = 1, 1\n placeholder_inputs = tf.placeholder(\n dtype=tf.float32, shape=[stride, steps, self._input_dim])\n placeholder_lengths = tf.placeholder(dtype=tf.int64, shape=[stride])\n\n # Omit the initial states and sequence lengths for simplicity; they don't\n # affect the weight variables.\n tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n self._train_cells_forward,\n self._train_cells_backward,\n placeholder_inputs,\n dtype=tf.float32,\n sequence_length=placeholder_lengths,\n scope=scope)\n\n self._capture_variables_as_params(_bilstm_closure)\n\n # Allocate parameters for the initial states. 
Note that an LSTM state is a\n # tuple of two substates (c, h), so there are 4 variables per layer.\n for index, num_units in enumerate(self._hidden_layer_sizes):\n for direction in ['forward', 'backward']:\n for substate in ['c', 'h']:\n self._params.append(\n tf.get_variable(\n 'initial_state_%s_%s_%d' % (direction, substate, index),\n [1, num_units], # leading 1 for later batch-wise tiling\n dtype=tf.float32,\n initializer=tf.constant_initializer(0.0)))\n\n def create_hidden_layers(self, component, hidden_layer_sizes):\n \"\"\"See base class.\"\"\"\n dim = 2 * hidden_layer_sizes[-1]\n return [dragnn.Layer(component, name='outputs', dim=dim)], []\n\n def create(self,\n fixed_embeddings,\n linked_embeddings,\n context_tensor_arrays,\n attention_tensor,\n during_training,\n stride=None):\n \"\"\"Requires |stride|; otherwise see base class.\"\"\"\n check.NotNone(stride,\n 'BulkBiLSTMNetwork requires \"stride\" and must be called '\n 'in the bulk feature extractor component.')\n\n # Flatten the lengths into a vector.\n lengths = dragnn.lookup_named_tensor('lengths', linked_embeddings)\n lengths_s = tf.squeeze(lengths.tensor, [1])\n\n # Collect all other inputs into a batched tensor.\n linked_embeddings = [\n named_tensor for named_tensor in linked_embeddings\n if named_tensor.name != 'lengths'\n ]\n inputs_sxnxd = dragnn.get_input_tensor_with_stride(\n fixed_embeddings, linked_embeddings, stride)\n\n # Since get_input_tensor_with_stride() concatenates the input embeddings, it\n # obscures the static activation dimension, which the RNN library requires.\n # Restore it using set_shape(). Note that set_shape() merges into the known\n # shape, so only specify the activation dimension.\n inputs_sxnxd.set_shape(\n [tf.Dimension(None), tf.Dimension(None), self._input_dim])\n\n initial_states_forward, initial_states_backward = (\n self._create_initial_states(stride))\n\n if during_training:\n cells_forward = self._train_cells_forward\n cells_backward = self._train_cells_backward\n else:\n cells_forward = self._inference_cells_forward\n cells_backward = self._inference_cells_backward\n\n def _bilstm_closure(scope):\n \"\"\"Applies the bi-LSTM to the current inputs.\"\"\"\n outputs_sxnxd, _, _ = tf.contrib.rnn.stack_bidirectional_dynamic_rnn(\n cells_forward,\n cells_backward,\n inputs_sxnxd,\n initial_states_fw=initial_states_forward,\n initial_states_bw=initial_states_backward,\n sequence_length=lengths_s,\n parallel_iterations=self._attrs['parallel_iterations'],\n scope=scope)\n return outputs_sxnxd\n\n # Layer outputs are not batched; flatten out the batch dimension.\n outputs_sxnxd = self._apply_with_captured_variables(_bilstm_closure)\n outputs_snxd = tf.reshape(outputs_sxnxd, [-1, self._output_dim])\n return self._append_base_layers([outputs_snxd])\n\n def _create_initial_states(self, stride):\n \"\"\"Returns stacked and batched initial states for the bi-LSTM.\"\"\"\n initial_states_forward = []\n initial_states_backward = []\n for index in range(len(self._hidden_layer_sizes)):\n # Retrieve the initial states for this layer.\n states_sxd = []\n for direction in ['forward', 'backward']:\n for substate in ['c', 'h']:\n state_1xd = self._component.get_variable('initial_state_%s_%s_%d' %\n (direction, substate, index))\n state_sxd = tf.tile(state_1xd, [stride, 1]) # tile across the batch\n states_sxd.append(state_sxd)\n\n # Assemble and append forward and backward LSTM states.\n initial_states_forward.append(\n tf.contrib.rnn.LSTMStateTuple(states_sxd[0], states_sxd[1]))\n 
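# states_sxd was filled as [forward c, forward h, backward c, backward h]\n # by the loops above, so indices 2 and 3 hold the backward substates.\n 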
initial_states_backward.append(\n tf.contrib.rnn.LSTMStateTuple(states_sxd[2], states_sxd[3]))\n return initial_states_forward, initial_states_backward\n", "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Code probability model used for entropy coding.\"\"\"\n\nimport json\n\nfrom six.moves import xrange\nimport tensorflow as tf\n\nfrom entropy_coder.lib import blocks\nfrom entropy_coder.model import entropy_coder_model\nfrom entropy_coder.model import model_factory\n\n\n# pylint: disable=not-callable\n\n\nclass BrnnPredictor(blocks.BlockBase):\n \"\"\"BRNN prediction applied on one layer.\"\"\"\n\n def __init__(self, code_depth, name=None):\n super(BrnnPredictor, self).__init__(name)\n\n with self._BlockScope():\n hidden_depth = 2 * code_depth\n\n # What is coming from the previous layer/iteration\n # is going through a regular Conv2D layer as opposed to the binary codes\n # of the current layer/iteration which are going through a masked\n # convolution.\n self._adaptation0 = blocks.RasterScanConv2D(\n hidden_depth, [7, 7], [1, 1], 'SAME',\n strict_order=True,\n bias=blocks.Bias(0), act=tf.tanh)\n self._adaptation1 = blocks.Conv2D(\n hidden_depth, [3, 3], [1, 1], 'SAME',\n bias=blocks.Bias(0), act=tf.tanh)\n self._predictor = blocks.CompositionOperator([\n blocks.LineOperator(\n blocks.RasterScanConv2DLSTM(\n depth=hidden_depth,\n filter_size=[1, 3],\n hidden_filter_size=[1, 3],\n strides=[1, 1],\n padding='SAME')),\n blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME',\n bias=blocks.Bias(0), act=tf.tanh),\n blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME',\n bias=blocks.Bias(0), act=tf.tanh)\n ])\n\n def _Apply(self, x, s):\n # Code estimation using both:\n # - the state from the previous iteration/layer,\n # - the binary codes that are before in raster scan order.\n h = tf.concat(values=[self._adaptation0(x), self._adaptation1(s)], axis=3)\n\n estimated_codes = self._predictor(h)\n\n return estimated_codes\n\n\nclass LayerPrediction(blocks.BlockBase):\n \"\"\"Binary code prediction for one layer.\"\"\"\n\n def __init__(self, layer_count, code_depth, name=None):\n super(LayerPrediction, self).__init__(name)\n\n self._layer_count = layer_count\n\n # No previous layer.\n self._layer_state = None\n self._current_layer = 0\n\n with self._BlockScope():\n # Layers used to do the conditional code prediction.\n self._brnn_predictors = []\n for _ in xrange(layer_count):\n self._brnn_predictors.append(BrnnPredictor(code_depth))\n\n # Layers used to generate the input of the LSTM operating on the\n # iteration/depth domain.\n hidden_depth = 2 * code_depth\n self._state_blocks = []\n for _ in xrange(layer_count):\n self._state_blocks.append(blocks.CompositionOperator([\n blocks.Conv2D(\n hidden_depth, [3, 3], [1, 1], 'SAME',\n bias=blocks.Bias(0), act=tf.tanh),\n blocks.Conv2D(\n code_depth, [3, 3], [1, 1], 'SAME',\n bias=blocks.Bias(0), 
act=tf.tanh)\n ]))\n\n # Memory of the RNN is equivalent to the size of 2 layers of binary\n # codes.\n hidden_depth = 2 * code_depth\n self._layer_rnn = blocks.CompositionOperator([\n blocks.Conv2DLSTM(\n depth=hidden_depth,\n filter_size=[1, 1],\n hidden_filter_size=[1, 1],\n strides=[1, 1],\n padding='SAME'),\n blocks.Conv2D(hidden_depth, [1, 1], [1, 1], 'SAME',\n bias=blocks.Bias(0), act=tf.tanh),\n blocks.Conv2D(code_depth, [1, 1], [1, 1], 'SAME',\n bias=blocks.Bias(0), act=tf.tanh)\n ])\n\n def _Apply(self, x):\n assert self._current_layer < self._layer_count\n\n # Layer state is set to 0 when there is no previous iteration.\n if self._layer_state is None:\n self._layer_state = tf.zeros_like(x, dtype=tf.float32)\n\n # Code estimation using both:\n # - the state from the previous iteration/layer,\n # - the binary codes that are before in raster scan order.\n estimated_codes = self._brnn_predictors[self._current_layer](\n x, self._layer_state)\n\n # Compute the updated layer state.\n h = self._state_blocks[self._current_layer](x)\n self._layer_state = self._layer_rnn(h)\n self._current_layer += 1\n\n return estimated_codes\n\n\nclass ProgressiveModel(entropy_coder_model.EntropyCoderModel):\n \"\"\"Progressive BRNN entropy coder model.\"\"\"\n\n def __init__(self):\n super(ProgressiveModel, self).__init__()\n\n def Initialize(self, global_step, optimizer, config_string):\n if config_string is None:\n raise ValueError('The progressive model requires a configuration.')\n config = json.loads(config_string)\n if 'coded_layer_count' not in config:\n config['coded_layer_count'] = 0\n\n self._config = config\n self._optimizer = optimizer\n self._global_step = global_step\n\n def BuildGraph(self, input_codes):\n \"\"\"Build the graph corresponding to the progressive BRNN model.\"\"\"\n layer_depth = self._config['layer_depth']\n layer_count = self._config['layer_count']\n\n code_shape = input_codes.get_shape()\n code_depth = code_shape[-1].value\n if self._config['coded_layer_count'] > 0:\n prefix_depth = self._config['coded_layer_count'] * layer_depth\n if code_depth < prefix_depth:\n raise ValueError('Invalid prefix depth: {} VS {}'.format(\n prefix_depth, code_depth))\n input_codes = input_codes[:, :, :, :prefix_depth]\n\n code_shape = input_codes.get_shape()\n code_depth = code_shape[-1].value\n if code_depth % layer_depth != 0:\n raise ValueError(\n 'Code depth must be a multiple of the layer depth: {} vs {}'.format(\n code_depth, layer_depth))\n code_layer_count = code_depth // layer_depth\n if code_layer_count > layer_count:\n raise ValueError('Input codes have too many layers: {}, max={}'.format(\n code_layer_count, layer_count))\n\n # Block used to estimate binary codes.\n layer_prediction = LayerPrediction(layer_count, layer_depth)\n\n # Block used to compute code lengths.\n code_length_block = blocks.CodeLength()\n\n # Loop over all the layers.\n code_length = []\n code_layers = tf.split(\n value=input_codes, num_or_size_splits=code_layer_count, axis=3)\n for k in xrange(code_layer_count):\n x = code_layers[k]\n predicted_x = layer_prediction(x)\n # Saturate the prediction to avoid infinite code length.\n epsilon = 0.001\n predicted_x = tf.clip_by_value(\n predicted_x, -1 + epsilon, +1 - epsilon)\n code_length.append(code_length_block(\n blocks.ConvertSignCodeToZeroOneCode(x),\n blocks.ConvertSignCodeToZeroOneCode(predicted_x)))\n tf.summary.scalar('code_length_layer_{:02d}'.format(k), code_length[-1])\n code_length = tf.stack(code_length)\n self.loss = tf.reduce_mean(code_length)\n 
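# The training loss is the mean of the per-layer code lengths stacked above.\n 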
tf.summary.scalar('loss', self.loss)\n\n # Loop over all the remaining layers just to make sure they are\n # instantiated. Otherwise, loading model params could fail.\n dummy_x = tf.zeros_like(code_layers[0])\n for _ in xrange(layer_count - code_layer_count):\n dummy_predicted_x = layer_prediction(dummy_x)\n\n # Average code length over the coded layers.\n self.average_code_length = tf.reduce_mean(code_length)\n\n if self._optimizer:\n optim_op = self._optimizer.minimize(self.loss,\n global_step=self._global_step)\n block_updates = blocks.CreateBlockUpdates()\n if block_updates:\n with tf.get_default_graph().control_dependencies([optim_op]):\n self.train_op = tf.group(*block_updates)\n else:\n self.train_op = optim_op\n else:\n self.train_op = None\n\n def GetConfigStringForUnitTest(self):\n s = '{\\n'\n s += '\"layer_depth\": 1,\\n'\n s += '\"layer_count\": 8\\n'\n s += '}\\n'\n return s\n\n\n@model_factory.RegisterEntropyCoderModel('progressive')\ndef CreateProgressiveModel():\n return ProgressiveModel()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Trains an image-to-image translation network with an adversarial loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nimport data_provider\nfrom google3.third_party.tensorflow_models.gan.pix2pix import networks\n\nflags = tf.flags\ntfgan = tf.contrib.gan\n\nflags.DEFINE_integer('batch_size', 10, 'The number of images in each batch.')\n\nflags.DEFINE_integer('patch_size', 32, 'The size of the patches to train on.')\n\nflags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')\n\nflags.DEFINE_string('train_log_dir', '/tmp/pix2pix/',\n 'Directory where to write event logs.')\n\nflags.DEFINE_float('generator_lr', 0.00001,\n 'The generator learning rate.')\n\nflags.DEFINE_float('discriminator_lr', 0.00001,\n 'The discriminator learning rate.')\n\nflags.DEFINE_integer('max_number_of_steps', 2000000,\n 'The maximum number of gradient steps.')\n\nflags.DEFINE_integer(\n 'ps_tasks', 0,\n 'The number of parameter servers. If the value is 0, then the parameters '\n 'are handled locally by the worker.')\n\nflags.DEFINE_integer(\n 'task', 0,\n 'The Task ID. 
This value is used when training with multiple workers to '\n 'identify each worker.')\n\nflags.DEFINE_float(\n 'weight_factor', 0.0,\n 'How much to weight the adversarial loss relative to pixel loss.')\n\nflags.DEFINE_string('dataset_dir', None, 'Location of data.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n if not tf.gfile.Exists(FLAGS.train_log_dir):\n tf.gfile.MakeDirs(FLAGS.train_log_dir)\n\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):\n # Get real and distorted images.\n with tf.device('/cpu:0'), tf.name_scope('inputs'):\n real_images = data_provider.provide_data(\n 'train', FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir,\n patch_size=FLAGS.patch_size)\n distorted_images = _distort_images(\n real_images, downscale_size=int(FLAGS.patch_size / 2),\n upscale_size=FLAGS.patch_size)\n\n # Create a GANModel tuple.\n gan_model = tfgan.gan_model(\n generator_fn=networks.generator,\n discriminator_fn=networks.discriminator,\n real_data=real_images,\n generator_inputs=distorted_images)\n tfgan.eval.add_image_comparison_summaries(\n gan_model, num_comparisons=3, display_diffs=True)\n tfgan.eval.add_gan_model_image_summaries(gan_model, grid_size=3)\n\n # Define the GANLoss tuple using standard library functions.\n with tf.name_scope('losses'):\n gan_loss = tfgan.gan_loss(\n gan_model,\n generator_loss_fn=tfgan.losses.least_squares_generator_loss,\n discriminator_loss_fn=tfgan.losses.least_squares_discriminator_loss)\n\n # Define the standard L1 pixel loss.\n l1_pixel_loss = tf.norm(gan_model.real_data - gan_model.generated_data,\n ord=1) / FLAGS.patch_size ** 2\n\n # Modify the loss tuple to include the pixel loss. Add summaries as well.\n gan_loss = tfgan.losses.combine_adversarial_loss(\n gan_loss, gan_model, l1_pixel_loss,\n weight_factor=FLAGS.weight_factor)\n\n with tf.name_scope('train_ops'):\n # Get the GANTrain ops using the custom optimizers and optional\n # discriminator weight clipping.\n gen_lr, dis_lr = _lr(FLAGS.generator_lr, FLAGS.discriminator_lr)\n gen_opt, dis_opt = _optimizer(gen_lr, dis_lr)\n train_ops = tfgan.gan_train_ops(\n gan_model,\n gan_loss,\n generator_optimizer=gen_opt,\n discriminator_optimizer=dis_opt,\n summarize_gradients=True,\n colocate_gradients_with_ops=True,\n aggregation_method=tf.AggregationMethod.EXPERIMENTAL_ACCUMULATE_N,\n transform_grads_fn=tf.contrib.training.clip_gradient_norms_fn(1e3))\n tf.summary.scalar('generator_lr', gen_lr)\n tf.summary.scalar('discriminator_lr', dis_lr)\n\n # Use GAN train step function if using adversarial loss, otherwise\n # only train the generator.\n train_steps = tfgan.GANTrainSteps(\n generator_train_steps=1,\n discriminator_train_steps=int(FLAGS.weight_factor > 0))\n\n # Run the alternating training loop. 
Skip it if no steps should be taken\n # (used for graph construction tests).\n status_message = tf.string_join(\n ['Starting train step: ',\n tf.as_string(tf.train.get_or_create_global_step())],\n name='status_message')\n if FLAGS.max_number_of_steps == 0: return\n tfgan.gan_train(\n train_ops,\n FLAGS.train_log_dir,\n get_hooks_fn=tfgan.get_sequential_train_hooks(train_steps),\n hooks=[tf.train.StopAtStepHook(num_steps=FLAGS.max_number_of_steps),\n tf.train.LoggingTensorHook([status_message], every_n_iter=10)],\n master=FLAGS.master,\n is_chief=FLAGS.task == 0)\n\n\ndef _optimizer(gen_lr, dis_lr):\n kwargs = {'beta1': 0.5, 'beta2': 0.999}\n generator_opt = tf.train.AdamOptimizer(gen_lr, **kwargs)\n discriminator_opt = tf.train.AdamOptimizer(dis_lr, **kwargs)\n return generator_opt, discriminator_opt\n\n\ndef _lr(gen_lr_base, dis_lr_base):\n \"\"\"Return the generator and discriminator learning rates.\"\"\"\n gen_lr = tf.train.exponential_decay(\n learning_rate=gen_lr_base,\n global_step=tf.train.get_or_create_global_step(),\n decay_steps=100000,\n decay_rate=0.8,\n staircase=True, )\n dis_lr = dis_lr_base\n\n return gen_lr, dis_lr\n\n\ndef _distort_images(images, downscale_size, upscale_size):\n downscaled = tf.image.resize_area(images, [downscale_size] * 2)\n upscaled = tf.image.resize_area(downscaled, [upscale_size] * 2)\n return upscaled\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for graphs.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nimport operator\nimport os\nimport random\nimport shutil\nimport string\nimport tempfile\n\n# Dependency imports\n\nimport tensorflow as tf\n\nimport graphs\nfrom adversarial_text.data import data_utils\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\ndata = data_utils\n\nflags.DEFINE_integer('task', 0, 'Task id; needed for SyncReplicas test')\n\n\ndef _build_random_vocabulary(vocab_size=100):\n \"\"\"Builds and returns a dict<term, id>.\"\"\"\n vocab = set()\n while len(vocab) < (vocab_size - 1):\n rand_word = ''.join(\n random.choice(string.ascii_lowercase)\n for _ in range(random.randint(1, 10)))\n vocab.add(rand_word)\n\n vocab_ids = dict([(word, i) for i, word in enumerate(vocab)])\n vocab_ids[data.EOS_TOKEN] = vocab_size - 1\n return vocab_ids\n\n\ndef _build_random_sequence(vocab_ids):\n seq_len = random.randint(10, 200)\n ids = vocab_ids.values()\n seq = data.SequenceWrapper()\n for token_id in [random.choice(ids) for _ in range(seq_len)]:\n seq.add_timestep().set_token(token_id)\n return seq\n\n\ndef _build_vocab_frequencies(seqs, vocab_ids):\n vocab_freqs = defaultdict(int)\n ids_to_words = dict([(i, word) for word, i in vocab_ids.iteritems()])\n for seq in seqs:\n for timestep in seq:\n vocab_freqs[ids_to_words[timestep.token]] += 1\n\n 
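# Pin the EOS frequency to zero regardless of how often its id was sampled.\n 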
vocab_freqs[data.EOS_TOKEN] = 0\n return vocab_freqs\n\n\nclass GraphsTest(tf.test.TestCase):\n \"\"\"Test graph construction methods.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n # Make model small\n FLAGS.batch_size = 2\n FLAGS.num_timesteps = 3\n FLAGS.embedding_dims = 4\n FLAGS.rnn_num_layers = 2\n FLAGS.rnn_cell_size = 4\n FLAGS.cl_num_layers = 2\n FLAGS.cl_hidden_size = 4\n FLAGS.vocab_size = 10\n\n # Set input/output flags\n FLAGS.data_dir = tempfile.mkdtemp()\n\n # Build and write sequence files.\n vocab_ids = _build_random_vocabulary(FLAGS.vocab_size)\n seqs = [_build_random_sequence(vocab_ids) for _ in range(5)]\n seqs_label = [\n data.build_labeled_sequence(seq, random.choice([True, False]))\n for seq in seqs\n ]\n seqs_lm = [data.build_lm_sequence(seq) for seq in seqs]\n seqs_ae = [data.build_seq_ae_sequence(seq) for seq in seqs]\n seqs_rev = [data.build_reverse_sequence(seq) for seq in seqs]\n seqs_bidir = [\n data.build_bidirectional_seq(seq, rev)\n for seq, rev in zip(seqs, seqs_rev)\n ]\n seqs_bidir_label = [\n data.build_labeled_sequence(bd_seq, random.choice([True, False]))\n for bd_seq in seqs_bidir\n ]\n\n filenames = [\n data.TRAIN_CLASS, data.TRAIN_LM, data.TRAIN_SA, data.TEST_CLASS,\n data.TRAIN_REV_LM, data.TRAIN_BD_CLASS, data.TEST_BD_CLASS\n ]\n seq_lists = [\n seqs_label, seqs_lm, seqs_ae, seqs_label, seqs_rev, seqs_bidir,\n seqs_bidir_label\n ]\n for fname, seq_list in zip(filenames, seq_lists):\n with tf.python_io.TFRecordWriter(\n os.path.join(FLAGS.data_dir, fname)) as writer:\n for seq in seq_list:\n writer.write(seq.seq.SerializeToString())\n\n # Write vocab.txt and vocab_freq.txt\n vocab_freqs = _build_vocab_frequencies(seqs, vocab_ids)\n ordered_vocab_freqs = sorted(\n vocab_freqs.items(), key=operator.itemgetter(1), reverse=True)\n with open(os.path.join(FLAGS.data_dir, 'vocab.txt'), 'w') as vocab_f:\n with open(os.path.join(FLAGS.data_dir, 'vocab_freq.txt'), 'w') as freq_f:\n for word, freq in ordered_vocab_freqs:\n vocab_f.write('{}\\n'.format(word))\n freq_f.write('{}\\n'.format(freq))\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(FLAGS.data_dir)\n\n def setUp(self):\n # Reset FLAGS\n FLAGS.rnn_num_layers = 1\n FLAGS.sync_replicas = False\n FLAGS.adv_training_method = None\n FLAGS.num_candidate_samples = -1\n FLAGS.num_classes = 2\n FLAGS.use_seq2seq_autoencoder = False\n\n # Reset Graph\n tf.reset_default_graph()\n\n def testClassifierGraph(self):\n FLAGS.rnn_num_layers = 2\n model = graphs.VatxtModel()\n train_op, _, _ = model.classifier_training()\n # Pretrained vars: embedding + LSTM layers\n self.assertEqual(\n len(model.pretrained_variables), 1 + 2 * FLAGS.rnn_num_layers)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n tf.train.start_queue_runners(sess)\n sess.run(train_op)\n\n def testLanguageModelGraph(self):\n train_op, _, _ = graphs.VatxtModel().language_model_training()\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n tf.train.start_queue_runners(sess)\n sess.run(train_op)\n\n def testMulticlass(self):\n FLAGS.num_classes = 10\n graphs.VatxtModel().classifier_graph()\n\n def testATMethods(self):\n at_methods = [None, 'rp', 'at', 'vat', 'atvat']\n for method in at_methods:\n FLAGS.adv_training_method = method\n with tf.Graph().as_default():\n graphs.VatxtModel().classifier_graph()\n\n # Ensure variables have been reused\n # Embedding + LSTM layers + hidden layers + logits layer\n expected_num_vars = 1 + 2 * FLAGS.rnn_num_layers + 2 * (\n 
FLAGS.cl_num_layers) + 2\n self.assertEqual(len(tf.trainable_variables()), expected_num_vars)\n\n def testSyncReplicas(self):\n FLAGS.sync_replicas = True\n graphs.VatxtModel().language_model_training()\n\n def testCandidateSampling(self):\n FLAGS.num_candidate_samples = 10\n graphs.VatxtModel().language_model_training()\n\n def testSeqAE(self):\n FLAGS.use_seq2seq_autoencoder = True\n graphs.VatxtModel().language_model_training()\n\n def testBidirLM(self):\n graphs.VatxtBidirModel().language_model_graph()\n\n def testBidirClassifier(self):\n at_methods = [None, 'rp', 'at', 'vat', 'atvat']\n for method in at_methods:\n FLAGS.adv_training_method = method\n with tf.Graph().as_default():\n graphs.VatxtBidirModel().classifier_graph()\n\n # Ensure variables have been reused\n # Embedding + 2 LSTM layers + hidden layers + logits layer\n expected_num_vars = 1 + 2 * 2 * FLAGS.rnn_num_layers + 2 * (\n FLAGS.cl_num_layers) + 2\n self.assertEqual(len(tf.trainable_variables()), expected_num_vars)\n\n def testEvalGraph(self):\n _, _ = graphs.VatxtModel().eval_graph()\n\n def testBidirEvalGraph(self):\n _, _ = graphs.VatxtBidirModel().eval_graph()\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for object_detection.core.prefetcher.\"\"\"\nimport tensorflow as tf\n\nfrom object_detection.core import prefetcher\n\nslim = tf.contrib.slim\n\n\nclass PrefetcherTest(tf.test.TestCase):\n\n def test_prefetch_tensors_with_fully_defined_shapes(self):\n with self.test_session() as sess:\n batch_size = 10\n image_size = 32\n num_batches = 5\n examples = tf.Variable(tf.constant(0, dtype=tf.int64))\n counter = examples.count_up_to(num_batches)\n image = tf.random_normal([batch_size, image_size,\n image_size, 3],\n dtype=tf.float32,\n name='images')\n label = tf.random_uniform([batch_size, 1], 0, 10,\n dtype=tf.int32, name='labels')\n\n prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter,\n 'image': image,\n 'label': label},\n capacity=100)\n tensor_dict = prefetch_queue.dequeue()\n\n self.assertAllEqual(tensor_dict['image'].get_shape().as_list(),\n [batch_size, image_size, image_size, 3])\n self.assertAllEqual(tensor_dict['label'].get_shape().as_list(),\n [batch_size, 1])\n\n tf.initialize_all_variables().run()\n with slim.queues.QueueRunners(sess):\n for _ in range(num_batches):\n results = sess.run(tensor_dict)\n self.assertEquals(results['image'].shape,\n (batch_size, image_size, image_size, 3))\n self.assertEquals(results['label'].shape, (batch_size, 1))\n with self.assertRaises(tf.errors.OutOfRangeError):\n sess.run(tensor_dict)\n\n def test_prefetch_tensors_with_partially_defined_shapes(self):\n with self.test_session() as sess:\n batch_size = 10\n image_size = 32\n num_batches = 5\n examples = tf.Variable(tf.constant(0, dtype=tf.int64))\n counter = examples.count_up_to(num_batches)\n 
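# Wrap some dimensions in tf.Variable so their static shapes stay undefined.\n 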
image = tf.random_normal([batch_size,\n tf.Variable(image_size),\n tf.Variable(image_size), 3],\n dtype=tf.float32,\n name='image')\n image.set_shape([batch_size, None, None, 3])\n label = tf.random_uniform([batch_size, tf.Variable(1)], 0,\n 10, dtype=tf.int32, name='label')\n label.set_shape([batch_size, None])\n\n prefetch_queue = prefetcher.prefetch(tensor_dict={'counter': counter,\n 'image': image,\n 'label': label},\n capacity=100)\n tensor_dict = prefetch_queue.dequeue()\n\n self.assertAllEqual(tensor_dict['image'].get_shape().as_list(),\n [batch_size, None, None, 3])\n self.assertAllEqual(tensor_dict['label'].get_shape().as_list(),\n [batch_size, None])\n\n tf.initialize_all_variables().run()\n with slim.queues.QueueRunners(sess):\n for _ in range(num_batches):\n results = sess.run(tensor_dict)\n self.assertEquals(results['image'].shape,\n (batch_size, image_size, image_size, 3))\n self.assertEquals(results['label'].shape, (batch_size, 1))\n with self.assertRaises(tf.errors.OutOfRangeError):\n sess.run(tensor_dict)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Compute the additional compression ratio after entropy coding.\"\"\"\n\nimport io\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nimport config_helper\n\n# pylint: disable=unused-import\nfrom entropy_coder.all_models import all_models\n# pylint: enable=unused-import\nfrom entropy_coder.model import model_factory\n\n# Checkpoint used to restore the model parameters.\ntf.app.flags.DEFINE_string('checkpoint', None,\n \"\"\"Model checkpoint.\"\"\")\n\n# Model selection and configuration.\ntf.app.flags.DEFINE_string('model', None, \"\"\"Underlying encoder model.\"\"\")\ntf.app.flags.DEFINE_string('model_config', None,\n \"\"\"Model config protobuf given as text file.\"\"\")\n\n# File holding the binary codes.\ntf.flags.DEFINE_string('input_codes', None, 'Location of binary code file.')\n\nFLAGS = tf.flags.FLAGS\n\n\ndef main(_):\n if (FLAGS.input_codes is None or FLAGS.model is None):\n print('\\nUsage: python entropy_coder_single.py --model=progressive '\n '--model_config=model_config.json '\n '--iteration=15\\n\\n')\n return\n\n # if FLAGS.iteration < -1 or FLAGS.iteration > 15:\n # print ('\\n--iteration must be between 0 and 15 inclusive, or -1 to infer '\n # 'from file.\\n')\n # return\n # iteration = FLAGS.iteration\n\n if not tf.gfile.Exists(FLAGS.input_codes):\n print('\\nInput codes not found.\\n')\n return\n\n with tf.gfile.FastGFile(FLAGS.input_codes, 'rb') as code_file:\n contents = code_file.read()\n loaded_codes = np.load(io.BytesIO(contents))\n assert 'codes' in loaded_codes.files and 'shape' in loaded_codes.files\n loaded_shape = loaded_codes['shape']\n loaded_array = loaded_codes['codes']\n\n # Unpack and recover code shapes.\n unpacked_codes = np.reshape(np.unpackbits(loaded_array)\n [:np.prod(loaded_shape)],\n 
loaded_shape)\n\n numpy_int_codes = unpacked_codes.transpose([1, 2, 3, 0, 4])\n numpy_int_codes = numpy_int_codes.reshape([numpy_int_codes.shape[0],\n numpy_int_codes.shape[1],\n numpy_int_codes.shape[2],\n -1])\n numpy_codes = numpy_int_codes.astype(np.float32) * 2.0 - 1.0\n\n with tf.Graph().as_default() as graph:\n # TF tensor to hold the binary codes to losslessly compress.\n batch_size = 1\n codes = tf.placeholder(tf.float32, shape=numpy_codes.shape)\n\n # Create the entropy coder model.\n global_step = None\n optimizer = None\n model = model_factory.GetModelRegistry().CreateModel(FLAGS.model)\n model_config_string = config_helper.GetConfigString(FLAGS.model_config)\n model.Initialize(global_step, optimizer, model_config_string)\n model.BuildGraph(codes)\n\n saver = tf.train.Saver(sharded=True, keep_checkpoint_every_n_hours=12.0)\n\n with tf.Session(graph=graph) as sess:\n # Initialize local variables.\n sess.run(tf.local_variables_initializer())\n\n # Restore model variables.\n saver.restore(sess, FLAGS.checkpoint)\n\n tf_tensors = {\n 'code_length': model.average_code_length\n }\n feed_dict = {codes: numpy_codes}\n np_tensors = sess.run(tf_tensors, feed_dict=feed_dict)\n\n print('Additional compression ratio: {}'.format(\n np_tensors['code_length']))\n\n\nif __name__ == '__main__':\n tf.app.run()\n", "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests of the block operators.\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nimport block_base\nimport blocks_operator\n\n\nclass AddOneBlock(block_base.BlockBase):\n\n def __init__(self, name=None):\n super(AddOneBlock, self).__init__(name)\n\n def _Apply(self, x):\n return x + 1.0\n\n\nclass SquareBlock(block_base.BlockBase):\n\n def __init__(self, name=None):\n super(SquareBlock, self).__init__(name)\n\n def _Apply(self, x):\n return x * x\n\n\nclass BlocksOperatorTest(tf.test.TestCase):\n\n def testComposition(self):\n x_value = np.array([[1.0, 2.0, 3.0],\n [-1.0, -2.0, -3.0]])\n y_expected_value = np.array([[4.0, 9.0, 16.0],\n [0.0, 1.0, 4.0]])\n\n x = tf.placeholder(dtype=tf.float32, shape=[2, 3])\n complex_block = blocks_operator.CompositionOperator(\n [AddOneBlock(),\n SquareBlock()])\n y = complex_block(x)\n\n with self.test_session():\n y_value = y.eval(feed_dict={x: x_value})\n\n self.assertAllClose(y_expected_value, y_value)\n\n\nif __name__ == '__main__':\n tf.test.main()\n", "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 
express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"This implements single view TCN triplet loss.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef pairwise_squared_distance(feature):\n \"\"\"Computes the squared pairwise distance matrix.\n\n output[i, j] = || feature[i, :] - feature[j, :] ||_2^2\n\n Args:\n feature: 2-D Tensor of size [number of data, feature dimension]\n\n Returns:\n pairwise_squared_distances: 2-D Tensor of size\n [number of data, number of data]\n \"\"\"\n pairwise_squared_distances = tf.add(\n tf.reduce_sum(\n tf.square(feature), axis=1, keep_dims=True),\n tf.reduce_sum(\n tf.square(tf.transpose(feature)), axis=0,\n keep_dims=True)) - 2.0 * tf.matmul(feature, tf.transpose(feature))\n\n # Deal with numerical inaccuracies. Set small negatives to zero.\n pairwise_squared_distances = tf.maximum(pairwise_squared_distances, 0.0)\n return pairwise_squared_distances\n\n\ndef masked_maximum(data, mask, dim=1):\n \"\"\"Computes the axis-wise maximum over chosen elements.\n\n Args:\n data: N-D Tensor.\n mask: N-D Tensor of zeros or ones.\n dim: The dimension over which to compute the maximum.\n\n Returns:\n masked_maximums: N-D Tensor.\n The maximized dimension is of size 1 after the operation.\n \"\"\"\n axis_minimums = tf.reduce_min(data, dim, keep_dims=True)\n masked_maximums = tf.reduce_max(\n tf.multiply(\n data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums\n return masked_maximums\n\n\ndef masked_minimum(data, mask, dim=1):\n \"\"\"Computes the axis-wise minimum over chosen elements.\n\n Args:\n data: 2-D Tensor of size [n, m].\n mask: 2-D Boolean Tensor of size [n, m].\n dim: The dimension over which to compute the minimum.\n\n Returns:\n masked_minimums: N-D Tensor.\n The minimized dimension is of size 1 after the operation.\n \"\"\"\n axis_maximums = tf.reduce_max(data, dim, keep_dims=True)\n masked_minimums = tf.reduce_min(\n tf.multiply(\n data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums\n return masked_minimums\n\n\ndef singleview_tcn_loss(\n embeddings, timesteps, pos_radius, neg_radius, margin=1.0,\n sequence_ids=None, multiseq=False):\n \"\"\"Computes the single view triplet loss with semi-hard negative mining.\n\n The loss encourages the positive distances (between a pair of embeddings\n with the same labels) to be smaller than the minimum negative distance\n among those negatives which are at least greater than the positive distance\n plus the margin constant (the semi-hard negatives) in the mini-batch. If no\n such negative exists, the largest negative distance is used instead.\n\n Anchor, positive, negative selection is as follows:\n Anchors: We consider every embedding timestep as an anchor.\n Positives: pos_radius defines a radius (in timesteps) around each anchor from\n which positives can be drawn. E.g. An anchor with t=10 and a pos_radius of\n 2 produces a set of 4 (anchor,pos) pairs [(a=10, p=8), ... (a=10, p=12)].\n Negatives: neg_radius defines a boundary (in timesteps) around each anchor,\n outside of which negatives can be drawn. E.g. 
An anchor with t=10 and a\n neg_radius of 4 means negatives can be any t_neg where t_neg < 6 and\n t_neg > 14.\n \n Args:\n embeddings: 2-D Tensor of embedding vectors.\n timesteps: 1-D Tensor with shape [batch_size, 1] of sequence timesteps.\n pos_radius: int32; the size of the window (in timesteps) around each anchor\n timestep that a positive can be drawn from.\n neg_radius: int32; the size of the window (in timesteps) around each anchor\n timestep that defines a negative boundary. Negatives can only be chosen\n where negative timestep t is < negative boundary min or > negative\n boundary max.\n margin: Float; the triplet loss margin hyperparameter.\n sequence_ids: (Optional) 1-D Tensor with shape [batch_size, 1] of sequence\n ids. Together (sequence_id, sequence_timestep) give us a unique index for\n each image if we have multiple sequences in a batch.\n multiseq: Boolean, whether or not the batch is composed of multiple\n sequences (with possibly colliding timesteps).\n \n Returns:\n triplet_loss: tf.float32 scalar.\n \"\"\"\n assert neg_radius > pos_radius\n\n # If timesteps shape isn't [batchsize, 1], reshape to [batch_size, 1].\n tshape = tf.shape(timesteps)\n assert tshape.shape == 2 or tshape.shape == 1\n if tshape.shape == 1:\n timesteps = tf.reshape(timesteps, [tshape[0], 1])\n\n # Build pairwise squared distance matrix.\n pdist_matrix = pairwise_squared_distance(embeddings)\n\n # Build pairwise binary adjacency matrix, where adjacency[i,j] is True\n # if timestep j is inside the positive range for timestep i and both\n # timesteps come from the same sequence.\n pos_radius = tf.cast(pos_radius, tf.int32)\n\n if multiseq:\n # If sequence_ids shape isn't [batchsize, 1], reshape to [batch_size, 1].\n tshape = tf.shape(sequence_ids)\n assert tshape.shape == 2 or tshape.shape == 1\n if tshape.shape == 1:\n sequence_ids = tf.reshape(sequence_ids, [tshape[0], 1])\n\n # Build pairwise binary adjacency matrix based on sequence_ids\n sequence_adjacency = tf.equal(sequence_ids, tf.transpose(sequence_ids))\n\n # Invert so we can select negatives only.\n sequence_adjacency_not = tf.logical_not(sequence_adjacency)\n\n in_pos_range = tf.logical_and(\n tf.less_equal(\n tf.abs(timesteps - tf.transpose(timesteps)), pos_radius),\n sequence_adjacency)\n # Build pairwise binary discordance matrix, where discordance[i,j] is True\n # if timestep j is inside the negative range for timestep i or if the\n # timesteps come from different sequences.\n in_neg_range = tf.logical_or(\n tf.greater(tf.abs(timesteps - tf.transpose(timesteps)), neg_radius),\n sequence_adjacency_not\n )\n else:\n in_pos_range = tf.less_equal(\n tf.abs(timesteps - tf.transpose(timesteps)), pos_radius)\n in_neg_range = tf.greater(tf.abs(timesteps - tf.transpose(timesteps)),\n neg_radius)\n\n batch_size = tf.size(timesteps)\n\n # compute the mask\n pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])\n mask = tf.logical_and(\n tf.tile(in_neg_range, [batch_size, 1]),\n tf.greater(pdist_matrix_tile,\n tf.reshape(tf.transpose(pdist_matrix), [-1, 1])))\n mask_final = tf.reshape(\n tf.greater(\n tf.reduce_sum(\n tf.cast(\n mask, dtype=tf.float32), 1, keep_dims=True),\n 0.0), [batch_size, batch_size])\n mask_final = tf.transpose(mask_final)\n\n in_neg_range = tf.cast(in_neg_range, dtype=tf.float32)\n mask = tf.cast(mask, dtype=tf.float32)\n\n # negatives_outside: smallest D_an where D_an > D_ap\n negatives_outside = tf.reshape(\n masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])\n negatives_outside = 
def singleview_tcn_loss(
    embeddings, timesteps, pos_radius, neg_radius, margin=1.0,
    sequence_ids=None, multiseq=False):
  """Computes the single view triplet loss with semi-hard negative mining.

  The loss encourages each positive distance (between a pair of embeddings
  drawn from the same positive window) to be smaller than the minimum
  negative distance among those negatives whose distance is at least greater
  than the positive distance plus the margin constant (so-called semi-hard
  negatives) in the mini-batch. If no such negative exists, the largest
  negative distance is used instead.

  Anchor, positive, negative selection is as follows:
    Anchors: We consider every embedding timestep as an anchor.
    Positives: pos_radius defines a radius (in timesteps) around each anchor
      from which positives can be drawn. E.g. an anchor with t=10 and a
      pos_radius of 2 produces a set of 4 (anchor, pos) pairs
      [(a=10, p=8), ... (a=10, p=12)].
    Negatives: neg_radius defines a boundary (in timesteps) around each
      anchor, outside of which negatives can be drawn. E.g. an anchor with
      t=10 and a neg_radius of 4 means negatives can be any t_neg where
      t_neg < 6 or t_neg > 14.

  Args:
    embeddings: 2-D Tensor of embedding vectors.
    timesteps: Tensor of sequence timesteps, of shape [batch_size] or
      [batch_size, 1].
    pos_radius: int32; the size of the window (in timesteps) around each
      anchor timestep that a positive can be drawn from.
    neg_radius: int32; the size of the window (in timesteps) around each
      anchor timestep that defines a negative boundary. Negatives can only
      be chosen where negative timestep t is < negative boundary min or
      > negative boundary max.
    margin: Float; the triplet loss margin hyperparameter.
    sequence_ids: (Optional) Tensor of sequence ids, of shape [batch_size]
      or [batch_size, 1]. Together (sequence_id, sequence_timestep) give us
      a unique index for each image if we have multiple sequences in a
      batch.
    multiseq: Boolean, whether or not the batch is composed of multiple
      sequences (with possibly colliding timesteps).

  Returns:
    triplet_loss: tf.float32 scalar.
  """
  assert neg_radius > pos_radius

  # If timesteps shape isn't [batch_size, 1], reshape to [batch_size, 1].
  tshape = tf.shape(timesteps)
  assert tshape.shape == 2 or tshape.shape == 1
  if tshape.shape == 1:
    timesteps = tf.reshape(timesteps, [tshape[0], 1])

  # Build pairwise squared distance matrix.
  pdist_matrix = pairwise_squared_distance(embeddings)

  # Build pairwise binary adjacency matrix, where adjacency[i, j] is True
  # if timestep j is inside the positive range for timestep i and both
  # timesteps come from the same sequence.
  pos_radius = tf.cast(pos_radius, tf.int32)

  if multiseq:
    # If sequence_ids shape isn't [batch_size, 1], reshape to
    # [batch_size, 1].
    tshape = tf.shape(sequence_ids)
    assert tshape.shape == 2 or tshape.shape == 1
    if tshape.shape == 1:
      sequence_ids = tf.reshape(sequence_ids, [tshape[0], 1])

    # Build pairwise binary adjacency matrix based on sequence_ids.
    sequence_adjacency = tf.equal(sequence_ids, tf.transpose(sequence_ids))

    # Invert so we can select negatives only.
    sequence_adjacency_not = tf.logical_not(sequence_adjacency)

    in_pos_range = tf.logical_and(
        tf.less_equal(
            tf.abs(timesteps - tf.transpose(timesteps)), pos_radius),
        sequence_adjacency)
    # Build pairwise binary discordance matrix, where discordance[i, j] is
    # True if timestep j is inside the negative range for timestep i or if
    # the timesteps come from different sequences.
    in_neg_range = tf.logical_or(
        tf.greater(tf.abs(timesteps - tf.transpose(timesteps)), neg_radius),
        sequence_adjacency_not)
  else:
    in_pos_range = tf.less_equal(
        tf.abs(timesteps - tf.transpose(timesteps)), pos_radius)
    in_neg_range = tf.greater(tf.abs(timesteps - tf.transpose(timesteps)),
                              neg_radius)

  batch_size = tf.size(timesteps)

  # Compute the mask.
  pdist_matrix_tile = tf.tile(pdist_matrix, [batch_size, 1])
  mask = tf.logical_and(
      tf.tile(in_neg_range, [batch_size, 1]),
      tf.greater(pdist_matrix_tile,
                 tf.reshape(tf.transpose(pdist_matrix), [-1, 1])))
  mask_final = tf.reshape(
      tf.greater(
          tf.reduce_sum(
              tf.cast(
                  mask, dtype=tf.float32), 1, keep_dims=True),
          0.0), [batch_size, batch_size])
  mask_final = tf.transpose(mask_final)

  in_neg_range = tf.cast(in_neg_range, dtype=tf.float32)
  mask = tf.cast(mask, dtype=tf.float32)

  # negatives_outside: smallest D_an where D_an > D_ap.
  negatives_outside = tf.reshape(
      masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
  negatives_outside = tf.transpose(negatives_outside)

  # negatives_inside: largest D_an.
  negatives_inside = tf.tile(
      masked_maximum(pdist_matrix, in_neg_range), [1, batch_size])
  semi_hard_negatives = tf.where(
      mask_final, negatives_outside, negatives_inside)

  loss_mat = tf.add(margin, pdist_matrix - semi_hard_negatives)

  mask_positives = tf.cast(
      in_pos_range, dtype=tf.float32) - tf.diag(tf.ones([batch_size]))

  # In lifted-struct, the authors multiply by 0.5 for the upper-triangular
  # part; in semi-hard mining we take all positive pairs except the
  # diagonal.
  num_positives = tf.reduce_sum(mask_positives)

  triplet_loss = tf.truediv(
      tf.reduce_sum(tf.maximum(tf.multiply(loss_mat, mask_positives), 0.0)),
      num_positives,
      name='triplet_svtcn_loss')

  return triplet_loss
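# --- Editor's note: illustrative sketch, not part of the original file. ---
# A minimal usage sketch, assuming a TF 1.x graph/session workflow with
# random embeddings standing in for a real network:
if __name__ == '__main__':
  demo_embeddings = tf.random_normal([32, 128])
  demo_timesteps = tf.range(32)     # one sequence with timesteps 0..31
  demo_loss = singleview_tcn_loss(
      demo_embeddings, demo_timesteps, pos_radius=2, neg_radius=8,
      margin=1.0)
  with tf.Session() as sess:
    print('example svtcn loss:', sess.run(demo_loss))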
", "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Differentially private optimizers."""

from __future__ import division

import tensorflow as tf

from differential_privacy.dp_sgd.dp_optimizer import utils
from differential_privacy.dp_sgd.per_example_gradients import per_example_gradients


class DPGradientDescentOptimizer(tf.train.GradientDescentOptimizer):
  """Differentially private gradient descent optimizer."""

  def __init__(self, learning_rate, eps_delta, sanitizer,
               sigma=None, use_locking=False, name="DPGradientDescent",
               batches_per_lot=1):
    """Construct a differentially private gradient descent optimizer.

    The optimizer uses a fixed privacy budget for each batch of training.

    Args:
      learning_rate: for GradientDescentOptimizer.
      eps_delta: EpsDelta pair for each epoch.
      sanitizer: for sanitizing the gradient.
      sigma: noise sigma. If None, use the eps_delta pair to compute sigma;
        otherwise use the supplied sigma directly.
      use_locking: use locking.
      name: name for the object.
      batches_per_lot: number of batches in a lot.
    """

    super(DPGradientDescentOptimizer, self).__init__(learning_rate,
                                                     use_locking, name)

    # Also, if needed, define the gradient accumulators.
    self._batches_per_lot = batches_per_lot
    self._grad_accum_dict = {}
    if batches_per_lot > 1:
      self._batch_count = tf.Variable(1, dtype=tf.int32, trainable=False,
                                      name="batch_count")
      var_list = tf.trainable_variables()
      with tf.variable_scope("grad_acc_for"):
        for var in var_list:
          v_grad_accum = tf.Variable(tf.zeros_like(var),
                                     trainable=False,
                                     name=utils.GetTensorOpName(var))
          self._grad_accum_dict[var.name] = v_grad_accum

    self._eps_delta = eps_delta
    self._sanitizer = sanitizer
    self._sigma = sigma

  def compute_sanitized_gradients(self, loss, var_list=None,
                                  add_noise=True):
    """Compute the sanitized gradients.

    Args:
      loss: the loss tensor.
      var_list: the optional variables.
      add_noise: if true, then add noise. Always clip.
    Returns:
      a list of sanitized gradients, one per variable in var_list.
    Raises:
      TypeError: if var_list contains a non-variable.
    """

    self._assert_valid_dtypes([loss])

    xs = [tf.convert_to_tensor(x) for x in var_list]
    px_grads = per_example_gradients.PerExampleGradients(loss, xs)
    sanitized_grads = []
    for px_grad, v in zip(px_grads, var_list):
      tensor_name = utils.GetTensorOpName(v)
      sanitized_grad = self._sanitizer.sanitize(
          px_grad, self._eps_delta, sigma=self._sigma,
          tensor_name=tensor_name, add_noise=add_noise,
          num_examples=self._batches_per_lot * tf.slice(
              tf.shape(px_grad), [0], [1]))
      sanitized_grads.append(sanitized_grad)

    return sanitized_grads
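  # --- Editor's note: illustrative sketch, not part of the original file. ---
  # The sanitizer itself is defined elsewhere in this package; conceptually,
  # DP-SGD sanitization bounds each per-example gradient's L2 norm and adds
  # Gaussian noise to the aggregate.  The hypothetical helper below sketches
  # that pattern in plain TensorFlow for gradients flattened to
  # [num_examples, dim]; l2_bound and sigma are made-up defaults here, not
  # the repo's API.
  @staticmethod
  def _toy_sanitize_sketch(px_grads, l2_bound=1.0, sigma=4.0):
    """Hypothetical stand-in showing the clip-then-noise pattern."""
    clipped = tf.clip_by_norm(px_grads, l2_bound, axes=[1])  # clip each row
    summed = tf.reduce_sum(clipped, axis=0)                  # aggregate
    noise = tf.random_normal(tf.shape(summed), stddev=l2_bound * sigma)
    return summed + noise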
  def minimize(self, loss, global_step=None, var_list=None,
               name=None):
    """Minimize using sanitized gradients.

    This gets a var_list which is the list of trainable variables.
    For each var in var_list, we defined a grad_accumulator variable
    during init. When batches_per_lot > 1, we accumulate the gradient
    update in those. At the end of each lot, we apply the update back to
    the variable. This has the effect that for each lot we compute
    gradients at the point at the beginning of the lot, and then apply one
    update at the end of the lot. In other words, semantically, we are doing
    SGD with one lot being the equivalent of one usual batch of size
    batch_size * batches_per_lot.
    This allows us to simulate larger batches than our memory size would
    permit.

    The lr and the num_steps are in the lot world.

    Args:
      loss: the loss tensor.
      global_step: the optional global step.
      var_list: the optional variables.
      name: the optional name.
    Returns:
      the operation that runs one step of DP gradient descent.
    """

    # First validate the var_list.
    if var_list is None:
      var_list = tf.trainable_variables()
    for var in var_list:
      if not isinstance(var, tf.Variable):
        raise TypeError("Argument is not a tf.Variable: %s" % var)

    # Modification: apply the gradient once every batches_per_lot steps.
    # This may lead to smaller error.

    if self._batches_per_lot == 1:
      sanitized_grads = self.compute_sanitized_gradients(
          loss, var_list=var_list)

      # Materialize the pairs: zip returns a one-shot iterator on Python 3,
      # and grads_and_vars is consumed twice below.
      grads_and_vars = list(zip(sanitized_grads, var_list))
      self._assert_valid_dtypes(
          [v for g, v in grads_and_vars if g is not None])

      apply_grads = self.apply_gradients(grads_and_vars,
                                         global_step=global_step, name=name)
      return apply_grads

    # Condition for deciding whether to accumulate the gradient
    # or actually apply it.
    # We use the private self._batch_count to keep track of the number of
    # batches; global_step counts the number of lots processed.

    update_cond = tf.equal(tf.constant(0),
                           tf.mod(self._batch_count,
                                  tf.constant(self._batches_per_lot)))

    # Things to do for batches other than the last of the lot:
    # add non-noisy clipped grads to the shadow variables.

    def non_last_in_lot_op(loss, var_list):
      """Ops to do for a typical batch.

      For a batch that is not the last one in the lot, we simply compute the
      sanitized gradients and apply them to the grad_acc variables.

      Args:
        loss: loss function tensor
        var_list: list of variables
      Returns:
        A tensorflow op to do the updates to the gradient accumulators.
      """
      sanitized_grads = self.compute_sanitized_gradients(
          loss, var_list=var_list, add_noise=False)

      update_ops_list = []
      for var, grad in zip(var_list, sanitized_grads):
        grad_acc_v = self._grad_accum_dict[var.name]
        update_ops_list.append(grad_acc_v.assign_add(grad))
      update_ops_list.append(self._batch_count.assign_add(1))
      return tf.group(*update_ops_list)

    # Things to do for the last batch of a lot:
    # add noisy clipped grads to the accumulator, then apply the
    # accumulated grads to the variables.

    def last_in_lot_op(loss, var_list, global_step):
      """Ops to do for the last batch in a lot.

      For the last batch in the lot, we first add the sanitized gradients to
      the gradient acc variables, and then apply these
      values over to the original variables (via an apply gradient).

      Args:
        loss: loss function tensor
        var_list: list of variables
        global_step: optional global step to be passed to apply_gradients
      Returns:
        A tensorflow op to push updates from shadow vars to real vars.
      """

      # We add noise on the last batch of the lot. This is why we need this
      # code snippet that looks almost identical to the non_last_op case.
      sanitized_grads = self.compute_sanitized_gradients(
          loss, var_list=var_list, add_noise=True)

      normalized_grads = []
      for var, grad in zip(var_list, sanitized_grads):
        grad_acc_v = self._grad_accum_dict[var.name]
        # To handle the lr difference per lot vs per batch, we divide the
        # update by the number of batches per lot.
        normalized_grad = tf.div(grad_acc_v.assign_add(grad),
                                 tf.to_float(self._batches_per_lot))

        normalized_grads.append(normalized_grad)

      with tf.control_dependencies(normalized_grads):
        # Materialize the pairs (zip is a one-shot iterator on Python 3).
        grads_and_vars = list(zip(normalized_grads, var_list))
        self._assert_valid_dtypes(
            [v for g, v in grads_and_vars if g is not None])
        apply_san_grads = self.apply_gradients(grads_and_vars,
                                               global_step=global_step,
                                               name="apply_grads")

      # Now reset the accumulators to zero.
      resets_list = []
      with tf.control_dependencies([apply_san_grads]):
        for _, acc in self._grad_accum_dict.items():
          reset = tf.assign(acc, tf.zeros_like(acc))
          resets_list.append(reset)
      resets_list.append(self._batch_count.assign_add(1))

      last_step_update = tf.group(*([apply_san_grads] + resets_list))
      return last_step_update

    # pylint: disable=g-long-lambda
    update_op = tf.cond(update_cond,
                        lambda: last_in_lot_op(
                            loss, var_list,
                            global_step),
                        lambda: non_last_in_lot_op(
                            loss, var_list))
    return tf.group(update_op)
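# --- Editor's note: illustrative sketch, not part of the original file. ---
# The lot semantics above amount to plain gradient accumulation: shadow
# variables sum the per-batch updates, and their mean is applied once per
# lot.  A self-contained toy version of that pattern in TF 1.x (all names
# here are hypothetical):
def _lot_accumulation_sketch(batches_per_lot=4, lr=0.1):
  """Hypothetical demo: accumulate grads over a lot, apply the mean once."""
  w = tf.Variable(0.0)
  x = tf.placeholder(tf.float32)
  loss = tf.square(w - x)
  (grad,) = tf.gradients(loss, [w])
  acc = tf.Variable(0.0, trainable=False)       # shadow accumulator
  accumulate_op = acc.assign_add(grad)          # run once per batch
  update = w.assign_sub(lr * acc / batches_per_lot)
  with tf.control_dependencies([update]):
    apply_op = acc.assign(0.0)                  # run once per lot, then reset
  return accumulate_op, apply_op, x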
", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Timing benchmark for AlexNet inference.

To run, use:
  bazel run -c opt --config=cuda \
      models/tutorials/image/alexnet:alexnet_benchmark

Across 100 steps on batch size = 128.

Forward pass:
Run on Tesla K40c: 145 +/- 1.5 ms / batch
Run on Titan X: 70 +/- 0.1 ms / batch

Forward-backward pass:
Run on Tesla K40c: 480 +/- 48 ms / batch
Run on Titan X: 244 +/- 30 ms / batch
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
from datetime import datetime
import math
import sys
import time

from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf

FLAGS = None


def print_activations(t):
  print(t.op.name, ' ', t.get_shape().as_list())


def inference(images):
  """Build the AlexNet model.

  Args:
    images: Images Tensor

  Returns:
    pool5: the last Tensor in the convolutional component of AlexNet.
    parameters: a list of Tensors corresponding to the weights and biases of
      the AlexNet model.
  """
  parameters = []
  # conv1
  with tf.name_scope('conv1') as scope:
    kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32,
                                             stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv1 = tf.nn.relu(bias, name=scope)
    print_activations(conv1)
    parameters += [kernel, biases]

  # lrn1
  with tf.name_scope('lrn1') as scope:
    lrn1 = tf.nn.local_response_normalization(conv1,
                                              alpha=1e-4,
                                              beta=0.75,
                                              depth_radius=2,
                                              bias=2.0)

  # pool1
  pool1 = tf.nn.max_pool(lrn1,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='VALID',
                         name='pool1')
  print_activations(pool1)

  # conv2
  with tf.name_scope('conv2') as scope:
    kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32,
                                             stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv2 = tf.nn.relu(bias, name=scope)
    parameters += [kernel, biases]
    print_activations(conv2)

  # lrn2
  with tf.name_scope('lrn2') as scope:
    lrn2 = tf.nn.local_response_normalization(conv2,
                                              alpha=1e-4,
                                              beta=0.75,
                                              depth_radius=2,
                                              bias=2.0)

  # pool2
  pool2 = tf.nn.max_pool(lrn2,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='VALID',
                         name='pool2')
  print_activations(pool2)

  # conv3
  with tf.name_scope('conv3') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384],
                                             dtype=tf.float32,
                                             stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv3 = tf.nn.relu(bias, name=scope)
    parameters += [kernel, biases]
    print_activations(conv3)

  # conv4
  with tf.name_scope('conv4') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 384, 256],
                                             dtype=tf.float32,
                                             stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv4 = tf.nn.relu(bias, name=scope)
    parameters += [kernel, biases]
    print_activations(conv4)

  # conv5
  with tf.name_scope('conv5') as scope:
    kernel = tf.Variable(tf.truncated_normal([3, 3, 256, 256],
                                             dtype=tf.float32,
                                             stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[256], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
    conv5 = tf.nn.relu(bias, name=scope)
    parameters += [kernel, biases]
    print_activations(conv5)

  # pool5
  pool5 = tf.nn.max_pool(conv5,
                         ksize=[1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='VALID',
                         name='pool5')
  print_activations(pool5)

  return pool5, parameters
def time_tensorflow_run(session, target, info_string):
  """Run the computation to obtain the target tensor and print timing stats.

  Args:
    session: the TensorFlow session to run the computation under.
    target: the target Tensor that is passed to the session's run() function.
    info_string: a string summarizing this run, to be printed with the stats.

  Returns:
    None
  """
  num_steps_burn_in = 10
  total_duration = 0.0
  total_duration_squared = 0.0
  for i in xrange(FLAGS.num_batches + num_steps_burn_in):
    start_time = time.time()
    _ = session.run(target)
    duration = time.time() - start_time
    if i >= num_steps_burn_in:
      if not i % 10:
        print('%s: step %d, duration = %.3f' %
              (datetime.now(), i - num_steps_burn_in, duration))
      total_duration += duration
      total_duration_squared += duration * duration
  mn = total_duration / FLAGS.num_batches
  vr = total_duration_squared / FLAGS.num_batches - mn * mn
  sd = math.sqrt(vr)
  print('%s: %s across %d steps, %.3f +/- %.3f sec / batch' %
        (datetime.now(), info_string, FLAGS.num_batches, mn, sd))
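# --- Editor's note: illustrative sketch, not part of the original file. ---
# The timing loop above keeps only two running sums and recovers the mean
# and standard deviation from them via Var(X) = E[X^2] - E[X]^2.  A quick
# NumPy cross-check of that formula with made-up durations:
def _check_running_stats():
  """Hypothetical helper comparing running-sum stats against NumPy."""
  import math
  import numpy as np
  durations = np.array([0.10, 0.12, 0.11, 0.13])
  n = len(durations)
  mn = durations.sum() / n
  sd = math.sqrt((durations ** 2).sum() / n - mn * mn)
  assert np.isclose(sd, durations.std())   # np.std is the population std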
def run_benchmark():
  """Run the benchmark on AlexNet."""
  with tf.Graph().as_default():
    # Generate some dummy images.
    image_size = 224
    # Note that our padding definition is slightly different from the one in
    # cuda-convnet. In order to force the model to start with the same
    # activation sizes, we add 3 to the image_size and employ VALID padding
    # above.
    images = tf.Variable(tf.random_normal([FLAGS.batch_size,
                                           image_size,
                                           image_size, 3],
                                          dtype=tf.float32,
                                          stddev=1e-1))

    # Build a Graph that computes the logits predictions from the
    # inference model.
    pool5, parameters = inference(images)

    # Build an initialization operation.
    init = tf.global_variables_initializer()

    # Start running operations on the Graph.
    config = tf.ConfigProto()
    config.gpu_options.allocator_type = 'BFC'
    sess = tf.Session(config=config)
    sess.run(init)

    # Run the forward benchmark.
    time_tensorflow_run(sess, pool5, "Forward")

    # Add a simple objective so we can calculate the backward pass.
    objective = tf.nn.l2_loss(pool5)
    # Compute the gradient with respect to all the parameters.
    grad = tf.gradients(objective, parameters)
    # Run the backward benchmark.
    time_tensorflow_run(sess, grad, "Forward-backward")


def main(_):
  run_benchmark()


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--batch_size',
      type=int,
      default=128,
      help='Batch size.'
  )
  parser.add_argument(
      '--num_batches',
      type=int,
      default=100,
      help='Number of batches to run.'
  )
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
" ]
[ [ "tensorflow.concat", "tensorflow.control_dependencies", "tensorflow.shape", "tensorflow.truncated_normal_initializer", "tensorflow.variable_scope" ], [ "tensorflow.variable_scope", "tensorflow.control_dependencies", "tensorflow.shape" ], [ "tensorflow.contrib.rnn.stack_bidirectional_dynamic_rnn", "tensorflow.contrib.rnn.LayerNormBasicLSTMCell", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.Dimension", "tensorflow.zeros_initializer", "tensorflow.reshape", "tensorflow.squeeze", "tensorflow.placeholder", "tensorflow.constant_initializer", "tensorflow.contrib.rnn.LSTMStateTuple", "tensorflow.logging.info", "tensorflow.variable_scope", "tensorflow.random_normal_initializer", "tensorflow.tile" ], [ "tensorflow.clip_by_value", "tensorflow.reduce_mean", "tensorflow.stack", "tensorflow.zeros_like", "tensorflow.get_default_graph", "tensorflow.split", "tensorflow.group", "tensorflow.summary.scalar" ], [ "tensorflow.device", "tensorflow.norm", "tensorflow.train.StopAtStepHook", "tensorflow.train.LoggingTensorHook", "tensorflow.gfile.Exists", "tensorflow.train.get_or_create_global_step", "tensorflow.train.replica_device_setter", "tensorflow.gfile.MakeDirs", "tensorflow.name_scope", "tensorflow.train.AdamOptimizer", "tensorflow.contrib.training.clip_gradient_norms_fn", "tensorflow.summary.scalar", "tensorflow.image.resize_area", "tensorflow.app.run" ], [ "tensorflow.Graph", "tensorflow.train.start_queue_runners", "tensorflow.test.main", "tensorflow.global_variables_initializer", "tensorflow.reset_default_graph", "tensorflow.trainable_variables" ], [ "tensorflow.constant", "tensorflow.Variable", "tensorflow.test.main", "tensorflow.initialize_all_variables", "tensorflow.random_uniform", "tensorflow.random_normal" ], [ "tensorflow.Graph", "tensorflow.local_variables_initializer", "tensorflow.flags.DEFINE_string", "tensorflow.gfile.Exists", "tensorflow.placeholder", "numpy.unpackbits", "tensorflow.app.flags.DEFINE_string", "tensorflow.Session", "numpy.prod", "tensorflow.train.Saver", "tensorflow.gfile.FastGFile", "tensorflow.app.run" ], [ "numpy.array", "tensorflow.placeholder", "tensorflow.test.main" ], [ "tensorflow.reduce_max", "tensorflow.multiply", "tensorflow.transpose", "tensorflow.shape", "tensorflow.maximum", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.reshape", "tensorflow.ones", "tensorflow.reduce_min", "tensorflow.tile", "tensorflow.add", "tensorflow.where", "tensorflow.square", "tensorflow.logical_not", "tensorflow.size" ], [ "tensorflow.convert_to_tensor", "tensorflow.constant", "tensorflow.Variable", "tensorflow.control_dependencies", "tensorflow.shape", "tensorflow.zeros_like", "tensorflow.variable_scope", "tensorflow.to_float", "tensorflow.trainable_variables", "tensorflow.group" ], [ "tensorflow.nn.bias_add", "tensorflow.nn.relu", "tensorflow.Graph", "tensorflow.truncated_normal", "tensorflow.constant", "tensorflow.nn.max_pool", "tensorflow.gradients", "tensorflow.ConfigProto", "tensorflow.app.run", "tensorflow.global_variables_initializer", "tensorflow.nn.l2_loss", "tensorflow.random_normal", "tensorflow.name_scope", "tensorflow.Session", "tensorflow.nn.local_response_normalization", "tensorflow.nn.conv2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]