repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---
tkrons/SPFlow_topdownrules | [
"32233bf29d107c62f0f727b0e64aaa74b37cfe1e",
"32233bf29d107c62f0f727b0e64aaa74b37cfe1e"
] | [
"src/spn/experiments/AQP/leaves/static/InferenceRange.py",
"src/spn/experiments/AQP/tests/test_static_numeric.py"
] | [
"\"\"\"\nCreated on June 21, 2018\n\n@author: Moritz\n\"\"\"\n\nimport numpy as np\n\nfrom spn.algorithms.Inference import add_node_likelihood\nfrom spn.experiments.AQP.leaves.static.StaticNumeric import StaticNumeric\n\n\ndef static_likelihood_range(node, ranges, dtype=np.float64, **kwargs):\n assert len(node.scope) == 1, node.scope\n\n probs = np.ones((ranges.shape[0], 1), dtype=dtype)\n ranges = ranges[:, node.scope[0]]\n\n for i, rang in enumerate(ranges):\n\n # Skip if no range is specified aka use a log-probability of 0 for that instance\n if rang is None:\n continue\n\n # Skip if no values for the range are provided\n if rang.is_impossible():\n probs[i] = 0\n\n # Compute the sum of the probability of all possible values\n probs[i] = sum([_compute_probability_for_range(node, interval) for interval in rang.get_ranges()])\n\n return probs\n\n\ndef _compute_probability_for_range(node, interval):\n\n if len(interval) == 1:\n if node.val == interval[0]:\n return 1\n else:\n return 0\n else:\n lower = interval[0]\n higher = interval[1]\n\n if lower <= node.val and node.val <= higher:\n return 1\n else:\n return 0\n\n\ndef add_static_inference_range_support():\n add_node_likelihood(StaticNumeric, static_likelihood_range)\n",
"\"\"\"\nCreated on June 21, 2018\n\n@author: Moritz\n\"\"\"\nimport unittest\n\nimport numpy as np\n\nfrom spn.experiments.AQP.leaves.static.StaticNumeric import create_static_leaf\nfrom spn.experiments.AQP.leaves.static import Inference, InferenceRange, SamplingRange\n\n\nfrom spn.experiments.AQP.Ranges import NumericRange\n\n\nclass TestStatic(unittest.TestCase):\n def test_inference(self):\n\n val = 20\n scope = [0]\n node = create_static_leaf(val, scope)\n\n evidence = [[20.0]]\n np_evd = np.array(evidence, np.float64)\n prob = Inference.static_likelihood(node, np_evd)[0][0]\n self.assertEqual(prob, 1)\n\n evidence = [[np.nan]]\n np_evd = np.array(evidence, np.float64)\n prob = Inference.static_likelihood(node, np_evd)[0][0]\n self.assertEqual(prob, 1)\n\n evidence = [[29]]\n np_evd = np.array(evidence, np.float64)\n prob = Inference.static_likelihood(node, np_evd)[0][0]\n self.assertEqual(prob, 0)\n\n evidence = [[19]]\n np_evd = np.array(evidence, np.float64)\n prob = Inference.static_likelihood(node, np_evd)[0][0]\n self.assertEqual(prob, 0)\n\n evidence = [[20.00001]]\n np_evd = np.array(evidence, np.float64)\n prob = Inference.static_likelihood(node, np_evd)[0][0]\n self.assertEqual(prob, 0)\n\n evidence = [[20.00001], [np.nan], [20], [22]]\n np_evd = np.array(evidence, np.float64)\n prob = Inference.static_likelihood(node, np_evd)\n self.assertEqual(prob[0][0], 0)\n self.assertEqual(prob[1][0], 1)\n self.assertEqual(prob[2][0], 1)\n self.assertEqual(prob[3][0], 0)\n\n def test_inference_range(self):\n\n val = 20\n scope = [0]\n node = create_static_leaf(val, scope)\n\n rang = [NumericRange([[20]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 1)\n\n rang = [NumericRange([[19.2]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 0)\n\n rang = [NumericRange([[20.0003]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 0)\n\n rang = [NumericRange([[0]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 0)\n\n rang = [NumericRange([[0, 10]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 0)\n\n rang = [NumericRange([[0, 200]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 1)\n\n rang = [NumericRange([[19.99999, 20.11]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 1)\n\n rang = [NumericRange([[19.99999, 20]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 1)\n\n rang = [NumericRange([[20, 20.321]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 1)\n\n rang = [NumericRange([[19, 19.5], [20.5, 21]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 0)\n\n rang = [NumericRange([[19, 19.5], [19.999, 20.111], [20.5, 21]])]\n ranges = np.array([rang])\n prob = InferenceRange.static_likelihood_range(node, ranges)[0][0]\n self.assertAlmostEqual(prob, 1)\n\n def test_sample_range(self):\n\n val = 20\n 
scope = [0]\n node = create_static_leaf(val, scope)\n\n samples = SamplingRange.sample_static_node(node, 10)\n self.assertAlmostEqual(np.average(samples), 20)\n\n rang = NumericRange([[20, 20.321]])\n ranges = np.array([rang])\n samples = SamplingRange.sample_static_node(node, 10, ranges=ranges)\n self.assertAlmostEqual(np.average(samples), 20)\n\n rang = NumericRange([[19, 20]])\n ranges = np.array([rang])\n samples = SamplingRange.sample_static_node(node, 10, ranges=ranges)\n self.assertAlmostEqual(np.average(samples), 20)\n\n rang = NumericRange([[19, 19.5], [19.999, 20.111], [20.5, 21]])\n ranges = np.array([rang])\n samples = SamplingRange.sample_static_node(node, 10, ranges=ranges)\n self.assertAlmostEqual(np.average(samples), 20)\n\n rang = NumericRange([[19, 19.5]])\n ranges = np.array([rang])\n samples = SamplingRange.sample_static_node(node, 10, ranges=ranges)\n self.assertTrue(all(np.isnan(samples)))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.ones"
],
[
"numpy.isnan",
"numpy.array",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kk2487/3dresnet | [
"d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94",
"d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94",
"d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94"
] | [
"models/resnet_lgdv2.py",
"apex/contrib/sparsity/asp.py",
"models/resnet_2p1d.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\nimport numpy as np\n\n__all__ = [\n 'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet200'\n]\n\ndef look_bottleneck_global(glo):\n if look_bottleneck_global:\n if glo is None:\n print('first bottleneck-> no global content!')\n else:\n print('glo has content!')\n\n# Can print the model structure\ndef model_info(model, report='summary'):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if report is 'full':\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n\n# Implement of bottleneck with se block\nclass BottleneckX(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None, first_block=False):\n super(BottleneckX, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n #self.bn1 = nn.GroupNorm(4, planes)\n\n self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n #self.bn2 = nn.GroupNorm(4, planes)\n\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n #self.bn3 = nn.GroupNorm(4, planes * 4)\n\n #self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n # If first bottleneckX, it does not contain global path\n self.first_block = first_block\n # If downsampling occurs, set true\n self.ds = False\n #self.se_module = SEModule(planes * 4, reduction=16, first_block=self.first_block)\n self.avg_pool = nn.AdaptiveAvgPool3d(1)\n\n #Implement LGD block\n self.fc1 = nn.Conv3d(planes * 4 // 2, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n #self.fc2 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn4 = nn.BatchNorm3d(planes * 4)\n #self.bn4 = nn.GroupNorm(4, planes * 4)\n\n self.fc3 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)\n self.fc4 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n\n self.fc5 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)\n self.fc6 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.LeakyReLU(inplace=True)\n\n def forward(self, xx):\n # xx contains two element: input->x and global path->glo\n x = xx[0]\n glo = xx[1]\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = 
self.bn3(out)\n #out = self.relu(out)\n\n # If downsample, downsampleing global path & residual channels\n if self.downsample is not None:\n if glo is not None:\n glo = self.avg_pool(glo)\n glo = self.fc1(glo)\n glo = self.relu(glo)\n residual = self.downsample(x)\n #LGD block\n if glo is not None:\n glo = self.fc3(glo)\n glo = self.relu(glo)\n glo = self.fc4(glo)\n glo = self.sigmoid(glo)\n\n out = out * glo\n #out = self.relu(out)\n\n glo2 = self.avg_pool(out)\n glo2 = self.fc5(glo2)\n glo2 = self.relu(glo2)\n glo2 = self.fc6(glo2)\n glo2 = self.sigmoid(glo2)\n\n g = glo + glo2\n g = self.relu(g)\n out = out + residual\n out = self.relu(out)\n outg = [out, g]\n # Normal bottleneck\n else:\n out = out + residual\n out = self.relu(out)\n outg = [out, residual]\n\n return outg\n\n\nclass ResNet(nn.Module):\n\n def __init__(self,\n blockx,\n layers,\n sample_size,\n sample_duration,\n shortcut_type='B',\n num_classes=400):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv3d(\n 3,\n 64,\n kernel_size=7,\n stride=(1, 2, 2),\n padding=(3, 3, 3),\n bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n #self.bn1 = nn.GroupNorm(4, 64)\n\n self.relu = nn.LeakyReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(blockx, 64, layers[0], shortcut_type, first_block=True)\n self.layer2 = self._make_layer(blockx, 128, layers[1], shortcut_type, stride=2, first_block=False)\n self.layer3 = self._make_layer(blockx, 256, layers[2], shortcut_type, stride=2, first_block=False)\n self.layer4 = self._make_layer(blockx, 512, layers[3], shortcut_type, stride=2, first_block=False)\n last_duration = int(math.ceil(sample_duration / 16))\n last_size = int(math.ceil(sample_size / 32))\n #last_size = 4\n self.avgpool = nn.AvgPool3d(\n (last_duration, last_size, last_size), stride=1)\n self.fc = nn.Linear(512 * blockx.expansion, num_classes)\n #self.fusion = nn.Conv3d(512 * block.expansion * 2, 512 * block.expansion, kernel_size=1, stride=1, padding=0, bias=False)\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, first_block=False):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(\n downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n nn.Conv3d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, first_block))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n #print('lgd')\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n lookshape = False\n # First time need to give two element to model\n xx = [x, None]\n x = self.layer1(xx)\n if lookshape:\n print('\\nlayer1-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n x = self.layer2(x)\n if lookshape:\n print('\\nlayer2-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n x = self.layer3(x)\n if lookshape:\n 
print('\\nlayer3-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n x = self.layer4(x)\n if lookshape:\n print('\\nlayer4-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n # After bottlenck part\n loc, g = x[0], x[1]\n #print(g)\n if lookshape:\n print('loc & g:--------')\n print(np.shape(loc))\n print(np.shape(g))\n print('----------------')\n\n\n x = self.avgpool(loc)\n\n #x = x + g\n #x = self.bn2(x)\n #x = self.relu(x)\n\n if lookshape:\n print('\\nlayer5-------------')\n print(np.shape(x))\n print('--------------')\n\n # Test local and global path feature maps fusion type below\n \n # 3d conv\n #x = torch.cat((x, g), 1)\n #x = self.fusion(x)\n #x = self.bn2(x)\n #x = self.relu(x)\n\n # concat (need to change fc layer filter number)\n #x = torch.cat((x, g), 1)\n #x = self.relu(x)\n\n x = x.view(x.size(0), -1)\n if lookshape:\n print('\\nlayer6-------------')\n print(np.shape(x))\n print('--------------')\n\n x = self.fc(x)\n\n if lookshape:\n print('\\nlayer7-------------')\n print(np.shape(x))\n print('--------------')\n\n return x\n\ndef get_fine_tuning_parameters(model, ft_begin_index):\n #if ft_begin_index == 0:\n # return model.parameters()\n print('ohraaaa')\n ft_module_names = []\n for i in range(ft_begin_index, 5):\n ft_module_names.append('layer{}'.format(i))\n ft_module_names.append('fc')\n\n # Look the content of ft_module\n print('ft: ', ft_module_names)\n\n parameters = []\n ii = 0\n\n '''\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ii >= 271: #220 271\n print(ii)\n parameters.append({'params': v})\n else:\n print('notfc')\n print(ii)\n parameters.append({'params': v, 'lr': 0.0})\n #parameters.append({'params': v})\n print(k)\n ii = ii+1\n return parameters\n '''\n \n # bakup code\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ft_module in k:\n #if ii >= 271:\n print('fc')\n #print(ii)\n parameters.append({'params': v})\n break\n else:\n print('notfc')\n #print(ii)\n #parameters.append({'params': v, 'lr': 0.0})\n parameters.append({'params': v})\n print(k)\n ii = ii+1\n return parameters\n \n\ndef resnet10(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model\n\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(BottleneckX, [3, 4, 6, 3], **kwargs)\n #model = ResNet(Bottleneck, BottleneckX, [3, 4, 23, 3], **kwargs)\n #model_info(model,'full')\n return model\n\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(BottleneckX, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef resnet152(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\n\ndef resnet200(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model",
"import types\nimport torch\nfrom .sparse_masklib import create_mask\n\ntorchvision_imported=True\ntry:\n import torchvision\nexcept ImportError:\n print(\"[ASP][Warning] torchvision cannot be imported.\")\n torchvision_imported=False\n\ndef eligible_modules(model, whitelist_layer_types, allowed_layer_names, disallowed_layer_names):\n eligible_modules_list = []\n for name, mod in model.named_modules():\n if isinstance(mod, whitelist_layer_types) and name not in disallowed_layer_names:\n if allowed_layer_names is not None and name not in allowed_layer_names:\n continue\n eligible_modules_list.append((name, mod))\n return eligible_modules_list\n\nclass ASP:\n __model = None\n __verbosity = 0\n __optimizer = None\n __sparse_parameters = []\n __calculate_mask = None\n\n @classmethod\n def init_model_for_pruning(cls, model, mask_calculator=\"m4n2_1d\",\n verbosity=3,\n whitelist=[torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d], \n allowed_layer_names=None, disallowed_layer_names=[],\n allow_recompute_mask=False):\n \"\"\"Call this method to modify your model to take advantage of sparse matrix multiplication.\n Note that this call alone only augments the model with additional buffers needed for sparse MMA,\n it does not enable use of sparse MMA. \n\n If you are starting with a fresh model:\n\n model = ...\n ASP.init_model_for_pruning(model, mask_calculator, ...)\n if (training) ASP.init_optimizer_for_pruning(optimizer)\n ASP.compute_sparse_masks() // sparsity is off by default, call when youy want to enable it.\n\n If you are starting from a checkpoint:\n\n model = ...\n ASP.init_model_for_pruning(model, mask_calculator, ...)\n torch.load(...)\n if (training) ASP.init_optimizer_for_pruning(optimizer)\n\n Arguments:\n model The model\n mask_calculator Either callable that computes mask given a tensor OR pattern string for sparse mask lib.\n verbosity Integer controling verbosity level.\n 0 -> Only errors.\n 1 -> Errors and warnings.\n 2 -> Errors, warnings and info.\n 3 -> Errors, warnings, info and debug.\n whitelist Module types approved for sparsity.\n allowed_layer_names If not None, only layer names that appear in this list are considered for sparsity.\n disallowed_layer_names If not [], only layer names that do not appear in this list are considered for sparsity.\n allow_recompute_mask If True, stores pruned values so that dense weights can be restored.\n Pruned weights are stored in CPU memory, hence this option does not increase GPU memory usage.\n Support for allow_recompute_mask can be removed, it is not part of our recipe -- AKM. \n \"\"\"\n assert (cls.__model is None), \"ASP has been initialized already.\"\n cls.__model = model\n cls.__verbosity = verbosity\n\n if isinstance(mask_calculator, str):\n def create_mask_from_pattern(param):\n return create_mask(param, mask_calculator).bool()\n cls.__calculate_mask = create_mask_from_pattern\n else:\n cls.__calculate_mask = mask_calculator #user defined function\n\n # function to extract variables that will be sparsified. 
\n # idea is that you will add one of these functions for each module type that can be sparsified.\n if torchvision_imported:\n print(\"[ASP] torchvision is imported, can work with the MaskRCNN/KeypointRCNN from torchvision.\")\n sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight'], torchvision.ops.misc.Conv2d: ['weight']}\n else:\n sparse_parameter_list = {torch.nn.Linear: ['weight'], torch.nn.Conv1d: ['weight'], torch.nn.Conv2d: ['weight'], torch.nn.Conv3d: ['weight']}\n for module_type in whitelist:\n assert (module_type in sparse_parameter_list), \"Module %s :: Don't know how to sparsify module.\" % module.dtype()\n\n # find all sparse modules, extract sparse parameters and decorate\n def add_sparse_attributes(module_name, module):\n sparse_parameters = sparse_parameter_list[type(module)]\n for p_name, p in module.named_parameters():\n if p_name in sparse_parameters and p.requires_grad:\n # check for NVIDIA's TC compatibility: we check along the horizontal direction\n if p.dtype == torch.float32 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0): #User defines FP32 and APEX internally uses FP16 math\n print(\"[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity\" % (module_name, p_name, str(p.size()), str(p.dtype)))\n continue\n if p.dtype == torch.float16 and ((p.size()[0] % 8) != 0 or (p.size()[1] % 16) != 0): #For Conv2d dim= K x CRS; we prune along C\n print(\"[ASP] Auto skipping pruning %s::%s of size=%s and type=%s for sparsity\" % (module_name, p_name, str(p.size()), str(p.dtype)))\n continue\n\n if cls.__verbosity >= 3:\n print(\"[ASP] Sparsifying %s::%s of size=%s and type=%s for sparsity\" % (module_name, p_name, str(p.size()), str(p.dtype)))\n \n mask = torch.ones_like(p).bool()\n buffname = p_name.split(\".\")[-1] # buffer names cannot contain \".\"\n module.register_buffer('__%s_mma_mask' % buffname, mask)\n if allow_recompute_mask:\n pruned = torch.zeros_like(p).cpu()\n module.register_buffer('__%s_mma_pruned_p' % buffname, pruned)\n else:\n pruned = None\n cls.__sparse_parameters.append((module_name, module, p_name, p, mask, pruned))\n\n for name, sparse_module in eligible_modules(model, tuple(whitelist), allowed_layer_names, disallowed_layer_names):\n add_sparse_attributes(name, sparse_module)\n\n @classmethod\n def init_optimizer_for_pruning(cls, optimizer):\n \"\"\"Call this method to monkey patch optimizer step function so that masks can be applied to\n gradients and weights during training.\n You must call init_model_for_pruning(...) 
before calling init_optimizer_for_pruning(...)\n \"\"\"\n assert (cls.__optimizer is None), \"ASP has initialized optimizer already.\"\n assert (cls.__calculate_mask is not None), \"Called ASP.init_optimizer_for_pruning before ASP.init_model_for_pruning.\"\n\n # store pointer to original optimizer step method\n cls.__optimizer = optimizer\n cls.__optimizer.__step = optimizer.step\n\n def __step(opt_self, *args, **kwargs):\n # prune gradients before step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.grad.mul_(mask)\n # call original optimizer step method\n rval = opt_self.__step(*args, **kwargs)\n # prune parameters after step method\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n p.mul_(mask)\n return rval\n cls.__optimizer.step = types.MethodType(__step, cls.__optimizer)\n\n @classmethod\n def compute_sparse_masks(cls):\n \"\"\"Call this method to enable sparsity.\n If init(...) was called with allow_recompute_mask=False AND sparsity is disabled, pruned field can be None.\n \"\"\"\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n if mask.sum() < mask.numel(): # when recalculating masks\n # restore dense parameter if allow_recompute_mask is enabled\n assert (pruned is not None), \"Unable to restore dense parameter because allow_recompute_mask == False\"\n p.add_(pruned.cuda())\n\n mask.set_(cls.__calculate_mask(p))\n\n if pruned is not None: # stow away pruned weights to cpu\n pruned.set_((p * (~mask)).cpu())\n\n p.mul_(mask) # in-place multiplication, so pruned weights are 0-values, hence checkpoint will have 0s for pruned weights\n if cls.__verbosity >= 2:\n print(\"[ASP] Enabled %.2f%% sparsity for %s::%s of size=%s and type=%s\" % (100.0*mask.sum()/mask.numel(), module_name, p_name, str(p.size()), str(p.dtype)))\n\n @classmethod\n def restore_pruned_weights(cls):\n \"\"\"Call this method to disable sparsity and restore all weights.\n This will only work if init(...) 
was called with allow_recompute=True.\n \"\"\"\n with torch.no_grad():\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n if mask.sum() < mask.numel():\n assert (pruned is not None), \"Unable to restore dense parameter because allow_recompute_mask == False\"\n p.add_(pruned.cuda())\n mask.fill_(1)\n pruned.zero_()\n if cls.__verbosity >= 2:\n print(\"[ASP] Disabled sparsity for %s::%s (dense weights restored)\" % (module_name, p_name))\n\n @classmethod\n def is_sparsity_enabled(cls):\n \"\"\"Call this method to determine if sparsity is enabled in the model.\n The typical use case is right after checkpoint has been loaded.\n \"\"\"\n total,sp100,sp50 = 0,0,0\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True\n \n @classmethod\n def prune_trained_model(cls, model, optimizer):\n # add mask buffers to model (init_model_for_pruning), augment optimizer (init_optimizer_for_pruning) and compute masks (compute_sparse_masks)\n cls.init_model_for_pruning(model, mask_calculator=\"m4n2_1d\", verbosity=2, whitelist=[torch.nn.Linear, torch.nn.Conv2d], allow_recompute_mask=False)\n cls.init_optimizer_for_pruning(optimizer)\n cls.compute_sparse_masks()\n\n",
"import math\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef get_inplanes():\n return [64, 128, 256, 512]\n\n\ndef conv1x3x3(in_planes, mid_planes, stride=1):\n return nn.Conv3d(in_planes,\n mid_planes,\n kernel_size=(1, 3, 3),\n stride=(1, stride, stride),\n padding=(0, 1, 1),\n bias=False)\n\n\ndef conv3x1x1(mid_planes, planes, stride=1):\n return nn.Conv3d(mid_planes,\n planes,\n kernel_size=(3, 1, 1),\n stride=(stride, 1, 1),\n padding=(1, 0, 0),\n bias=False)\n\n\ndef conv1x1x1(in_planes, out_planes, stride=1):\n return nn.Conv3d(in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, downsample=None):\n super().__init__()\n\n n_3d_parameters1 = in_planes * planes * 3 * 3 * 3\n n_2p1d_parameters1 = in_planes * 3 * 3 + 3 * planes\n mid_planes1 = n_3d_parameters1 // n_2p1d_parameters1\n self.conv1_s = conv1x3x3(in_planes, mid_planes1, stride)\n self.bn1_s = nn.BatchNorm3d(mid_planes1)\n self.conv1_t = conv3x1x1(mid_planes1, planes, stride)\n self.bn1_t = nn.BatchNorm3d(planes)\n\n n_3d_parameters2 = planes * planes * 3 * 3 * 3\n n_2p1d_parameters2 = planes * 3 * 3 + 3 * planes\n mid_planes2 = n_3d_parameters2 // n_2p1d_parameters2\n self.conv2_s = conv1x3x3(planes, mid_planes2)\n self.bn2_s = nn.BatchNorm3d(mid_planes2)\n self.conv2_t = conv3x1x1(mid_planes2, planes)\n self.bn2_t = nn.BatchNorm3d(planes)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1_s(x)\n out = self.bn1_s(out)\n out = self.relu(out)\n out = self.conv1_t(out)\n out = self.bn1_t(out)\n out = self.relu(out)\n\n out = self.conv2_s(out)\n out = self.bn2_s(out)\n out = self.relu(out)\n out = self.conv2_t(out)\n out = self.bn2_t(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1, downsample=None):\n super().__init__()\n\n self.conv1 = conv1x1x1(in_planes, planes)\n self.bn1 = nn.BatchNorm3d(planes)\n\n n_3d_parameters = planes * planes * 3 * 3 * 3\n n_2p1d_parameters = planes * 3 * 3 + 3 * planes\n mid_planes = n_3d_parameters // n_2p1d_parameters\n self.conv2_s = conv1x3x3(planes, mid_planes, stride)\n self.bn2_s = nn.BatchNorm3d(mid_planes)\n self.conv2_t = conv3x1x1(mid_planes, planes, stride)\n self.bn2_t = nn.BatchNorm3d(planes)\n\n self.conv3 = conv1x1x1(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm3d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2_s(out)\n out = self.bn2_s(out)\n out = self.relu(out)\n out = self.conv2_t(out)\n out = self.bn2_t(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self,\n block,\n layers,\n block_inplanes,\n n_input_channels=3,\n conv1_t_size=7,\n conv1_t_stride=1,\n no_max_pool=False,\n shortcut_type='B',\n widen_factor=1.0,\n num_classes=700,\n sample_size=112,\n sample_duration=15):\n #super().__init__()\n super(ResNet, 
self).__init__()\n\n block_inplanes = [int(x * widen_factor) for x in block_inplanes]\n print(block_inplanes)\n self.in_planes = block_inplanes[0]\n self.no_max_pool = no_max_pool\n\n n_3d_parameters = 3 * self.in_planes * conv1_t_size * 7 * 7\n n_2p1d_parameters = 3 * 7 * 7 + conv1_t_size * self.in_planes\n mid_planes = n_3d_parameters // n_2p1d_parameters\n self.conv1_s = nn.Conv3d(n_input_channels,\n mid_planes,\n kernel_size=(1, 7, 7),\n stride=(1, 2, 2),\n padding=(0, 3, 3),\n bias=False)\n self.bn1_s = nn.BatchNorm3d(mid_planes)\n self.conv1_t = nn.Conv3d(mid_planes,\n self.in_planes,\n kernel_size=(conv1_t_size, 1, 1),\n stride=(conv1_t_stride, 1, 1),\n padding=(conv1_t_size // 2, 0, 0),\n bias=False)\n self.bn1_t = nn.BatchNorm3d(self.in_planes)\n self.relu = nn.ReLU(inplace=True)\n\n self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],\n shortcut_type)\n self.layer2 = self._make_layer(block,\n block_inplanes[1],\n layers[1],\n shortcut_type,\n stride=2)\n self.layer3 = self._make_layer(block,\n block_inplanes[2],\n layers[2],\n shortcut_type,\n stride=2)\n self.layer4 = self._make_layer(block,\n block_inplanes[3],\n layers[3],\n shortcut_type,\n stride=2)\n\n self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))\n self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm3d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _downsample_basic_block(self, x, planes, stride):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),\n out.size(3), out.size(4))\n if isinstance(out.data, torch.cuda.FloatTensor):\n zero_pads = zero_pads.cuda()\n\n out = torch.cat([out.data, zero_pads], dim=1)\n\n return out\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):\n downsample = None\n if stride != 1 or self.in_planes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(self._downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n conv1x1x1(self.in_planes, planes * block.expansion, stride),\n nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(\n block(in_planes=self.in_planes,\n planes=planes,\n stride=stride,\n downsample=downsample))\n self.in_planes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.in_planes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1_s(x)\n x = self.bn1_s(x)\n x = self.relu(x)\n x = self.conv1_t(x)\n x = self.bn1_t(x)\n x = self.relu(x)\n\n x = self.maxpool(x)\n\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n# Can print the model structure\ndef model_info(model, report='summary'):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if report is 'full':\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = 
name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], get_inplanes(), **kwargs)\n model_info(model,'full')\n return model\n\ndef generate_model(model_depth, **kwargs):\n assert model_depth in [10, 18, 34, 50, 101, 152, 200]\n\n if model_depth == 10:\n model = ResNet(BasicBlock, [1, 1, 1, 1], get_inplanes(), **kwargs)\n elif model_depth == 18:\n model = ResNet(BasicBlock, [2, 2, 2, 2], get_inplanes(), **kwargs)\n elif model_depth == 34:\n model = ResNet(BasicBlock, [3, 4, 6, 3], get_inplanes(), **kwargs)\n elif model_depth == 50:\n model = ResNet(Bottleneck, [3, 4, 6, 3], get_inplanes(), **kwargs)\n elif model_depth == 101:\n model = ResNet(Bottleneck, [3, 4, 23, 3], get_inplanes(), **kwargs)\n elif model_depth == 152:\n model = ResNet(Bottleneck, [3, 8, 36, 3], get_inplanes(), **kwargs)\n elif model_depth == 200:\n model = ResNet(Bottleneck, [3, 24, 36, 3], get_inplanes(), **kwargs)\n\n return model"
] | [
[
"torch.nn.AvgPool3d",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.Linear",
"numpy.shape",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm3d"
],
[
"torch.zeros_like",
"torch.no_grad",
"torch.ones_like"
],
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.AdaptiveAvgPool3d",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.Linear",
"torch.nn.functional.avg_pool3d",
"torch.nn.ReLU",
"torch.nn.BatchNorm3d",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yarikoptic/statsmodels | [
"844381797a475a01c05a4e162592a5a6e3a48032",
"844381797a475a01c05a4e162592a5a6e3a48032",
"844381797a475a01c05a4e162592a5a6e3a48032",
"f990cb1a1ef0c9883c9394444e6f9d027efabec6"
] | [
"statsmodels/tsa/tests/results/arima111nc_css_results.py",
"statsmodels/tsa/vector_ar/tests/results/results_var.py",
"statsmodels/stats/libqsturng/qsturng_.py",
"statsmodels/emplike/tests/test_origin.py"
] | [
"import numpy as np\n\nllf = np.array([-242.89663276735])\n\nnobs = np.array([ 202])\n\nk = np.array([ 3])\n\nk_exog = np.array([ 1])\n\nsigma = np.array([ .8053519404535])\n\nchi2 = np.array([ 15723.381396967])\n\ndf_model = np.array([ 2])\n\nk_ar = np.array([ 1])\n\nk_ma = np.array([ 1])\n\nparams = np.array([ .99479180506163, \n -.84461527652809, \n .64859174799221])\n\ncov_params = np.array([ .00008904968254, \n -.00023560410507, \n .00012795903324, \n -.00023560410507, \n .00131628534915, \n -.00022462340695, \n .00012795903324, \n -.00022462340695, \n .0005651128627]).reshape(3,3)\n\nxb = np.array([ 0, \n 0, \n .02869686298072, \n .05651443824172, \n .0503994859755, \n .06887971609831, \n .05940540507436, \n .08067328482866, \n .08167565613985, \n .06429278105497, \n .07087650150061, \n .06886467337608, \n .06716959923506, \n .08230647444725, \n .07099691033363, \n .08401278406382, \n .07996553182602, \n .07354256510735, \n .09366323798895, \n .08811800926924, \n .10296355187893, \n .08846370875835, \n .0852297320962, \n .08700425922871, \n .09751411527395, \n .09737934917212, \n .11228405684233, \n .1053489819169, \n .12352022528648, \n .16439816355705, \n .1643835157156, \n .19891132414341, \n .17551273107529, \n .17827558517456, \n .19562774896622, \n .21028305590153, \n .23767858743668, \n .24580039083958, \n .28269505500793, \n .29883882403374, \n .31247469782829, \n .35402658581734, \n .37410452961922, \n .39106267690659, \n .42040377855301, \n .44518512487411, \n .43608102202415, \n .44340893626213, \n .44959822297096, \n .40977239608765, \n .42118826508522, \n .40079545974731, \n .38357082009315, \n .36902260780334, \n .35673499107361, \n .36137464642525, \n .38031083345413, \n .47139286994934, \n .47323387861252, \n .60994738340378, \n .69538277387619, \n .7825602889061, \n .84117436408997, \n .9657689332962, \n 1.0109325647354, \n .95897275209427, \n .96013957262039, \n .9461076259613, \n .9342554807663, \n .83413934707642, \n .83968591690063, \n .84437066316605, \n .83330947160721, \n .8990553021431, \n .87949693202972, \n .86297762393951, \n .89407861232758, \n .93536442518234, \n 1.0303052663803, \n 1.1104937791824, \n 1.1481873989105, \n 1.2851470708847, \n 1.4458787441254, \n 1.5515991449356, \n 1.7309991121292, \n 1.8975404500961, \n 1.8579913377762, \n 1.8846583366394, \n 1.9672524929047, \n 1.9469071626663, \n 2.0048115253448, \n 1.9786299467087, \n 1.8213576078415, \n 1.6284521818161, \n 1.7508568763733, \n 1.5689061880112, \n 1.2950873374939, \n 1.2290096282959, \n 1.1882168054581, \n 1.1537625789642, \n 1.1697143316269, \n 1.1681711673737, \n 1.106795668602, \n 1.0849931240082, \n 1.006507396698, \n 1.0453414916992, \n .98803448677063, \n .95465070009232, \n 1.0165599584579, \n .67838954925537, \n .69311982393265, \n .69054269790649, \n .76345545053482, \n .84005492925644, \n .87471830844879, \n .91901183128357, \n .92638796567917, \n .96265280246735, \n 1.0083012580872, \n 1.0618740320206, \n 1.0921038389206, \n 1.2077431678772, \n 1.2303256988525, \n 1.174311041832, \n 1.3072115182877, \n 1.314337015152, \n 1.3503924608231, \n 1.5760731697083, \n 1.5264053344727, \n 1.34929728508, \n 1.304829955101, \n 1.2522557973862, \n 1.222869515419, \n 1.198047041893, \n 1.1770839691162, \n 1.1743944883347, \n 1.1571066379547, \n 1.1274864673615, \n 1.0574153661728, \n 1.058304309845, \n .99898308515549, \n .9789143204689, \n 1.0070173740387, \n 1.000718832016, \n 1.0104174613953, \n 1.0486439466476, \n 1.0058424472809, \n .98470783233643, \n 1.0119106769562, \n 
1.0649236440659, \n 1.0346088409424, \n 1.0540577173233, \n 1.0704846382141, \n .97923594713211, \n .90216588973999, \n .9271782040596, \n .85819715261459, \n .75488126277924, \n .78776079416275, \n .77047789096832, \n .77089905738831, \n .8313245177269, \n .82229107618332, \n .90476810932159, \n .94439232349396, \n 1.0379292964935, \n 1.1469690799713, \n 1.1489590406418, \n 1.2257302999496, \n 1.1554099321365, \n 1.1260533332825, \n .9811190366745, \n .8436843752861, \n .95287209749222, \n .90993344783783, \n .94875508546829, \n 1.0115815401077, \n .94450175762177, \n .87282890081406, \n .91741597652435, \n .98511207103729, \n .9972335100174, \n 1.0975805521011, \n 1.1823329925537, \n 1.1487929821014, \n 1.270641207695, \n 1.2083609104156, \n 1.696394443512, \n 1.4628355503082, \n 1.4307631254196, \n 1.5087975263596, \n 1.1542117595673, \n 1.2262620925903, \n 1.3880327939987, \n 1.3853038549423, \n 1.4396153688431, \n 1.7208145856857, \n 1.678991317749, \n 2.110867023468, \n 1.524417757988, \n .57946246862411, \n .56406193971634, \n .74643105268478])\n\ny = np.array([np.nan, \n 28.979999542236, \n 29.178695678711, \n 29.40651512146, \n 29.420400619507, \n 29.608880996704, \n 29.609405517578, \n 29.830673217773, \n 29.921676635742, \n 29.874292373657, \n 29.990877151489, \n 30.048864364624, \n 30.10717010498, \n 30.292304992676, \n 30.290996551514, \n 30.464012145996, \n 30.519966125488, \n 30.553541183472, \n 30.783664703369, \n 30.838117599487, \n 31.042964935303, \n 31.038463592529, \n 31.105230331421, \n 31.207004547119, \n 31.377513885498, \n 31.477378845215, \n 31.692283630371, \n 31.755348205566, \n 32.003520965576, \n 32.444396972656, \n 32.61438369751, \n 33.048908233643, \n 33.07551574707, \n 33.278274536133, \n 33.595630645752, \n 33.91028213501, \n 34.337677001953, \n 34.645801544189, \n 35.182697296143, \n 35.598838806152, \n 36.012474060059, \n 36.654026031494, \n 37.174102783203, \n 37.691062927246, \n 38.320404052734, \n 38.94518661499, \n 39.336082458496, \n 39.843410491943, \n 40.349597930908, \n 40.509769439697, \n 41.021186828613, \n 41.300796508789, \n 41.583572387695, \n 41.869022369385, \n 42.156734466553, \n 42.561374664307, \n 43.080310821533, \n 44.171394348145, \n 44.673233032227, \n 46.209945678711, \n 47.495380401611, \n 48.882556915283, \n 50.141174316406, \n 51.965770721436, \n 53.310932159424, \n 53.958972930908, \n 54.960140228271, \n 55.84610748291, \n 56.734252929688, \n 56.934139251709, \n 57.839687347412, \n 58.744373321533, \n 59.533309936523, \n 60.899055480957, \n 61.679496765137, \n 62.46297454834, \n 63.594078063965, \n 64.83536529541, \n 66.530303955078, \n 68.210494995117, \n 69.64818572998, \n 71.885147094727, \n 74.445877075195, \n 76.751594543457, \n 79.731002807617, \n 82.797538757324, \n 84.457992553711, \n 86.584655761719, \n 89.167251586914, \n 91.046905517578, \n 93.504814147949, \n 95.378631591797, \n 96.22135925293, \n 96.628448486328, \n 99.250854492188, \n 99.668907165527, \n 99.195091247559, \n 100.0290145874, \n 100.98822021484, \n 101.95376586914, \n 103.26971435547, \n 104.46817779541, \n 105.20679473877, \n 106.1849899292, \n 106.70650482178, \n 108.0453414917, \n 108.68803405762, \n 109.45465087891, \n 110.91656494141, \n 109.37838745117, \n 110.19312286377, \n 110.89054107666, \n 112.16345977783, \n 113.54005432129, \n 114.67472076416, \n 115.91901397705, \n 116.92639160156, \n 118.16265106201, \n 119.50830078125, \n 120.96187591553, \n 122.29209899902, \n 124.30773925781, \n 125.7303237915, \n 126.57431030273, \n 
128.8072052002, \n 130.21432495117, \n 131.85038757324, \n 134.97607421875, \n 136.22640991211, \n 136.44931030273, \n 137.50482177734, \n 138.45225524902, \n 139.5228729248, \n 140.59803771973, \n 141.67707824707, \n 142.87438964844, \n 143.95710754395, \n 144.92749023438, \n 145.55741882324, \n 146.65830993652, \n 147.29898071289, \n 148.17890930176, \n 149.40701293945, \n 150.40071105957, \n 151.51042175293, \n 152.84864807129, \n 153.60585021973, \n 154.48471069336, \n 155.7119140625, \n 157.16493225098, \n 158.03460693359, \n 159.25405883789, \n 160.47047424316, \n 160.87922668457, \n 161.30215454102, \n 162.42718505859, \n 162.85820007324, \n 162.95487976074, \n 163.98776245117, \n 164.67047119141, \n 165.47090148926, \n 166.73132324219, \n 167.52229309082, \n 169.00477600098, \n 170.24440002441, \n 171.93792724609, \n 173.84696960449, \n 175.04895019531, \n 176.82572937012, \n 177.55540466309, \n 178.52604675293, \n 178.58113098145, \n 178.54368591309, \n 180.25286865234, \n 180.90992736816, \n 182.14875793457, \n 183.61158752441, \n 184.14450073242, \n 184.5728302002, \n 185.81741333008, \n 187.28511047363, \n 188.39723205566, \n 190.19758605957, \n 191.98233032227, \n 192.94879150391, \n 195.07064819336, \n 195.90835571289, \n 200.89639282227, \n 200.86282348633, \n 202.13075256348, \n 204.20880126953, \n 203.05419921875, \n 204.80026245117, \n 207.3080291748, \n 208.72329711914, \n 210.57261657715, \n 214.21580505371, \n 215.67597961426, \n 220.72087097168, \n 218.41342163086, \n 212.75346374512, \n 213.23506164551, \n 215.21542358398])\n\nresid = np.array([np.nan, \n .17000007629395, \n .17130389809608, \n -.03651398047805, \n .11960058659315, \n -.05888139456511, \n .14059536159039, \n .00932686589658, \n -.11167634278536, \n .04570783302188, \n -.0108770346269, \n -.00886330008507, \n .10282856971025, \n -.07230624556541, \n .08900293707848, \n -.0240114107728, \n -.03996651992202, \n .13645842671394, \n -.03366377204657, \n .10188252478838, \n -.09296332299709, \n -.01846401393414, \n .01477065030485, \n .0729955881834, \n .00248436117545, \n .10262141376734, \n -.04228436201811, \n .12465056031942, \n .27647939324379, \n .00560382334515, \n .23561419546604, \n -.1489082723856, \n .02448422275484, \n .12172746658325, \n .10437148809433, \n .18971465528011, \n .06232447177172, \n .25419962406158, \n .11730266362429, \n .10116269439459, \n .2875237762928, \n .14597341418266, \n .12589547038078, \n .20893961191177, \n .17959471046925, \n -.04518361017108, \n .06391899287701, \n .05659105628729, \n -.24960128962994, \n .09022761881351, \n -.12118522822857, \n -.10079623758793, \n -.08357158303261, \n -.06902338564396, \n .04326653853059, \n .13862533867359, \n .61968916654587, \n .02860714122653, \n .92676383256912, \n .59005337953568, \n .60461646318436, \n .41744044423103, \n .85882639884949, \n .33423033356667, \n -.31093180179596, \n .04102724045515, \n -.06013804674149, \n -.04610994458199, \n -.63425624370575, \n .06586220860481, \n .06031560897827, \n -.04437142238021, \n .46668976545334, \n -.09905604273081, \n -.07949769496918, \n .23702463507652, \n .30592212080956, \n .66463404893875, \n .56969320774078, \n .28950771689415, \n .95181107521057, \n 1.1148544549942, \n .75411820411682, \n 1.2484039068222, \n 1.1690024137497, \n -.1975435167551, \n .24200716614723, \n .6153416633606, \n -.06725100427866, \n .45309436321259, \n -.10480991750956, \n -.97863000631332, \n -1.2213591337204, \n .8715478181839, \n -1.1508584022522, \n -1.7689031362534, \n -.39508575201035, \n 
-.22900961339474, \n -.18821682035923, \n .14623281359673, \n .03029025532305, \n -.36817568540573, \n -.10679569840431, \n -.48499462008476, \n .29349562525749, \n -.34534454345703, \n -.18803144991398, \n .44535079598427, \n -2.2165644168854, \n .12161350995302, \n .00687709869817, \n .50946187973022, \n .53653997182846, \n .25995117425919, \n .32527860999107, \n .08098815381527, \n .27360898256302, \n .33735024929047, \n .39170032739639, \n .23812144994736, \n .80789774656296, \n .19225835800171, \n -.33032417297363, \n .92568749189377, \n .09278241544962, \n .28566908836365, \n 1.5496014356613, \n -.27607008814812, \n -1.1263961791992, \n -.24930645525455, \n -.30482992529869, \n -.15224970877171, \n -.12287864089012, \n -.09804095327854, \n .02291300706565, \n -.07438835501671, \n -.15710659325123, \n -.42748948931694, \n .04259072244167, \n -.35830733180046, \n -.09898918122053, \n .22108262777328, \n -.00701736938208, \n .0992873236537, \n .28958559036255, \n -.24864092469215, \n -.10584850609303, \n .21528913080692, \n .38809850811958, \n -.16492980718613, \n .16538816690445, \n .1459391862154, \n -.57048463821411, \n -.47923597693443, \n .19784018397331, \n -.4271782040596, \n -.65820020437241, \n .24511873722076, \n -.0877638310194, \n .02952514961362, \n .42909786105156, \n -.03132146969438, \n .57771807909012, \n .29522883892059, \n .6555985212326, \n .76207375526428, \n .05302781611681, \n .55105316638947, \n -.42574247717857, \n -.15540990233421, \n -.92604118585587, \n -.88112819194794, \n .75632172822952, \n -.25287514925003, \n .29006350040436, \n .45125409960747, \n -.41159069538116, \n -.44450175762177, \n .32716807723045, \n .48259317874908, \n .11487878113985, \n .70277869701385, \n .60241633653641, \n -.18233296275139, \n .85120695829391, \n -.37064728140831, \n 3.2916390895844, \n -1.4963974952698, \n -.16283248364925, \n .56923681497574, \n -2.3088004589081, \n .51979947090149, \n 1.1197309494019, \n .02996650896966, \n .40969428420067, \n 1.9223841428757, \n -.21881568431854, \n 2.9340152740479, \n -3.8318600654602, \n -6.239429473877, \n -.08245316892862, \n 1.2339268922806, \n 1.1695692539215])\n\nyr = np.array([np.nan, \n .17000007629395, \n .17130389809608, \n -.03651398047805, \n .11960058659315, \n -.05888139456511, \n .14059536159039, \n .00932686589658, \n -.11167634278536, \n .04570783302188, \n -.0108770346269, \n -.00886330008507, \n .10282856971025, \n -.07230624556541, \n .08900293707848, \n -.0240114107728, \n -.03996651992202, \n .13645842671394, \n -.03366377204657, \n .10188252478838, \n -.09296332299709, \n -.01846401393414, \n .01477065030485, \n .0729955881834, \n .00248436117545, \n .10262141376734, \n -.04228436201811, \n .12465056031942, \n .27647939324379, \n .00560382334515, \n .23561419546604, \n -.1489082723856, \n .02448422275484, \n .12172746658325, \n .10437148809433, \n .18971465528011, \n .06232447177172, \n .25419962406158, \n .11730266362429, \n .10116269439459, \n .2875237762928, \n .14597341418266, \n .12589547038078, \n .20893961191177, \n .17959471046925, \n -.04518361017108, \n .06391899287701, \n .05659105628729, \n -.24960128962994, \n .09022761881351, \n -.12118522822857, \n -.10079623758793, \n -.08357158303261, \n -.06902338564396, \n .04326653853059, \n .13862533867359, \n .61968916654587, \n .02860714122653, \n .92676383256912, \n .59005337953568, \n .60461646318436, \n .41744044423103, \n .85882639884949, \n .33423033356667, \n -.31093180179596, \n .04102724045515, \n -.06013804674149, \n -.04610994458199, \n 
-.63425624370575, \n .06586220860481, \n .06031560897827, \n -.04437142238021, \n .46668976545334, \n -.09905604273081, \n -.07949769496918, \n .23702463507652, \n .30592212080956, \n .66463404893875, \n .56969320774078, \n .28950771689415, \n .95181107521057, \n 1.1148544549942, \n .75411820411682, \n 1.2484039068222, \n 1.1690024137497, \n -.1975435167551, \n .24200716614723, \n .6153416633606, \n -.06725100427866, \n .45309436321259, \n -.10480991750956, \n -.97863000631332, \n -1.2213591337204, \n .8715478181839, \n -1.1508584022522, \n -1.7689031362534, \n -.39508575201035, \n -.22900961339474, \n -.18821682035923, \n .14623281359673, \n .03029025532305, \n -.36817568540573, \n -.10679569840431, \n -.48499462008476, \n .29349562525749, \n -.34534454345703, \n -.18803144991398, \n .44535079598427, \n -2.2165644168854, \n .12161350995302, \n .00687709869817, \n .50946187973022, \n .53653997182846, \n .25995117425919, \n .32527860999107, \n .08098815381527, \n .27360898256302, \n .33735024929047, \n .39170032739639, \n .23812144994736, \n .80789774656296, \n .19225835800171, \n -.33032417297363, \n .92568749189377, \n .09278241544962, \n .28566908836365, \n 1.5496014356613, \n -.27607008814812, \n -1.1263961791992, \n -.24930645525455, \n -.30482992529869, \n -.15224970877171, \n -.12287864089012, \n -.09804095327854, \n .02291300706565, \n -.07438835501671, \n -.15710659325123, \n -.42748948931694, \n .04259072244167, \n -.35830733180046, \n -.09898918122053, \n .22108262777328, \n -.00701736938208, \n .0992873236537, \n .28958559036255, \n -.24864092469215, \n -.10584850609303, \n .21528913080692, \n .38809850811958, \n -.16492980718613, \n .16538816690445, \n .1459391862154, \n -.57048463821411, \n -.47923597693443, \n .19784018397331, \n -.4271782040596, \n -.65820020437241, \n .24511873722076, \n -.0877638310194, \n .02952514961362, \n .42909786105156, \n -.03132146969438, \n .57771807909012, \n .29522883892059, \n .6555985212326, \n .76207375526428, \n .05302781611681, \n .55105316638947, \n -.42574247717857, \n -.15540990233421, \n -.92604118585587, \n -.88112819194794, \n .75632172822952, \n -.25287514925003, \n .29006350040436, \n .45125409960747, \n -.41159069538116, \n -.44450175762177, \n .32716807723045, \n .48259317874908, \n .11487878113985, \n .70277869701385, \n .60241633653641, \n -.18233296275139, \n .85120695829391, \n -.37064728140831, \n 3.2916390895844, \n -1.4963974952698, \n -.16283248364925, \n .56923681497574, \n -2.3088004589081, \n .51979947090149, \n 1.1197309494019, \n .02996650896966, \n .40969428420067, \n 1.9223841428757, \n -.21881568431854, \n 2.9340152740479, \n -3.8318600654602, \n -6.239429473877, \n -.08245316892862, \n 1.2339268922806, \n 1.1695692539215])\n\nmse = np.array([ 1.1112809181213, \n .6632194519043, \n .65879660844803, \n .65575885772705, \n .65364873409271, \n .65217137336731, \n .65113133192062, \n .6503963470459, \n .64987552165985, \n .64950579404831, \n .64924287796021, \n .64905577898026, \n .64892256259918, \n .64882761240005, \n .64875996112823, \n .64871168136597, \n .64867728948593, \n .64865279197693, \n .64863526821136, \n .64862281084061, \n .64861387014389, \n .64860755205154, \n .64860302209854, \n .64859980344772, \n .64859747886658, \n .64859586954117, \n .64859467744827, \n .64859384298325, \n .6485932469368, \n .64859282970428, \n .64859253168106, \n .64859229326248, \n .64859211444855, \n .64859199523926, \n .64859193563461, \n .64859187602997, \n .64859187602997, \n .64859181642532, \n .64859181642532, \n 
.64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068])\n\nstdp = np.array([ 0, \n 0, \n .02869686298072, \n .05651443824172, \n .0503994859755, \n .06887971609831, \n .05940540507436, \n .08067328482866, \n .08167565613985, \n .06429278105497, \n .07087650150061, \n .06886467337608, \n .06716959923506, \n .08230647444725, \n 
.07099691033363, \n .08401278406382, \n .07996553182602, \n .07354256510735, \n .09366323798895, \n .08811800926924, \n .10296355187893, \n .08846370875835, \n .0852297320962, \n .08700425922871, \n .09751411527395, \n .09737934917212, \n .11228405684233, \n .1053489819169, \n .12352022528648, \n .16439816355705, \n .1643835157156, \n .19891132414341, \n .17551273107529, \n .17827558517456, \n .19562774896622, \n .21028305590153, \n .23767858743668, \n .24580039083958, \n .28269505500793, \n .29883882403374, \n .31247469782829, \n .35402658581734, \n .37410452961922, \n .39106267690659, \n .42040377855301, \n .44518512487411, \n .43608102202415, \n .44340893626213, \n .44959822297096, \n .40977239608765, \n .42118826508522, \n .40079545974731, \n .38357082009315, \n .36902260780334, \n .35673499107361, \n .36137464642525, \n .38031083345413, \n .47139286994934, \n .47323387861252, \n .60994738340378, \n .69538277387619, \n .7825602889061, \n .84117436408997, \n .9657689332962, \n 1.0109325647354, \n .95897275209427, \n .96013957262039, \n .9461076259613, \n .9342554807663, \n .83413934707642, \n .83968591690063, \n .84437066316605, \n .83330947160721, \n .8990553021431, \n .87949693202972, \n .86297762393951, \n .89407861232758, \n .93536442518234, \n 1.0303052663803, \n 1.1104937791824, \n 1.1481873989105, \n 1.2851470708847, \n 1.4458787441254, \n 1.5515991449356, \n 1.7309991121292, \n 1.8975404500961, \n 1.8579913377762, \n 1.8846583366394, \n 1.9672524929047, \n 1.9469071626663, \n 2.0048115253448, \n 1.9786299467087, \n 1.8213576078415, \n 1.6284521818161, \n 1.7508568763733, \n 1.5689061880112, \n 1.2950873374939, \n 1.2290096282959, \n 1.1882168054581, \n 1.1537625789642, \n 1.1697143316269, \n 1.1681711673737, \n 1.106795668602, \n 1.0849931240082, \n 1.006507396698, \n 1.0453414916992, \n .98803448677063, \n .95465070009232, \n 1.0165599584579, \n .67838954925537, \n .69311982393265, \n .69054269790649, \n .76345545053482, \n .84005492925644, \n .87471830844879, \n .91901183128357, \n .92638796567917, \n .96265280246735, \n 1.0083012580872, \n 1.0618740320206, \n 1.0921038389206, \n 1.2077431678772, \n 1.2303256988525, \n 1.174311041832, \n 1.3072115182877, \n 1.314337015152, \n 1.3503924608231, \n 1.5760731697083, \n 1.5264053344727, \n 1.34929728508, \n 1.304829955101, \n 1.2522557973862, \n 1.222869515419, \n 1.198047041893, \n 1.1770839691162, \n 1.1743944883347, \n 1.1571066379547, \n 1.1274864673615, \n 1.0574153661728, \n 1.058304309845, \n .99898308515549, \n .9789143204689, \n 1.0070173740387, \n 1.000718832016, \n 1.0104174613953, \n 1.0486439466476, \n 1.0058424472809, \n .98470783233643, \n 1.0119106769562, \n 1.0649236440659, \n 1.0346088409424, \n 1.0540577173233, \n 1.0704846382141, \n .97923594713211, \n .90216588973999, \n .9271782040596, \n .85819715261459, \n .75488126277924, \n .78776079416275, \n .77047789096832, \n .77089905738831, \n .8313245177269, \n .82229107618332, \n .90476810932159, \n .94439232349396, \n 1.0379292964935, \n 1.1469690799713, \n 1.1489590406418, \n 1.2257302999496, \n 1.1554099321365, \n 1.1260533332825, \n .9811190366745, \n .8436843752861, \n .95287209749222, \n .90993344783783, \n .94875508546829, \n 1.0115815401077, \n .94450175762177, \n .87282890081406, \n .91741597652435, \n .98511207103729, \n .9972335100174, \n 1.0975805521011, \n 1.1823329925537, \n 1.1487929821014, \n 1.270641207695, \n 1.2083609104156, \n 1.696394443512, \n 1.4628355503082, \n 1.4307631254196, \n 1.5087975263596, \n 1.1542117595673, \n 1.2262620925903, \n 
1.3880327939987, \n 1.3853038549423, \n 1.4396153688431, \n 1.7208145856857, \n 1.678991317749, \n 2.110867023468, \n 1.524417757988, \n .57946246862411, \n .56406193971634, \n .74643105268478])\n\nicstats = np.array([ 202, \n np.nan, \n -242.89663276735, \n 3, \n 491.79326553469, \n 501.7180686269])\n\nclass Bunch(dict):\n def __init__(self, **kw):\n dict.__init__(self, kw)\n self.__dict__ = self\n\n\nresults = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )\n\n",
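The Bunch class defined at the end of the fixture above is the usual attribute-dict idiom: because the instance's __dict__ is set to the dict itself, every keyword passed at construction becomes both a key and an attribute. A minimal sketch of how such a results fixture is consumed (the values below are illustrative, not taken from the arrays above):

import numpy as np

class Bunch(dict):
    # Same idiom as in the fixture: keys double as attributes.
    def __init__(self, **kw):
        dict.__init__(self, kw)
        self.__dict__ = self

res = Bunch(llf=-245.38, nobs=202, params=np.array([0.8, -0.1]))
assert res.llf == res["llf"]   # attribute and item access agree
res.mse = np.ones(3)           # later attribute writes land in the dict too
assert "mse" in res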
"\"\"\"\nTest Results for the VAR model. Obtained from Stata using\ndatasets/macrodata/var.do\n\"\"\"\n\nimport numpy as np\n\nclass MacrodataResults(object):\n def __init__(self):\n params = [-0.2794863875, 0.0082427826, 0.6750534746, 0.2904420695,\n 0.0332267098, -0.0073250059, 0.0015269951, -0.1004938623,\n -0.1231841792, 0.2686635768, 0.2325045441, 0.0257430635,\n 0.0235035714, 0.0054596064, -1.97116e+00, 0.3809752365,\n 4.4143364022, 0.8001168377, 0.2255078864, -0.1241109271,\n -0.0239026118]\n params = np.asarray(params).reshape(3,-1)\n params = np.hstack((params[:,-1][:,None],params[:,:-1:2],params[:,1::2]))\n self.params = params\n self.neqs = 3\n self.nobs = 200\n self.df_eq = 7\n self.nobs_1 = 200\n self.df_model_1 = 6\n self.rmse_1 = .0075573716985351\n self.rsquared_1 = .2739094844780006\n self.llf_1 = 696.8213727557811\n self.nobs_2 = 200\n self.rmse_2 = .0065444260782597\n self.rsquared_2 = .1423626064753714\n self.llf_2 = 725.6033255319256\n self.nobs_3 = 200\n self.rmse_3 = .0395942039671031\n self.rsquared_3 = .2955406949737428\n self.llf_3 = 365.5895183036045\n# These are from Stata. They use the LL based definition\n# We return Lutkepohl statistics. See Stata TS manual page 436\n# self.bic = -19.06939794312953\n# self.aic = -19.41572126661708\n# self.hqic = -19.27556951526737\n# These are from R. See var.R in macrodata folder\n self.bic = -2.758301611618373e+01\n self.aic = -2.792933943967127e+01\n self.hqic = -2.778918768832157e+01\n self.fpe = 7.421287668357018e-13\n self.detsig = 6.01498432283e-13\n self.llf = 1962.572126661708\n self.chi2_1 = 75.44775165699033 # don't know how they calculate this\n # it's not -2 * (ll1 - ll0)\n self.chi2_2 = 33.19878716815366\n self.chi2_3 = 83.90568280242312\n bse = [.1666662376, .1704584393, .1289691456, .1433308696, .0257313781,\n .0253307796, .0010992645,.1443272761,.1476111934,.1116828804,\n .1241196435, .0222824956, .021935591, .0009519255, .8731894193,\n .8930573331, .6756886998, .7509319263, .1348105496, .1327117543,\n .0057592114]\n bse = np.asarray(bse).reshape(3,-1)\n bse = np.hstack((bse[:,-1][:,None],bse[:,:-1:2],bse[:,1::2]))\n self.bse = bse\n\n\n\n#array([[ -2.79434736e-01, 6.75015752e-01, 3.32194508e-02,\n# 8.22108491e-03, 2.90457628e-01, -7.32090753e-03,\n# 1.52697235e-03],\n# [ -1.00467978e-01, 2.68639553e-01, 2.57387265e-02,\n# -1.23173928e-01, 2.32499436e-01, 2.35037610e-02,\n# 5.45960305e-03],\n# [ -1.97097367e+00, 4.41416233e+00, 2.25478953e-01,\n# 3.80785849e-01, 8.00280918e-01, -1.24079062e-01,\n# -2.39025209e-02]])\n\n",
"# Copyright (c) 2011, Roger Lew [see LICENSE.txt]\n# This software is funded in part by NIH Grant P20 RR016454.\n\n\"\"\"\nImplementation of Gleason's (1999) non-iterative upper quantile\nstudentized range approximation.\n\nAccording to Gleason this method should be more accurate than the\nAS190 FORTRAN algorithm of Lund and Lund (1983) and works from .5\n<= p <= .999 (The AS190 only works from .9 <= p <= .99).\n\nIt is more efficient then the Copenhaver & Holland (1988) algorithm\n(used by the _qtukey_ R function) although it requires storing the A\ntable in memory. (q distribution) approximations in Python.\n\nsee:\n Gleason, J. R. (1999). An accurate, non-iterative approximation\n for studentized range quantiles. Computational Statistics &\n Data Analysis, (31), 147-158.\n\n Gleason, J. R. (1998). A table of quantile points of the\n Studentized range distribution.\n http://www.stata.com/stb/stb46/dm64/sturng.pdf\n\"\"\"\nfrom __future__ import print_function\nfrom statsmodels.compat.python import lrange, map\nimport math\nimport scipy.stats\nimport numpy as np\n\nfrom scipy.optimize import fminbound\n\ninf = np.inf\n\n__version__ = '0.2.3'\n\n# changelog\n# 0.1 - initial release\n# 0.1.1 - vectorized\n# 0.2 - psturng added\n# 0.2.1 - T, R generation script relegated to make_tbls.py\n# 0.2.2\n# - select_points refactored for performance to select_ps and\n# select_vs\n# - pysturng tester added.\n# 0.2.3 - uses np.inf and np.isinf\n\n# Gleason's table was derived using least square estimation on the tabled\n# r values for combinations of p and v. In total there are 206\n# estimates over p-values of .5, .75, .9, .95, .975, .99, .995,\n# and .999, and over v (degrees of freedom) of (1) - 20, 24, 30, 40,\n# 60, 120, and inf. combinations with p < .95 don't have coefficients\n# for v = 1. Hence the parentheses. These coefficients allow us to\n# form f-hat. f-hat with the inverse t transform of tinv(p,v) yields\n# a fairly accurate estimate of the studentized range distribution\n# across a wide range of values. According to Gleason this method\n# should be more accurate than algorithm AS190 of Lund and Lund (1983)\n# and work across a wider range of values (The AS190 only works\n# from .9 <= p <= .99). R's qtukey algorithm was used to add tables\n# at .675, .8, and .85. 
These aid approximations when p < .9.\n#\n# The code that generated this table is called make_tbls.py and is\n# located in version control.\nA = {(0.1, 2.0): [-2.2485085243379075, -1.5641014278923464, 0.55942294426816752, -0.060006608853883377],\n (0.1, 3.0): [-2.2061105943901564, -1.8415406600571855, 0.61880788039834955, -0.062217093661209831],\n (0.1, 4.0): [-2.1686691786678178, -2.008196172372553, 0.65010084431947401, -0.06289005500114471],\n (0.1, 5.0): [-2.145077200277393, -2.112454843879346, 0.66701240582821342, -0.062993502233654797],\n (0.1, 6.0): [-2.0896098049743155, -2.2400004934286497, 0.70088523391700142, -0.065907568563272748],\n (0.1, 7.0): [-2.0689296655661584, -2.3078445479584873, 0.71577374609418909, -0.067081034249350552],\n (0.1, 8.0): [-2.0064956480711262, -2.437400413087452, 0.76297532367415266, -0.072805518121505458],\n (0.1, 9.0): [-2.3269477513436061, -2.0469494712773089, 0.60662518717720593, -0.054887108437009016],\n (0.1, 10.0): [-2.514024350177229, -1.8261187841127482, 0.51674358077906746, -0.044590425150963633],\n (0.1, 11.0): [-2.5130181309130828, -1.8371718595995694, 0.51336701694862252, -0.043761825829092445],\n (0.1, 12.0): [-2.5203508109278823, -1.8355687130611862, 0.5063486549107169, -0.042646205063108261],\n (0.1, 13.0): [-2.5142536438310477, -1.8496969402776282, 0.50616991367764153, -0.042378379905665363],\n (0.1, 14.0): [-2.3924634153781352, -2.013859173066078, 0.56421893251638688, -0.048716888109540266],\n (0.1, 15.0): [-2.3573552940582574, -2.0576676976224362, 0.57424068771143233, -0.049367487649225841],\n (0.1, 16.0): [-2.3046427483044871, -2.1295959138627993, 0.59778272657680553, -0.051864829216301617],\n (0.1, 17.0): [-2.2230551072316125, -2.2472837435427127, 0.64255758243215211, -0.057186665209197643],\n (0.1, 18.0): [-2.3912859179716897, -2.0350604070641269, 0.55924788749333332, -0.047729331835226464],\n (0.1, 19.0): [-2.4169773092220623, -2.0048217969339146, 0.54493039319748915, -0.045991241346224065],\n (0.1, 20.0): [-2.4264087194660751, -1.9916614057049267, 0.53583555139648154, -0.04463049934517662],\n (0.1, 24.0): [-2.3969903132061869, -2.0252941869225345, 0.53428382141200137, -0.043116495567779786],\n (0.1, 30.0): [-2.2509922780354623, -2.2309248956124894, 0.60748041324937263, -0.051427415888817322],\n (0.1, 40.0): [-2.1310090183854946, -2.3908466074610564, 0.65844375382323217, -0.05676653804036895],\n (0.1, 60.0): [-1.9240060179027036, -2.6685751031012233, 0.75678826647453024, -0.067938584352398995],\n (0.1, 120.0): [-1.9814895487030182, -2.5962051736978373, 0.71793969041292693, -0.063126863201511618],\n (0.1, inf): [-1.913410267066703, -2.6947367328724732, 0.74742335122750592, -0.06660897234304515],\n (0.5, 2.0): [-0.88295935738770648, -0.1083576698911433, 0.035214966839394388, -0.0028576288978276461],\n (0.5, 3.0): [-0.89085829205846834, -0.10255696422201063, 0.033613638666631696, -0.0027101699918520737],\n (0.5, 4.0): [-0.89627345339338116, -0.099072524607668286, 0.032657774808907684, -0.0026219007698204916],\n (0.5, 5.0): [-0.89959145511941052, -0.097272836582026817, 0.032236187675182958, -0.0025911555217019663],\n (0.5, 6.0): [-0.89959428735702474, -0.098176292411106647, 0.032590766960226995, -0.0026319890073613164],\n (0.5, 7.0): [-0.90131491102863937, -0.097135907620296544, 0.032304124993269533, -0.0026057965808244125],\n (0.5, 8.0): [-0.90292500599432901, -0.096047500971337962, 0.032030946615574568, -0.0025848748659053891],\n (0.5, 9.0): [-0.90385598607803697, -0.095390771554571888, 0.031832651111105899, -0.0025656060219315991],\n 
(0.5, 10.0): [-0.90562524936125388, -0.093954488089771915, 0.031414451048323286, -0.0025257834705432031],\n (0.5, 11.0): [-0.90420347371173826, -0.095851656370277288, 0.0321150356209743, -0.0026055056400093451],\n (0.5, 12.0): [-0.90585973471757664, -0.094449306296728028, 0.031705945923210958, -0.0025673330195780191],\n (0.5, 13.0): [-0.90555437067293054, -0.094792991050780248, 0.031826594964571089, -0.0025807109129488545],\n (0.5, 14.0): [-0.90652756604388762, -0.093792156994564738, 0.031468966328889042, -0.0025395175361083741],\n (0.5, 15.0): [-0.90642323700400085, -0.094173017520487984, 0.031657517378893905, -0.0025659271829033877],\n (0.5, 16.0): [-0.90716338636685234, -0.093785178083820434, 0.031630091949657997, -0.0025701459247416637],\n (0.5, 17.0): [-0.90790133816769714, -0.093001147638638884, 0.031376863944487084, -0.002545143621663892],\n (0.5, 18.0): [-0.9077432927051563, -0.093343516378180599, 0.031518139662395313, -0.0025613906133277178],\n (0.5, 19.0): [-0.90789499456490286, -0.09316964789456067, 0.031440782366342901, -0.0025498353345867453],\n (0.5, 20.0): [-0.90842707861030725, -0.092696016476608592, 0.031296040311388329, -0.0025346963982742186],\n (0.5, 24.0): [-0.9083281347135469, -0.092959308144970776, 0.031464063190077093, -0.0025611384271086285],\n (0.5, 30.0): [-0.90857624050016828, -0.093043139391980514, 0.031578791729341332, -0.0025766595412777147],\n (0.5, 40.0): [-0.91034085045438684, -0.091978035738914568, 0.031451631000052639, -0.0025791418103733297],\n (0.5, 60.0): [-0.91084356681030032, -0.091452675572423425, 0.031333147984820044, -0.0025669786958144843],\n (0.5, 120.0): [-0.90963649561463833, -0.093414563261352349, 0.032215602703677425, -0.0026704024780441257],\n (0.5, inf): [-0.91077157500981665, -0.092899220350334571, 0.032230422399363315, -0.0026696941964372916],\n (0.675, 2.0): [-0.67231521026565144, -0.097083624030663451, 0.027991378901661649, -0.0021425184069845558],\n (0.675, 3.0): [-0.65661724764645824, -0.08147195494632696, 0.02345732427073333, -0.0017448570400999351],\n (0.675, 4.0): [-0.65045677697461124, -0.071419073399450431, 0.020741962576852499, -0.0015171262565892491],\n (0.675, 5.0): [-0.64718875357808325, -0.064720611425218344, 0.019053450246546449, -0.0013836232986228711],\n (0.675, 6.0): [-0.64523003702018655, -0.059926313672731824, 0.017918997181483924, -0.0012992250285556828],\n (0.675, 7.0): [-0.64403313148478836, -0.056248191513784476, 0.017091446791293721, -0.0012406558789511822],\n (0.675, 8.0): [-0.64325095865764359, -0.053352543126426684, 0.016471879286491072, -0.0011991839050964099],\n (0.675, 9.0): [-0.64271152754911653, -0.051023769620449078, 0.01599799600547195, -0.0011693637984597086],\n (0.675, 10.0): [-0.64232244408502626, -0.049118327462884373, 0.015629704966568955, -0.0011477775513952285],\n (0.675, 11.0): [-0.64203897854353564, -0.047524627960277892, 0.015334801262767227, -0.0011315057284007177],\n (0.675, 12.0): [-0.64180344973512771, -0.046205907576003291, 0.015108290595438166, -0.0011207364514518488],\n (0.675, 13.0): [-0.64162086456823342, -0.045076099336874231, 0.0149226565346125, -0.0011126140690497352],\n (0.675, 14.0): [-0.64146906480198984, -0.044108523550512715, 0.014772954218646743, -0.0011069708562369386],\n (0.675, 15.0): [-0.64133915151966603, -0.043273370927039825, 0.014651691599222836, -0.0011032216539514398],\n (0.675, 16.0): [-0.64123237842752079, -0.042538925012463868, 0.014549992487506169, -0.0011005633864334021],\n (0.675, 17.0): [-0.64113034037536609, -0.041905699463005854, 0.014470805560767184, 
-0.0010995286436738471],\n (0.675, 18.0): [-0.64104137391561256, -0.041343885546229336, 0.014404563657113593, -0.0010991304223377683],\n (0.675, 19.0): [-0.64096064882827297, -0.04084569291139839, 0.014350159655133801, -0.0010993656711121901],\n (0.675, 20.0): [-0.64088647405089572, -0.040402175957178085, 0.014305769823654429, -0.0011001304776712105],\n (0.675, 24.0): [-0.64063763965937837, -0.039034716348048545, 0.014196703837251648, -0.0011061961945598175],\n (0.675, 30.0): [-0.64034987716294889, -0.037749651156941719, 0.014147040999127263, -0.0011188251352919833],\n (0.675, 40.0): [-0.6399990514713938, -0.036583307574857803, 0.014172070700846548, -0.0011391004138624943],\n (0.675, 60.0): [-0.63955586202430248, -0.035576938958184395, 0.014287299153378865, -0.0011675811805794236],\n (0.675, 120.0): [-0.63899242674778622, -0.034763757512388853, 0.014500726912982405, -0.0012028491454427466],\n (0.675, inf): [-0.63832682579247613, -0.034101476695520404, 0.014780921043580184, -0.0012366204114216408],\n (0.75, 2.0): [-0.60684073638504454, -0.096375192078057031, 0.026567529471304554, -0.0019963228971914488],\n (0.75, 3.0): [-0.57986144519102656, -0.078570292718034881, 0.021280637925009449, -0.0015329306898533772],\n (0.75, 4.0): [-0.56820771686193594, -0.0668113563896649, 0.018065284051059189, -0.0012641485481533648],\n (0.75, 5.0): [-0.56175292435740221, -0.058864526929603825, 0.016046735025708799, -0.0011052560286524044],\n (0.75, 6.0): [-0.55773449282066356, -0.053136923269827351, 0.014684258167069347, -0.0010042826823561605],\n (0.75, 7.0): [-0.55509524598867332, -0.048752649191139405, 0.013696566605823626, -0.00093482210003133898],\n (0.75, 8.0): [-0.55324993686191515, -0.045305558708724644, 0.012959681992062138, -0.00088583541601696021],\n (0.75, 9.0): [-0.55189259054026196, -0.042539819902381634, 0.012398791106424769, -0.00085083962241435827],\n (0.75, 10.0): [-0.55085384656956893, -0.040281425755686585, 0.01196442242722482, -0.00082560322161492677],\n (0.75, 11.0): [-0.55003198103541273, -0.038410176100193948, 0.011623294239447784, -0.00080732975034320073],\n (0.75, 12.0): [-0.54936541596319177, -0.036838543267887103, 0.011351822637895701, -0.0007940703654926442],\n (0.75, 13.0): [-0.54881015972753833, -0.035506710625568455, 0.011134691307865171, -0.0007846360016355809],\n (0.75, 14.0): [-0.54834094346071949, -0.034364790609906569, 0.010958873929274728, -0.00077796645357008291],\n (0.75, 15.0): [-0.54793602418304255, -0.033379237455748029, 0.010816140998057593, -0.00077344175064785099],\n (0.75, 16.0): [-0.54758347689728037, -0.032520569145898917, 0.010699240399358219, -0.00077050847328596678],\n (0.75, 17.0): [-0.54727115963795303, -0.031769277192927527, 0.010603749751170481, -0.0007688642392748113],\n (0.75, 18.0): [-0.54699351808826535, -0.031105476267880995, 0.010524669113016114, -0.00076810656837464093],\n (0.75, 19.0): [-0.54674357626419079, -0.030516967201954001, 0.010459478822937069, -0.00076808652582440037],\n (0.75, 20.0): [-0.54651728378950126, -0.029992319199769232, 0.010405694998386575, -0.0007686417223966138],\n (0.75, 24.0): [-0.54578309546828363, -0.028372628574010936, 0.010269939602271542, -0.00077427370647261838],\n (0.75, 30.0): [-0.54501246434397554, -0.026834887880579802, 0.010195603314317611, -0.00078648615954105515],\n (0.75, 40.0): [-0.54418127442022624, -0.025413224488871379, 0.010196455193836855, -0.00080610785749523739],\n (0.75, 60.0): [-0.543265189207915, -0.024141961069146383, 0.010285001019536088, -0.00083332193364294587],\n (0.75, 120.0): 
[-0.54224757817994806, -0.023039071833948214, 0.010463365295636302, -0.00086612828539477918],\n (0.75, inf): [-0.54114579815367159, -0.02206592527426093, 0.01070374099737127, -0.00089726564005122183],\n (0.8, 2.0): [-0.56895274046831146, -0.096326255190541957, 0.025815915364208686, -0.0019136561019354845],\n (0.8, 3.0): [-0.5336038380862278, -0.077585191014876181, 0.020184759265389905, -0.0014242746007323785],\n (0.8, 4.0): [-0.51780274285934258, -0.064987738443608709, 0.016713309796866204, -0.001135379856633562],\n (0.8, 5.0): [-0.50894361222268403, -0.056379186603362705, 0.014511270339773345, -0.00096225604117493205],\n (0.8, 6.0): [-0.50335153028630408, -0.050168860294790812, 0.01302807093593626, -0.00085269812692536306],\n (0.8, 7.0): [-0.49960934380896432, -0.045417333787806033, 0.011955593330247398, -0.00077759605604250882],\n (0.8, 8.0): [-0.49694518248979763, -0.041689151516021969, 0.011158986677273709, -0.00072497430103953366],\n (0.8, 9.0): [-0.4949559974898507, -0.038702217132906024, 0.010554360004521268, -0.0006875213117164109],\n (0.8, 10.0): [-0.49341407910162483, -0.036266788741325398, 0.010087354421936092, -0.00066060835062865602],\n (0.8, 11.0): [-0.49218129312493897, -0.034252403643273498, 0.0097218584838579536, -0.00064123459335201907],\n (0.8, 12.0): [-0.49117223957112183, -0.032563269730499021, 0.0094318583096021404, -0.00062725253852419032],\n (0.8, 13.0): [-0.49032781145131277, -0.031132495018324432, 0.0091999762562792898, -0.0006172944366003854],\n (0.8, 14.0): [-0.48961049628464259, -0.029906921170494854, 0.009012451847823854, -0.00061026211968669543],\n (0.8, 15.0): [-0.48899069793054922, -0.028849609914548158, 0.0088602820002619594, -0.00060548991575179055],\n (0.8, 16.0): [-0.48844921216636505, -0.027929790075266154, 0.00873599263877896, -0.00060242119796859379],\n (0.8, 17.0): [-0.48797119683309537, -0.027123634910159868, 0.0086338139869481887, -0.00060061821593399998],\n (0.8, 18.0): [-0.48754596864745836, -0.026411968723496961, 0.0085493196604705755, -0.00059977083160833624],\n (0.8, 19.0): [-0.48716341805691843, -0.025781422230819986, 0.0084796655915025769, -0.00059970031758323466],\n (0.8, 20.0): [-0.48681739197185547, -0.025219629852198749, 0.0084221844254287765, -0.00060023212822886711],\n (0.8, 24.0): [-0.48570639629281365, -0.023480608772518948, 0.008274490561114187, -0.000605681105792215],\n (0.8, 30.0): [-0.48455867067770253, -0.021824655071720423, 0.0081888502974720567, -0.00061762126933785633],\n (0.8, 40.0): [-0.48335478729267423, -0.020279958998363389, 0.0081765095914194709, -0.00063657117129829635],\n (0.8, 60.0): [-0.48207351944996679, -0.018875344346672228, 0.0082473997191472338, -0.00066242478479277243],\n (0.8, 120.0): [-0.48070356185330182, -0.017621686995755746, 0.0084009638803223801, -0.00069300383808949318],\n (0.8, inf): [-0.47926687718713606, -0.016476575352367202, 0.0086097059646591811, -0.00072160843492730911],\n (0.85, 2.0): [-0.53366806986381743, -0.098288178252723263, 0.026002333446289064, -0.0019567144268844896],\n (0.85, 3.0): [-0.48995919239619989, -0.077312722648418056, 0.019368984865418108, -0.0013449670192265796],\n (0.85, 4.0): [-0.46956079162382858, -0.063818518513946695, 0.015581608910696544, -0.0010264315084377606],\n (0.85, 5.0): [-0.45790853796153624, -0.054680511194530226, 0.013229852432203093, -0.00084248430847535898],\n (0.85, 6.0): [-0.4505070841695738, -0.048050936682873302, 0.011636407582714191, -0.00072491480033529815],\n (0.85, 7.0): [-0.44548337477336181, -0.042996612516383016, 0.010493052959891263, 
-0.00064528784792153239],\n (0.85, 8.0): [-0.44186624932664148, -0.039040005821657585, 0.0096479530794160544, -0.00058990874360967567],\n (0.85, 9.0): [-0.43914118689812259, -0.035875693030752713, 0.0090088804130628187, -0.00055071480339399694],\n (0.85, 10.0): [-0.43701255390953769, -0.033300997407157376, 0.0085172159355344848, -0.00052272770799695464],\n (0.85, 11.0): [-0.43530109064899053, -0.031174742038490313, 0.0081335619868386066, -0.00050268353809787927],\n (0.85, 12.0): [-0.43389220376610071, -0.02939618314990838, 0.007830626267772851, -0.00048836431712678222],\n (0.85, 13.0): [-0.43271026958463166, -0.027890759135246888, 0.0075886916668632936, -0.00047819339710596971],\n (0.85, 14.0): [-0.43170230265007209, -0.026604156062396189, 0.0073939099688705547, -0.00047109996854335419],\n (0.85, 15.0): [-0.43083160459377423, -0.025494228911600785, 0.0072358738657550868, -0.00046630677052262481],\n (0.85, 16.0): [-0.4300699280587239, -0.024529612608808794, 0.0071069227026219683, -0.00046323869860941791],\n (0.85, 17.0): [-0.42939734931902857, -0.023685025616054269, 0.0070011541609695891, -0.00046147954942994158],\n (0.85, 18.0): [-0.42879829041505324, -0.022940655682782165, 0.006914006369119409, -0.00046070877994711774],\n (0.85, 19.0): [-0.42826119448419875, -0.022280181781634649, 0.0068417746905826433, -0.00046066841214091982],\n (0.85, 20.0): [-0.42777654887094479, -0.021690909076747832, 0.0067817408643717969, -0.00046118620289068032],\n (0.85, 24.0): [-0.42622450033640852, -0.019869646711890065, 0.0066276799593494029, -0.00046668820637553747],\n (0.85, 30.0): [-0.42463810443233418, -0.018130114737381745, 0.0065344613060499164, -0.00047835583417510423],\n (0.85, 40.0): [-0.42299917804589382, -0.016498222901308417, 0.0065120558343578407, -0.00049656043685325469],\n (0.85, 60.0): [-0.42129387265810464, -0.014992121475265813, 0.0065657795990087635, -0.00052069705640687698],\n (0.85, 120.0): [-0.41951580476366368, -0.013615722489371183, 0.0066923911275726814, -0.00054846911649167492],\n (0.85, inf): [-0.41768751825428968, -0.012327525092266726, 0.0068664920569562592, -0.00057403720261753539],\n (0.9, 1.0): [-0.65851063279096722, -0.126716242078905, 0.036318801917603061, -0.002901283222928193],\n (0.9, 2.0): [-0.50391945369829139, -0.096996108021146235, 0.024726437623473398, -0.0017901399938303017],\n (0.9, 3.0): [-0.44799791843058734, -0.077180370333307199, 0.018584042055594469, -0.0012647038118363408],\n (0.9, 4.0): [-0.42164091756145167, -0.063427071006287514, 0.014732203755741392, -0.00094904174117957688],\n (0.9, 5.0): [-0.40686856251221754, -0.053361940054842398, 0.012041802076025801, -0.00072960198292410612],\n (0.9, 6.0): [-0.39669926026535285, -0.046951517438004242, 0.010546647213094956, -0.00062621198002366064],\n (0.9, 7.0): [-0.39006553675807426, -0.04169480606532109, 0.0093687546601737195, -0.00054648695713273862],\n (0.9, 8.0): [-0.38570205067061908, -0.037083910859179794, 0.0083233218526375836, -0.00047177586974035451],\n (0.9, 9.0): [-0.38190737267892938, -0.034004585655388865, 0.0077531991574119183, -0.00044306547308527872],\n (0.9, 10.0): [-0.37893272918125737, -0.031394677600916979, 0.0072596802503533536, -0.0004160518834299966],\n (0.9, 11.0): [-0.37692512492705132, -0.028780793403136471, 0.0066937909049060379, -0.00037420010136784526],\n (0.9, 12.0): [-0.37506345200129187, -0.026956483290567372, 0.0064147730707776523, -0.00036595383207062906],\n (0.9, 13.0): [-0.37339516122383209, -0.02543949524844704, 0.0061760656530197187, -0.00035678737379179527],\n (0.9, 14.0): 
[-0.37216979891087842, -0.02396347606956644, 0.0059263234465969641, -0.0003439784452550796],\n (0.9, 15.0): [-0.371209456600122, -0.022696132732654414, 0.0057521677184623147, -0.00033961108561770848],\n (0.9, 16.0): [-0.36958924377983338, -0.022227885445863002, 0.0057691706799383926, -0.00035042762538099682],\n (0.9, 17.0): [-0.36884224719083203, -0.021146977888668726, 0.0055957928269732716, -0.00034283810412697531],\n (0.9, 18.0): [-0.36803087186793326, -0.020337731477576542, 0.0054655378095212759, -0.00033452966946535248],\n (0.9, 19.0): [-0.3676700404163355, -0.019370115848857467, 0.0053249296207149655, -0.00032975528909580403],\n (0.9, 20.0): [-0.36642276267188811, -0.019344251412284838, 0.0054454968582897528, -0.00034868111677540948],\n (0.9, 24.0): [-0.36450650753755193, -0.017284255499990679, 0.0052337500059176749, -0.00034898202845747288],\n (0.9, 30.0): [-0.36251868940168608, -0.015358560437631397, 0.0050914299956134786, -0.00035574528891633978],\n (0.9, 40.0): [-0.36008886676510943, -0.014016835682905486, 0.0051930835959111514, -0.00038798316011984165],\n (0.9, 60.0): [-0.35825590690268061, -0.011991568926537646, 0.0050632208542414191, -0.00039090198974493085],\n (0.9, 120.0): [-0.35543612237284411, -0.011074403997811812, 0.0053504570752765162, -0.00043647137428074178],\n (0.9, inf): [-0.35311806343057167, -0.0096254020092145353, 0.0054548591208177181, -0.00045343916634968493],\n (0.95, 1.0): [-0.65330318136020071, -0.12638310760474375, 0.035987535130769424, -0.0028562665467665315],\n (0.95, 2.0): [-0.47225160417826934, -0.10182570362271424, 0.025846563499059158, -0.0019096769058043243],\n (0.95, 3.0): [-0.4056635555586528, -0.077067172693350297, 0.017789909647225533, -0.001182961668735774],\n (0.95, 4.0): [-0.37041675177340955, -0.063815687118939465, 0.014115210247737845, -0.00089996098435117598],\n (0.95, 5.0): [-0.35152398291152309, -0.052156502640669317, 0.010753738086401853, -0.0005986841939451575],\n (0.95, 6.0): [-0.33806730015201264, -0.045668399809578597, 0.0093168898952878162, -0.00051369719615782102],\n (0.95, 7.0): [-0.32924041072104465, -0.040019601775490091, 0.0080051199552865163, -0.00042054536135868043],\n (0.95, 8.0): [-0.32289030266989077, -0.035575345931670443, 0.0070509089344694669, -0.00035980773304803576],\n (0.95, 9.0): [-0.31767304201477375, -0.032464945930165703, 0.0064755950437272143, -0.0003316676253661824],\n (0.95, 10.0): [-0.31424318064708656, -0.029133461621153, 0.0057437449431074795, -0.00027894252261209191],\n (0.95, 11.0): [-0.31113589620384974, -0.02685115250591049, 0.0053517905282942889, -0.00026155954116874666],\n (0.95, 12.0): [-0.30848983612414582, -0.025043238019239168, 0.0050661675913488829, -0.00025017202909614005],\n (0.95, 13.0): [-0.3059212907410393, -0.023863874699213077, 0.0049618051135807322, -0.00025665425781125703],\n (0.95, 14.0): [-0.30449676902720035, -0.021983976741572344, 0.0045740513735751968, -0.00022881166323945914],\n (0.95, 15.0): [-0.30264908294481396, -0.02104880307520084, 0.0044866571614804382, -0.00023187587597844057],\n (0.95, 16.0): [-0.30118294463097917, -0.020160231061926728, 0.0044170780759056859, -0.00023733502359045826],\n (0.95, 17.0): [-0.30020013353427744, -0.018959271614471574, 0.0041925333038202285, -0.00022274025630789767],\n (0.95, 18.0): [-0.29857886556874402, -0.018664437456802001, 0.0042557787632833697, -0.00023758868868853716],\n (0.95, 19.0): [-0.29796289236978263, -0.017632218552317589, 0.0040792779937959866, -0.00022753271474613109],\n (0.95, 20.0): [-0.29681506554838077, -0.017302563243037392, 
0.0041188426221428964, -0.00023913038468772782],\n (0.95, 24.0): [-0.29403146911167666, -0.015332330986025032, 0.0039292170319163728, -0.00024003445648641732],\n (0.95, 30.0): [-0.29080775563775879, -0.013844059210779323, 0.0039279165616059892, -0.00026085104496801666],\n (0.95, 40.0): [-0.28821583032805109, -0.011894686715666892, 0.0038202623278839982, -0.00026933325102031252],\n (0.95, 60.0): [-0.28525636737751447, -0.010235910558409797, 0.0038147029777580001, -0.00028598362144178959],\n (0.95, 120.0): [-0.28241065885026539, -0.0086103836327305026, 0.0038450612886908714, -0.00030206053671559411],\n (0.95, inf): [-0.27885570064169296, -0.0078122455524849222, 0.0041798538053623453, -0.0003469494881774609],\n (0.975, 1.0): [-0.65203598304297983, -0.12608944279227957, 0.035710038757117347, -0.0028116024425349053],\n (0.975, 2.0): [-0.46371891130382281, -0.096954458319996509, 0.023958312519912289, -0.0017124565391080503],\n (0.975, 3.0): [-0.38265282195259875, -0.076782539231612282, 0.017405078796142955, -0.0011610853687902553],\n (0.975, 4.0): [-0.34051193158878401, -0.063652342734671602, 0.013528310336964293, -0.00083644708934990761],\n (0.975, 5.0): [-0.31777655705536484, -0.051694686914334619, 0.010115807205265859, -0.00054517465344192009],\n (0.975, 6.0): [-0.30177149019958716, -0.044806697631189059, 0.008483551848413786, -0.00042827853925009264],\n (0.975, 7.0): [-0.29046972313293562, -0.039732822689098744, 0.007435356037378946, -0.00037562928283350671],\n (0.975, 8.0): [-0.28309484007368141, -0.034764904940713388, 0.0062932513694928518, -0.00029339243611357956],\n (0.975, 9.0): [-0.27711707948119785, -0.031210465194810709, 0.0055576244284178435, -0.00024663798208895803],\n (0.975, 10.0): [-0.27249203448553611, -0.028259756468251584, 0.00499112012528406, -0.00021535380417035389],\n (0.975, 11.0): [-0.26848515860011007, -0.026146703336893323, 0.0046557767110634073, -0.00020400628148271448],\n (0.975, 12.0): [-0.26499921540008192, -0.024522931106167097, 0.0044259624958665278, -0.00019855685376441687],\n (0.975, 13.0): [-0.2625023751891592, -0.022785875653297854, 0.004150277321193792, -0.00018801223218078264],\n (0.975, 14.0): [-0.26038552414321758, -0.021303509859738341, 0.0039195608280464681, -0.00017826200169385824],\n (0.975, 15.0): [-0.25801244886414665, -0.020505508012402567, 0.0038754868932712929, -0.00018588907991739744],\n (0.975, 16.0): [-0.25685316062360508, -0.018888418269740373, 0.0035453092842317293, -0.00016235770674204116],\n (0.975, 17.0): [-0.25501132271353549, -0.018362951972357794, 0.0035653933105288631, -0.00017470353354992729],\n (0.975, 18.0): [-0.25325045404452656, -0.017993537285026156, 0.0036035867405376691, -0.00018635492166426884],\n (0.975, 19.0): [-0.25236899494677928, -0.016948921372207198, 0.0034138931781330802, -0.00017462253414687881],\n (0.975, 20.0): [-0.25134498025027691, -0.016249564498874988, 0.0033197284005334333, -0.00017098091103245596],\n (0.975, 24.0): [-0.24768690797476625, -0.014668160763513996, 0.0032850791186852558, -0.00019013480716844995],\n (0.975, 30.0): [-0.24420834707522676, -0.012911171716272752, 0.0031977676700968051, -0.00020114907914487053],\n (0.975, 40.0): [-0.24105725356215926, -0.010836526056169627, 0.0030231303550754159, -0.00020128696343148667],\n (0.975, 60.0): [-0.23732082703955223, -0.0095442727157385391, 0.0031432904473555259, -0.00023062224109383941],\n (0.975, 120.0): [-0.23358581879594578, -0.0081281259918709343, 0.0031877298679120094, -0.00024496230446851501],\n (0.975, inf): [-0.23004105093119268, 
-0.0067112585174133573, 0.0032760251638919435, -0.00026244001319462992],\n (0.99, 1.0): [-0.65154119422706203, -0.1266603927572312, 0.03607480609672048, -0.0028668112687608113],\n (0.99, 2.0): [-0.45463403324378804, -0.098701236234527367, 0.024412715761684689, -0.0017613772919362193],\n (0.99, 3.0): [-0.36402060051035778, -0.079244959193729148, 0.017838124021360584, -0.00119080116484847],\n (0.99, 4.0): [-0.31903506063953818, -0.061060740682445241, 0.012093154962939612, -0.00067268347188443093],\n (0.99, 5.0): [-0.28917014580689182, -0.052940780099313689, 0.010231009146279354, -0.00057178339184615239],\n (0.99, 6.0): [-0.27283240161179012, -0.042505435573209085, 0.0072753401118264534, -0.00031314034710725922],\n (0.99, 7.0): [-0.25773968720546719, -0.039384214480463406, 0.0069120882597286867, -0.00032994068754356204],\n (0.99, 8.0): [-0.24913629282433833, -0.033831567178432859, 0.0055516244725724185, -0.00022570786249671376],\n (0.99, 9.0): [-0.24252380896373404, -0.029488280751457097, 0.0045215453527922998, -0.00014424552929022646],\n (0.99, 10.0): [-0.23654349556639986, -0.02705600214566789, 0.0041627255469343632, -0.00013804427029504753],\n (0.99, 11.0): [-0.23187404969432468, -0.024803662094970855, 0.0037885852786822475, -0.00012334999287725012],\n (0.99, 12.0): [-0.22749929386320905, -0.023655085290534145, 0.0037845051889055896, -0.00014785715789924055],\n (0.99, 13.0): [-0.22458989143485605, -0.021688394892771506, 0.0034075294601425251, -0.00012436961982044268],\n (0.99, 14.0): [-0.22197623872225777, -0.020188830700102918, 0.0031648685865587473, -0.00011320740119998819],\n (0.99, 15.0): [-0.2193924323730066, -0.019327469111698265, 0.0031295453754886576, -0.00012373072900083014],\n (0.99, 16.0): [-0.21739436875855705, -0.018215854969324128, 0.0029638341057222645, -0.00011714667871412003],\n (0.99, 17.0): [-0.21548926805467686, -0.017447822179412719, 0.0028994805120482812, -0.00012001887015183794],\n (0.99, 18.0): [-0.21365014687077843, -0.01688869353338961, 0.0028778031289216546, -0.00012591199104792711],\n (0.99, 19.0): [-0.21236653761262406, -0.016057151563612645, 0.0027571468998022017, -0.00012049196593780046],\n (0.99, 20.0): [-0.21092693178421842, -0.015641706950956638, 0.0027765989877361293, -0.00013084915163086915],\n (0.99, 24.0): [-0.20681960327410207, -0.013804298040271909, 0.0026308276736585674, -0.0001355061502101814],\n (0.99, 30.0): [-0.20271691131071576, -0.01206095288359876, 0.0025426138004198909, -0.00014589047959047533],\n (0.99, 40.0): [-0.19833098054449289, -0.010714533963740719, 0.0025985992420317597, -0.0001688279944262007],\n (0.99, 60.0): [-0.19406768821236584, -0.0093297106482013985, 0.0026521518387539584, -0.00018884874193665104],\n (0.99, 120.0): [-0.19010213174677365, -0.0075958207221300924, 0.0025660823297025633, -0.00018906475172834352],\n (0.99, inf): [-0.18602070255787137, -0.0062121155165363188, 0.0026328293420766593, -0.00020453366529867131],\n (0.995, 1.0): [-0.65135583544951825, -0.1266868999507193, 0.036067522182457165, -0.0028654516958844922],\n (0.995, 2.0): [-0.45229774013072793, -0.09869462954369547, 0.024381858599368908, -0.0017594734553033394],\n (0.995, 3.0): [-0.35935765236429706, -0.076650408326671915, 0.016823026893528978, -0.0010835134496404637],\n (0.995, 4.0): [-0.30704474720931169, -0.063093047731613019, 0.012771683306774929, -0.00075852491621809955],\n (0.995, 5.0): [-0.27582551740863454, -0.052533353137885791, 0.0097776009845174372, -0.00051338031756399129],\n (0.995, 6.0): [-0.25657971464398704, -0.043424914996692286, 
0.0074324147435969991, -0.00034105188850494067],\n (0.995, 7.0): [-0.24090407819707738, -0.039591604712200287, 0.0068848429451020387, -0.00034737131709273414],\n (0.995, 8.0): [-0.23089540800827862, -0.034353305816361958, 0.0056009527629820111, -0.00024389336976992433],\n (0.995, 9.0): [-0.22322694848310584, -0.030294770709722547, 0.0046751239747245543, -0.00017437479314218922],\n (0.995, 10.0): [-0.21722684126671632, -0.026993563560163809, 0.0039811592710905491, -0.00013135281785826703],\n (0.995, 11.0): [-0.21171635822852911, -0.025156193618212551, 0.0037507759652964205, -0.00012959836685175671],\n (0.995, 12.0): [-0.20745332165849167, -0.023318819535607219, 0.0034935020002058903, -0.00012642826898405916],\n (0.995, 13.0): [-0.20426054591612508, -0.021189796175249527, 0.003031472176128759, -9.0497733877531618e-05],\n (0.995, 14.0): [-0.20113536905578902, -0.020011536696623061, 0.0029215880889956729, -9.571527213951222e-05],\n (0.995, 15.0): [-0.19855601561006403, -0.018808533734002542, 0.0027608859956002344, -9.2472995256929217e-05],\n (0.995, 16.0): [-0.19619157579534008, -0.017970461530551096, 0.0027113719105000371, -9.9864874982890861e-05],\n (0.995, 17.0): [-0.19428015140726104, -0.017009762497670704, 0.0025833389598201345, -9.6137545738061124e-05],\n (0.995, 18.0): [-0.19243180236773033, -0.01631617252107519, 0.0025227443561618621, -9.8067580523432881e-05],\n (0.995, 19.0): [-0.19061294393069844, -0.01586226613672222, 0.0025207005902641781, -0.00010466151274918466],\n (0.995, 20.0): [-0.18946302696580328, -0.014975796567260896, 0.0023700506576419867, -9.5507779057884629e-05],\n (0.995, 24.0): [-0.18444251428695257, -0.013770955893918012, 0.0024579445553339903, -0.00012688402863358003],\n (0.995, 30.0): [-0.18009742499570078, -0.011831341846559026, 0.0022801125189390046, -0.00012536249967254906],\n (0.995, 40.0): [-0.17562721880943261, -0.010157142650455463, 0.0022121943861923474, -0.000134542652873434],\n (0.995, 60.0): [-0.17084630673594547, -0.0090224965852754805, 0.0023435529965815565, -0.00016240306777440115],\n (0.995, 120.0): [-0.16648414081054147, -0.0074792163241677225, 0.0023284585524533607, -0.00017116464012147041],\n (0.995, inf): [-0.16213921875452461, -0.0058985998630496144, 0.0022605819363689093, -0.00016896211491119114],\n (0.999, 1.0): [-0.65233994072089363, -0.12579427445444219, 0.035830577995679271, -0.0028470555202945564],\n (0.999, 2.0): [-0.45050164311326341, -0.098294804380698292, 0.024134463919493736, -0.0017269603956852841],\n (0.999, 3.0): [-0.35161741499307819, -0.076801152272374273, 0.016695693063138672, -0.0010661121974071864],\n (0.999, 4.0): [-0.29398448788574133, -0.06277319725219685, 0.012454220010543127, -0.00072644165723402445],\n (0.999, 5.0): [-0.25725364564365477, -0.053463787584337355, 0.0099664236557431545, -0.00054866039388980659],\n (0.999, 6.0): [-0.23674225795168574, -0.040973155890031254, 0.0062599481191736696, -0.00021565734226586692],\n (0.999, 7.0): [-0.21840108878983297, -0.037037020271877719, 0.0055908063671900703, -0.00020238790479809623],\n (0.999, 8.0): [-0.2057964743918449, -0.032500885103194356, 0.0046441644585661756, -0.00014769592268680274],\n (0.999, 9.0): [-0.19604592954882674, -0.029166922919677936, 0.0040644333111949814, -0.00012854052861297006],\n (0.999, 10.0): [-0.18857328935948367, -0.026316705703161091, 0.0035897350868809275, -0.00011572282691335702],\n (0.999, 11.0): [-0.18207431428535406, -0.024201081944369412, 0.0031647372098056077, -8.1145935982296439e-05],\n (0.999, 12.0): [-0.17796358148991101, 
-0.021054306118620879, 0.0023968085939602055, -1.5907156771296993e-05],\n (0.999, 13.0): [-0.17371965962745489, -0.019577162950177709, 0.0022391783473999739, -2.0613023472812558e-05],\n (0.999, 14.0): [-0.16905298116759873, -0.01967115985443986, 0.0026495208325889269, -9.1074275220634073e-05],\n (0.999, 15.0): [-0.16635662558214312, -0.017903767183469876, 0.0022301322677100496, -5.1956773935885426e-05],\n (0.999, 16.0): [-0.16388776549525449, -0.016671918839902419, 0.0020365289602744382, -4.3592447599724942e-05],\n (0.999, 17.0): [-0.16131934177990759, -0.015998918405126326, 0.0019990454743285904, -4.8176277491327653e-05],\n (0.999, 18.0): [-0.15880633110376571, -0.015830715141055916, 0.0021688405343832091, -8.061825248932771e-05],\n (0.999, 19.0): [-0.15644841913314136, -0.015729364721105681, 0.0022981443610378136, -0.00010093672643417343],\n (0.999, 20.0): [-0.15516596606222705, -0.014725095968258637, 0.0021117117014292155, -8.8806880297328484e-05],\n (0.999, 24.0): [-0.14997437768645827, -0.012755323295476786, 0.0018871651510496939, -8.0896370662414938e-05],\n (0.999, 30.0): [-0.14459974882323703, -0.011247323832877647, 0.0018637400643826279, -9.6415323191606741e-05],\n (0.999, 40.0): [-0.13933285919392555, -0.0097151769692496587, 0.0018131251876208683, -0.00010452598991994023],\n (0.999, 60.0): [-0.13424555343804143, -0.0082163027951669444, 0.0017883427892173382, -0.00011415865110808405],\n (0.999, 120.0): [-0.12896119523040372, -0.0070426701112581112, 0.0018472364154226955, -0.00012862202979478294],\n (0.999, inf): [-0.12397213562666673, -0.0056901201604149998, 0.0018260689406957129, -0.00013263452567995485]}\n\n# p values that are defined in the A table\np_keys = [.1,.5,.675,.75,.8,.85,.9,.95,.975,.99,.995,.999]\n\n# v values that are defined in the A table\nv_keys = lrange(2, 21) + [24, 30, 40, 60, 120, inf]\n\ndef _isfloat(x):\n \"\"\"\n returns True if x is a float,\n returns False otherwise\n \"\"\"\n try:\n float(x)\n except:\n return False\n\n return True\n\n##def _phi(p):\n## \"\"\"returns the pth quantile inverse norm\"\"\"\n## return scipy.stats.norm.isf(p)\n\ndef _phi( p ):\n # this function is faster than using scipy.stats.norm.isf(p)\n # but the permissity of the license isn't explicitly listed.\n # using scipy.stats.norm.isf(p) is an acceptable alternative\n\n \"\"\"\n Modified from the author's original perl code (original comments follow below)\n by [email protected]. May 3, 2004.\n\n Lower tail quantile for standard normal distribution function.\n\n This function returns an approximation of the inverse cumulative\n standard normal distribution function. 
I.e., given P, it returns\n an approximation to the X satisfying P = Pr{Z <= X} where Z is a\n random variable from the standard normal distribution.\n\n The algorithm uses a minimax approximation by rational functions\n and the result has a relative error whose absolute value is less\n than 1.15e-9.\n\n Author: Peter John Acklam\n Time-stamp: 2000-07-19 18:26:14\n E-mail: [email protected]\n WWW URL: http://home.online.no/~pjacklam\n \"\"\"\n\n if p <= 0 or p >= 1:\n # The original perl code exits here, we'll throw an exception instead\n raise ValueError( \"Argument to ltqnorm %f must be in open interval (0,1)\" % p )\n\n # Coefficients in rational approximations.\n a = (-3.969683028665376e+01, 2.209460984245205e+02, \\\n -2.759285104469687e+02, 1.383577518672690e+02, \\\n -3.066479806614716e+01, 2.506628277459239e+00)\n b = (-5.447609879822406e+01, 1.615858368580409e+02, \\\n -1.556989798598866e+02, 6.680131188771972e+01, \\\n -1.328068155288572e+01 )\n c = (-7.784894002430293e-03, -3.223964580411365e-01, \\\n -2.400758277161838e+00, -2.549732539343734e+00, \\\n 4.374664141464968e+00, 2.938163982698783e+00)\n d = ( 7.784695709041462e-03, 3.224671290700398e-01, \\\n 2.445134137142996e+00, 3.754408661907416e+00)\n\n # Define break-points.\n plow = 0.02425\n phigh = 1 - plow\n\n # Rational approximation for lower region:\n if p < plow:\n q = math.sqrt(-2*math.log(p))\n return -(((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \\\n ((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)\n\n # Rational approximation for upper region:\n if phigh < p:\n q = math.sqrt(-2*math.log(1-p))\n return (((((c[0]*q+c[1])*q+c[2])*q+c[3])*q+c[4])*q+c[5]) / \\\n ((((d[0]*q+d[1])*q+d[2])*q+d[3])*q+1)\n\n # Rational approximation for central region:\n q = p - 0.5\n r = q*q\n return -(((((a[0]*r+a[1])*r+a[2])*r+a[3])*r+a[4])*r+a[5])*q / \\\n (((((b[0]*r+b[1])*r+b[2])*r+b[3])*r+b[4])*r+1)\n\ndef _ptransform(p):\n \"\"\"function for p-value abcissa transformation\"\"\"\n return -1. / (1. + 1.5 * _phi((1. + p)/2.))\n\ndef _func(a, p, r, v):\n \"\"\"\n calculates f-hat for the coefficients in a, probability p,\n sample mean difference r, and degrees of freedom v.\n \"\"\"\n # eq. 2.3\n f = a[0]*math.log(r-1.) + \\\n a[1]*math.log(r-1.)**2 + \\\n a[2]*math.log(r-1.)**3 + \\\n a[3]*math.log(r-1.)**4\n\n # eq. 2.7 and 2.8 corrections\n if r == 3:\n f += -0.002 / (1. + 12. * _phi(p)**2)\n\n if v <= 4.364:\n f += 1./517. - 1./(312.*(v,1e38)[np.isinf(v)])\n else:\n f += 1./(191.*(v,1e38)[np.isinf(v)])\n\n return -f\n\ndef _select_ps(p):\n # There are more generic ways of doing this but profiling\n # revealed that selecting these points is one of the slow\n # things that is easy to change. 
This is about 11 times\n # faster than the generic algorithm it is replacing.\n #\n # it is possible that different break points could yield\n # better estimates, but the function this is refactoring\n # just used linear distance.\n \"\"\"returns the points to use for interpolating p\"\"\"\n if p >= .99:\n return .990, .995, .999\n elif p >= .975:\n return .975, .990, .995\n elif p >= .95:\n return .950, .975, .990\n elif p >= .9125:\n return .900, .950, .975\n elif p >= .875:\n return .850, .900, .950\n elif p >= .825:\n return .800, .850, .900\n elif p >= .7625:\n return .750, .800, .850\n elif p >= .675:\n return .675, .750, .800\n elif p >= .500:\n return .500, .675, .750\n else:\n return .100, .500, .675\n\ndef _interpolate_p(p, r, v):\n \"\"\"\n interpolates p based on the values in the A table for the\n scalar value of r and the scalar value of v\n \"\"\"\n\n # interpolate p (v should be in table)\n # if .5 < p < .75 use linear interpolation in q\n # if p > .75 use quadratic interpolation in log(y + r/v)\n # by -1. / (1. + 1.5 * _phi((1. + p)/2.))\n\n # find the 3 closest v values\n p0, p1, p2 = _select_ps(p)\n try:\n y0 = _func(A[(p0, v)], p0, r, v) + 1.\n except:\n print(p,r,v)\n y1 = _func(A[(p1, v)], p1, r, v) + 1.\n y2 = _func(A[(p2, v)], p2, r, v) + 1.\n\n y_log0 = math.log(y0 + float(r)/float(v))\n y_log1 = math.log(y1 + float(r)/float(v))\n y_log2 = math.log(y2 + float(r)/float(v))\n\n # If p < .85 apply only the ordinate transformation\n # if p > .85 apply the ordinate and the abcissa transformation\n # In both cases apply quadratic interpolation\n if p > .85:\n p_t = _ptransform(p)\n p0_t = _ptransform(p0)\n p1_t = _ptransform(p1)\n p2_t = _ptransform(p2)\n\n # calculate derivatives for quadratic interpolation\n d2 = 2*((y_log2-y_log1)/(p2_t-p1_t) - \\\n (y_log1-y_log0)/(p1_t-p0_t))/(p2_t-p0_t)\n if (p2+p0)>=(p1+p1):\n d1 = (y_log2-y_log1)/(p2_t-p1_t) - 0.5*d2*(p2_t-p1_t)\n else:\n d1 = (y_log1-y_log0)/(p1_t-p0_t) + 0.5*d2*(p1_t-p0_t)\n d0 = y_log1\n\n # interpolate value\n y_log = (d2/2.) * (p_t-p1_t)**2. + d1 * (p_t-p1_t) + d0\n\n # transform back to y\n y = math.exp(y_log) - float(r)/float(v)\n\n elif p > .5:\n # calculate derivatives for quadratic interpolation\n d2 = 2*((y_log2-y_log1)/(p2-p1) - \\\n (y_log1-y_log0)/(p1-p0))/(p2-p0)\n if (p2+p0)>=(p1+p1):\n d1 = (y_log2-y_log1)/(p2-p1) - 0.5*d2*(p2-p1)\n else:\n d1 = (y_log1-y_log0)/(p1-p0) + 0.5*d2*(p1-p0)\n d0 = y_log1\n\n # interpolate values\n y_log = (d2/2.) * (p-p1)**2. 
+ d1 * (p-p1) + d0\n\n # transform back to y\n y = math.exp(y_log) - float(r)/float(v)\n\n else:\n # linear interpolation in q and p\n q0 = math.sqrt(2) * -y0 * \\\n scipy.stats.t.isf((1.+p0)/2., (v,1e38)[v>1e38])\n q1 = math.sqrt(2) * -y1 * \\\n scipy.stats.t.isf((1.+p1)/2., (v,1e38)[v>1e38])\n\n d1 = (q1-q0)/(p1-p0)\n d0 = q0\n\n # interpolate values\n q = d1 * (p-p0) + d0\n\n # transform back to y\n y = -q / (math.sqrt(2) * \\\n scipy.stats.t.isf((1.+p)/2., (v,1e38)[v>1e38]))\n\n return y\n\ndef _select_vs(v, p):\n # This one is is about 30 times faster than\n # the generic algorithm it is replacing.\n \"\"\"returns the points to use for interpolating v\"\"\"\n\n if v >= 120.:\n return 60, 120, inf\n elif v >= 60.:\n return 40, 60, 120\n elif v >= 40.:\n return 30, 40, 60\n elif v >= 30.:\n return 24, 30, 40\n elif v >= 24.:\n return 20, 24, 30\n elif v >= 19.5:\n return 19, 20, 24\n\n if p >= .9:\n if v < 2.5:\n return 1, 2, 3\n else:\n if v < 3.5:\n return 2, 3, 4\n\n vi = int(round(v))\n return vi - 1, vi, vi + 1\n\ndef _interpolate_v(p, r, v):\n\n \"\"\"\n interpolates v based on the values in the A table for the\n scalar value of r and th\n \"\"\"\n # interpolate v (p should be in table)\n # ordinate: y**2\n # abcissa: 1./v\n\n # find the 3 closest v values\n # only p >= .9 have table values for 1 degree of freedom.\n # The boolean is used to index the tuple and append 1 when\n # p >= .9\n v0, v1, v2 = _select_vs(v, p)\n\n # y = f - 1.\n y0_sq = (_func(A[(p,v0)], p, r, v0) + 1.)**2.\n y1_sq = (_func(A[(p,v1)], p, r, v1) + 1.)**2.\n y2_sq = (_func(A[(p,v2)], p, r, v2) + 1.)**2.\n\n # if v2 is inf set to a big number so interpolation\n # calculations will work\n if v2 > 1e38: v2 = 1e38\n\n # transform v\n v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2\n\n # calculate derivatives for quadratic interpolation\n d2 = 2.*((y2_sq-y1_sq)/(v2_-v1_) - \\\n (y0_sq-y1_sq)/(v0_-v1_)) / (v2_-v0_)\n if (v2_ + v0_) >= (v1_ + v1_):\n d1 = (y2_sq-y1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)\n else:\n d1 = (y1_sq-y0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)\n d0 = y1_sq\n\n # calculate y\n y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)\n\n return y\n\ndef _qsturng(p, r, v):\n \"\"\"scalar version of qsturng\"\"\"\n## print 'q',p\n # r is interpolated through the q to y here we only need to\n # account for when p and/or v are not found in the table.\n global A, p_keys, v_keys\n\n if p < .1 or p > .999:\n raise ValueError('p must be between .1 and .999')\n\n if p < .9:\n if v < 2:\n raise ValueError('v must be > 2 when p < .9')\n else:\n if v < 1:\n raise ValueError('v must be > 1 when p >= .9')\n\n # The easy case. A tabled value is requested.\n\n #numpy 1.4.1: TypeError: unhashable type: 'numpy.ndarray' :\n p = float(p)\n if isinstance(v, np.ndarray):\n v = v.item()\n if (p,v) in A:\n y = _func(A[(p,v)], p, r, v) + 1.\n\n elif p not in p_keys and v not in v_keys+([],[1])[p>=.90]:\n # apply bilinear (quadratic) interpolation\n #\n # p0,v2 + o + p1,v2 + p2,v2\n # r2\n #\n # 1\n # - (p,v)\n # v x\n #\n # r1\n # p0,v1 + o + p1,v1 + p2,v1\n #\n #\n # p0,v0 + o r0 + p1,v0 + p2,v0\n #\n # _ptransform(p)\n #\n # (p1 and v1 may be below or above (p,v). The algorithm\n # works in both cases. For diagramatic simplicity it is\n # shown as above)\n #\n # 1. at v0, v1, and v2 use quadratic interpolation\n # to find r0, r1, r2\n #\n # 2. 
use r0, r1, r2 and quadratic interpolaiton\n # to find y and (p,v)\n\n # find the 3 closest v values\n v0, v1, v2 = _select_vs(v, p)\n\n # find the 3 closest p values\n p0, p1, p2 = _select_ps(p)\n\n # calculate r0, r1, and r2\n r0_sq = _interpolate_p(p, r, v0)**2\n r1_sq = _interpolate_p(p, r, v1)**2\n r2_sq = _interpolate_p(p, r, v2)**2\n\n # transform v\n v_, v0_, v1_, v2_ = 1./v, 1./v0, 1./v1, 1./v2\n\n # calculate derivatives for quadratic interpolation\n d2 = 2.*((r2_sq-r1_sq)/(v2_-v1_) - \\\n (r0_sq-r1_sq)/(v0_-v1_)) / (v2_-v0_)\n if (v2_ + v0_) >= (v1_ + v1_):\n d1 = (r2_sq-r1_sq) / (v2_-v1_) - 0.5*d2*(v2_-v1_)\n else:\n d1 = (r1_sq-r0_sq) / (v1_-v0_) + 0.5*d2*(v1_-v0_)\n d0 = r1_sq\n\n # calculate y\n y = math.sqrt((d2/2.)*(v_-v1_)**2. + d1*(v_-v1_)+ d0)\n\n elif v not in v_keys+([],[1])[p>=.90]:\n y = _interpolate_v(p, r, v)\n\n elif p not in p_keys:\n y = _interpolate_p(p, r, v)\n\n return math.sqrt(2) * -y * \\\n scipy.stats.t.isf((1.+p)/2., (v,1e38)[v>1e38])\n\n# make a qsturng functinon that will accept list-like objects\n_vqsturng = np.vectorize(_qsturng)\n_vqsturng.__doc__ = \"\"\"vector version of qsturng\"\"\"\n\ndef qsturng(p, r, v):\n \"\"\"Approximates the quantile p for a studentized range\n distribution having v degrees of freedom and r samples\n for probability p.\n\n Parameters\n ----------\n p : (scalar, array_like)\n The cumulative probability value\n p >= .1 and p <=.999\n (values under .5 are not recommended)\n r : (scalar, array_like)\n The number of samples\n r >= 2 and r <= 200\n (values over 200 are permitted but not recommended)\n v : (scalar, array_like)\n The sample degrees of freedom\n if p >= .9:\n v >=1 and v >= inf\n else:\n v >=2 and v >= inf\n\n Returns\n -------\n q : (scalar, array_like)\n approximation of the Studentized Range\n\n \"\"\"\n\n if all(map(_isfloat, [p, r, v])):\n return _qsturng(p, r, v)\n return _vqsturng(p, r, v)\n\n##def _qsturng0(p, r, v):\n#### print 'q0',p\n## \"\"\"\n## returns a first order approximation of q studentized range\n## value. Based on Lund and Lund's 1983 based on the FORTRAN77\n## algorithm AS 190.2 Appl. Statist. (1983).\n## \"\"\"\n## vmax = 120.\n## c = [0.8843, 0.2368, 1.214, 1.208, 1.4142]\n##\n## t = -_phi(.5+.5*p)\n## if (v < vmax):\n## t += (t**3. + t) / float(v) / 4.\n##\n## q = c[0] - c[1] * t\n## if (v < vmax):\n## q = q - c[2] / float(v) + c[3] * t / float(v)\n## q = t * (q * math.log(r - 1.) + c[4])\n##\n## # apply \"bar napkin\" correction for when p < .85\n## # this is good enough for our intended purpose\n## if p < .85:\n## q += math.log10(r) * 2.25 * (.85-p)\n## return q\n\ndef _psturng(q, r, v):\n \"\"\"scalar version of psturng\"\"\"\n if q < 0.:\n raise ValueError('q should be >= 0')\n\n opt_func = lambda p, r, v : abs(_qsturng(p, r, v) - q)\n\n if v == 1:\n if q < _qsturng(.9, r, 1):\n return .1\n elif q > _qsturng(.999, r, 1):\n return .001\n return 1. - fminbound(opt_func, .9, .999, args=(r,v))\n else:\n if q < _qsturng(.1, r, v):\n return .9\n elif q > _qsturng(.999, r, v):\n return .001\n return 1. 
- fminbound(opt_func, .1, .999, args=(r,v))\n\n_vpsturng = np.vectorize(_psturng)\n_vpsturng.__doc__ = \"\"\"vector version of psturng\"\"\"\n\ndef psturng(q, r, v):\n \"\"\"Evaluates the probability from 0 to q for a studentized\n range having v degrees of freedom and r samples.\n\n Parameters\n ----------\n q : (scalar, array_like)\n quantile value of Studentized Range\n q >= 0.\n r : (scalar, array_like)\n The number of samples\n r >= 2 and r <= 200\n (values over 200 are permitted but not recommended)\n v : (scalar, array_like)\n The sample degrees of freedom\n if p >= .9:\n v >=1 and v >= inf\n else:\n v >=2 and v >= inf\n\n Returns\n -------\n p : (scalar, array_like)\n 1. - area from zero to q under the Studentized Range\n distribution. When v == 1, p is bound between .001\n and .1, when v > 1, p is bound between .001 and .9.\n Values between .5 and .9 are 1st order appoximations.\n\n \"\"\"\n if all(map(_isfloat, [q, r, v])):\n return _psturng(q, r, v)\n return _vpsturng(q, r, v)\n\n##p, r, v = .9, 10, 20\n##print\n##print 'p and v interpolation'\n##print '\\t20\\t22\\t24'\n##print '.75',qsturng(.75, r, 20),qsturng(.75, r, 22),qsturng(.75, r, 24)\n##print '.85',qsturng(.85, r, 20),qsturng(.85, r, 22),qsturng(.85, r, 24)\n##print '.90',qsturng(.90, r, 20),qsturng(.90, r, 22),qsturng(.90, r, 24)\n##print\n##print 'p and v interpolation'\n##print '\\t120\\t500\\tinf'\n##print '.950',qsturng(.95, r, 120),qsturng(.95, r, 500),qsturng(.95, r, inf)\n##print '.960',qsturng(.96, r, 120),qsturng(.96, r, 500),qsturng(.96, r, inf)\n##print '.975',qsturng(.975, r, 120),qsturng(.975, r, 500),qsturng(.975, r, inf)\n##print\n##print 'p and v interpolation'\n##print '\\t40\\t50\\t60'\n##print '.950',qsturng(.95, r, 40),qsturng(.95, r, 50),qsturng(.95, r, 60)\n##print '.960',qsturng(.96, r, 40),qsturng(.96, r, 50),qsturng(.96, r, 60)\n##print '.975',qsturng(.975, r, 40),qsturng(.975, r, 50),qsturng(.975, r, 60)\n##print\n##print 'p and v interpolation'\n##print '\\t20\\t22\\t24'\n##print '.50',qsturng(.5, r, 20),qsturng(.5, r, 22),qsturng(.5, r, 24)\n##print '.60',qsturng(.6, r, 20),qsturng(.6, r, 22),qsturng(.6, r, 24)\n##print '.75',qsturng(.75, r, 20),qsturng(.75, r, 22),qsturng(.75, r, 24)\n",
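qsturng and psturng above form a quantile / upper-tail pair for the studentized range, which is what Tukey-HSD-style multiple comparisons need. In statsmodels this module is importable as statsmodels.stats.libqsturng; a short round-trip check under the documented argument ranges (the expected numbers are approximate):

from statsmodels.stats.libqsturng import psturng, qsturng

# Critical value of the studentized range at p = .95 for r = 3 samples
# and v = 20 degrees of freedom; standard tables give roughly 3.58.
q = qsturng(0.95, 3, 20)

# psturng returns 1 - P(Q <= q), so it should come back near 0.05.
p = psturng(q, 3, 20)
print(q, p)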
"from __future__ import division\n\nfrom numpy.testing import assert_almost_equal\nfrom statsmodels.emplike.originregress import ELOriginRegress\nfrom statsmodels.datasets import cancer\nfrom .results.el_results import OriginResults\nimport numpy as np\n\nclass GenRes(object):\n \"\"\"\n Loads data and creates class instance ot be tested.\n\n \"\"\"\n def __init__(self):\n data = cancer.load()\n self.res1 = ELOriginRegress(data.endog, data.exog).fit()\n self.res2 = OriginResults()\n\n\nclass TestOrigin(GenRes):\n \"\"\"\n See OriginResults for details on how tests were computed\n \"\"\"\n def __init__(self):\n super(TestOrigin, self).__init__()\n\n def test_params(self):\n assert_almost_equal(self.res1.params, self.res2.test_params, 4)\n\n def test_llf(self):\n assert_almost_equal(self.res1.llf_el, self.res2.test_llf_hat, 4)\n\n def test_hypothesis_beta1(self):\n assert_almost_equal(self.res1.el_test([.0034],[1])[0],\n self.res2.test_llf_hypoth,4)\n\n def test_ci_beta(self):\n ci = self.res1.conf_int_el(1)\n ll = ci[0]\n ul = ci[1]\n llf_low = np.sum(np.log(self.res1.el_test([ll],[1], return_weights=1)[2]))\n llf_high = np.sum(np.log(self.res1.el_test([ul],[1], return_weights=1)[2]))\n assert_almost_equal(llf_low, self.res2.test_llf_conf, 4)\n assert_almost_equal(llf_high, self.res2.test_llf_conf, 4)\n"
] | [
[
"numpy.array"
],
[
"numpy.asarray",
"numpy.hstack"
],
[
"numpy.vectorize",
"scipy.optimize.fminbound",
"numpy.isinf"
],
[
"numpy.testing.assert_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LingxiaoShawn/pytorch_geometric | [
"50b7bfc4a59b5b6f7ec547ff862985f3b2e22798",
"50b7bfc4a59b5b6f7ec547ff862985f3b2e22798",
"50b7bfc4a59b5b6f7ec547ff862985f3b2e22798",
"50b7bfc4a59b5b6f7ec547ff862985f3b2e22798",
"50b7bfc4a59b5b6f7ec547ff862985f3b2e22798",
"50b7bfc4a59b5b6f7ec547ff862985f3b2e22798"
] | [
"test/nn/conv/test_film_conv.py",
"torch_geometric/transforms/remove_isolated_nodes.py",
"test/utils/test_subgraph.py",
"test/data/test_hetero_data.py",
"test/nn/conv/test_hgt_conv.py",
"test/nn/conv/test_edge_conv.py"
] | [
"import torch\nfrom torch_sparse import SparseTensor\n\nfrom torch_geometric.nn import FiLMConv\n\n\ndef test_film_conv():\n x1 = torch.randn(4, 4)\n x2 = torch.randn(2, 16)\n edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])\n edge_type = torch.tensor([0, 1, 1, 0, 0, 1])\n row, col = edge_index\n adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4))\n\n conv = FiLMConv(4, 32)\n assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=1)'\n out1 = conv(x1, edge_index)\n assert out1.size() == (4, 32)\n assert conv(x1, adj.t().set_value(None)).tolist() == out1.tolist()\n\n t = '(Tensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, edge_index).tolist() == out1.tolist()\n t = '(Tensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, adj.t().set_value(None)).tolist() == out1.tolist()\n\n conv = FiLMConv(4, 32, num_relations=2)\n assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=2)'\n out1 = conv(x1, edge_index, edge_type)\n assert out1.size() == (4, 32)\n assert conv(x1, adj.t()).tolist() == out1.tolist()\n\n t = '(Tensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, edge_index, edge_type).tolist() == out1.tolist()\n t = '(Tensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, adj.t()).tolist() == out1.tolist()\n\n adj = adj.sparse_resize((4, 2))\n\n conv = FiLMConv((4, 16), 32)\n assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=1)'\n out1 = conv((x1, x2), edge_index)\n assert out1.size() == (2, 32)\n assert conv((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()\n\n t = '(PairTensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), edge_index).tolist() == out1.tolist()\n t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()\n\n conv = FiLMConv((4, 16), 32, num_relations=2)\n assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=2)'\n out1 = conv((x1, x2), edge_index, edge_type)\n assert out1.size() == (2, 32)\n assert conv((x1, x2), adj.t()).tolist() == out1.tolist()\n\n t = '(PairTensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), edge_index, edge_type).tolist() == out1.tolist()\n t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), adj.t()).tolist() == out1.tolist()\n",
"import re\n\nimport torch\n\nfrom torch_geometric.transforms import BaseTransform\nfrom torch_geometric.utils import remove_isolated_nodes\n\n\nclass RemoveIsolatedNodes(BaseTransform):\n r\"\"\"Removes isolated nodes from the graph.\"\"\"\n def __call__(self, data):\n num_nodes = data.num_nodes\n out = remove_isolated_nodes(data.edge_index, data.edge_attr, num_nodes)\n data.edge_index, data.edge_attr, mask = out\n\n if hasattr(data, '__num_nodes__'):\n data.num_nodes = int(mask.sum())\n\n for key, item in data:\n if bool(re.search('edge', key)):\n continue\n if torch.is_tensor(item) and item.size(0) == num_nodes:\n data[key] = item[mask]\n\n return data\n",
"import torch\n\nfrom torch_geometric.utils import k_hop_subgraph, subgraph\n\n\ndef test_subgraph():\n edge_index = torch.tensor([\n [0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6],\n [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5],\n ])\n edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n\n idx = torch.tensor([3, 4, 5], dtype=torch.long)\n mask = torch.tensor([0, 0, 0, 1, 1, 1, 0], dtype=torch.bool)\n indices = [3, 4, 5]\n\n for subset in [idx, mask, indices]:\n out = subgraph(subset, edge_index, edge_attr)\n assert out[0].tolist() == [[3, 4, 4, 5], [4, 3, 5, 4]]\n assert out[1].tolist() == [7, 8, 9, 10]\n\n out = subgraph(subset, edge_index, edge_attr, relabel_nodes=True)\n assert out[0].tolist() == [[0, 1, 1, 2], [1, 0, 2, 1]]\n assert out[1].tolist() == [7, 8, 9, 10]\n\n\ndef test_k_hop_subgraph():\n edge_index = torch.tensor([\n [0, 1, 2, 3, 4, 5],\n [2, 2, 4, 4, 6, 6],\n ])\n\n subset, edge_index, mapping, edge_mask = k_hop_subgraph(\n 6, 2, edge_index, relabel_nodes=True)\n assert subset.tolist() == [2, 3, 4, 5, 6]\n assert edge_index.tolist() == [[0, 1, 2, 3], [2, 2, 4, 4]]\n assert mapping.tolist() == [4]\n assert edge_mask.tolist() == [False, False, True, True, True, True]\n\n edge_index = torch.tensor([\n [1, 2, 4, 5],\n [0, 1, 5, 6],\n ])\n\n subset, edge_index, mapping, edge_mask = k_hop_subgraph([0, 6], 2,\n edge_index,\n relabel_nodes=True)\n\n assert subset.tolist() == [0, 1, 2, 4, 5, 6]\n assert edge_index.tolist() == [[1, 2, 3, 4], [0, 1, 4, 5]]\n assert mapping.tolist() == [0, 5]\n assert edge_mask.tolist() == [True, True, True, True]\n",
"import copy\n\nimport torch\n\nfrom torch_geometric.data import HeteroData\n\nx_paper = torch.randn(10, 16)\nx_author = torch.randn(5, 32)\n\nidx_paper = torch.randint(x_paper.size(0), (100, ), dtype=torch.long)\nidx_author = torch.randint(x_author.size(0), (100, ), dtype=torch.long)\n\nedge_index_paper_paper = torch.stack([idx_paper[:50], idx_paper[:50]], dim=0)\nedge_index_paper_author = torch.stack([idx_paper[:30], idx_author[:30]], dim=0)\nedge_index_author_paper = torch.stack([idx_paper[:30], idx_author[:30]], dim=0)\n\n\ndef get_edge_index(num_src_nodes, num_dst_nodes, num_edges):\n row = torch.randint(num_src_nodes, (num_edges, ), dtype=torch.long)\n col = torch.randint(num_dst_nodes, (num_edges, ), dtype=torch.long)\n return torch.stack([row, col], dim=0)\n\n\ndef test_init_hetero_data():\n data = HeteroData()\n data['v1'].x = 1\n data['paper'].x = x_paper\n data['author'].x = x_author\n data['paper', 'paper'].edge_index = edge_index_paper_paper\n data['paper', 'author'].edge_index = edge_index_paper_author\n data['author', 'paper'].edge_index = edge_index_author_paper\n assert len(data) == 2\n assert len(data.edge_types) == 3\n assert data.node_types == ['v1', 'paper', 'author']\n\n data = HeteroData(\n v1={'x': 1},\n paper={'x': x_paper},\n author={'x': x_author},\n paper__paper={'edge_index': edge_index_paper_paper},\n paper__author={'edge_index': edge_index_paper_author},\n author__paper={'edge_index': edge_index_author_paper},\n )\n assert len(data) == 2\n assert len(data.edge_types) == 3\n assert data.node_types == ['v1', 'paper', 'author']\n\n data = HeteroData({\n 'v1': {\n 'x': 1\n },\n 'paper': {\n 'x': x_paper\n },\n 'author': {\n 'x': x_author\n },\n ('paper', 'paper'): {\n 'edge_index': edge_index_paper_paper\n },\n ('paper', 'author'): {\n 'edge_index': edge_index_paper_author\n },\n ('author', 'paper'): {\n 'edge_index': edge_index_author_paper\n },\n })\n assert len(data) == 2\n assert len(data.edge_types) == 3\n assert data.node_types == ['v1', 'paper', 'author']\n\n\ndef test_hetero_data_functions():\n data = HeteroData()\n data['paper'].x = x_paper\n data['author'].x = x_author\n data['paper', 'paper'].edge_index = edge_index_paper_paper\n data['paper', 'author'].edge_index = edge_index_paper_author\n data['author', 'paper'].edge_index = edge_index_author_paper\n assert len(data) == 2\n assert sorted(data.keys) == ['edge_index', 'x']\n assert 'x' in data and 'edge_index' in data\n assert data.num_nodes == 15\n assert data.num_edges == 110\n\n node_types, edge_types = data.metadata()\n assert node_types == ['paper', 'author']\n assert edge_types == [\n ('paper', 'to', 'paper'),\n ('paper', 'to', 'author'),\n ('author', 'to', 'paper'),\n ]\n\n x_dict = data.collect('x')\n assert len(x_dict) == 2\n assert x_dict['paper'].tolist() == x_paper.tolist()\n assert x_dict['author'].tolist() == x_author.tolist()\n assert x_dict == data.x_dict\n\n data.y = 0\n assert data['y'] == 0 and data.y == 0\n assert len(data) == 3\n assert sorted(data.keys) == ['edge_index', 'x', 'y']\n\n del data['paper', 'author']\n node_types, edge_types = data.metadata()\n assert node_types == ['paper', 'author']\n assert edge_types == [('paper', 'to', 'paper'), ('author', 'to', 'paper')]\n\n assert len(data.to_dict()) == 5\n assert len(data.to_namedtuple()) == 5\n assert data.to_namedtuple().y == 0\n assert len(data.to_namedtuple().paper) == 1\n\n\ndef test_copy_hetero_data():\n data = HeteroData()\n data['paper'].x = x_paper\n data['paper', 'to', 'paper'].edge_index = edge_index_paper_paper\n\n 
out = copy.copy(data)\n assert id(data) != id(out)\n assert len(data.stores) == len(out.stores)\n for store1, store2 in zip(data.stores, out.stores):\n assert id(store1) != id(store2)\n assert id(data) == id(store1._parent())\n assert id(out) == id(store2._parent())\n assert out['paper']._key == 'paper'\n assert data['paper'].x.data_ptr() == out['paper'].x.data_ptr()\n assert out['to']._key == ('paper', 'to', 'paper')\n assert data['to'].edge_index.data_ptr() == out['to'].edge_index.data_ptr()\n\n out = copy.deepcopy(data)\n assert id(data) != id(out)\n assert len(data.stores) == len(out.stores)\n for store1, store2 in zip(data.stores, out.stores):\n assert id(store1) != id(store2)\n assert id(out) == id(out['paper']._parent())\n assert out['paper']._key == 'paper'\n assert data['paper'].x.data_ptr() != out['paper'].x.data_ptr()\n assert data['paper'].x.tolist() == out['paper'].x.tolist()\n assert id(out) == id(out['to']._parent())\n assert out['to']._key == ('paper', 'to', 'paper')\n assert data['to'].edge_index.data_ptr() != out['to'].edge_index.data_ptr()\n assert data['to'].edge_index.tolist() == out['to'].edge_index.tolist()\n\n\ndef test_to_homogeneous_and_vice_versa():\n data = HeteroData()\n\n data['paper'].x = torch.randn(100, 128)\n data['author'].x = torch.randn(200, 128)\n\n data['paper', 'paper'].edge_index = get_edge_index(100, 100, 250)\n data['paper', 'paper'].edge_weight = torch.randn(250, )\n data['paper', 'paper'].edge_attr = torch.randn(250, 64)\n\n data['paper', 'author'].edge_index = get_edge_index(100, 200, 500)\n data['paper', 'author'].edge_weight = torch.randn(500, )\n data['paper', 'author'].edge_attr = torch.randn(500, 64)\n\n data['author', 'paper'].edge_index = get_edge_index(200, 100, 1000)\n data['author', 'paper'].edge_weight = torch.randn(1000, )\n data['author', 'paper'].edge_attr = torch.randn(1000, 64)\n\n out = data.to_homogeneous()\n assert len(out) == 6\n assert out.num_nodes == 300\n assert out.num_edges == 1750\n assert out.num_node_features == 128\n assert out.num_edge_features == 64\n assert out.node_type.size() == (300, )\n assert out.node_type.min() == 0\n assert out.node_type.max() == 1\n assert out.edge_type.size() == (1750, )\n assert out.edge_type.min() == 0\n assert out.edge_type.max() == 2\n assert len(out._node_type_names) == 2\n assert len(out._edge_type_names) == 3\n\n out = out.to_heterogeneous()\n assert len(out) == 4\n assert torch.allclose(data['paper'].x, out['paper'].x)\n assert torch.allclose(data['author'].x, out['author'].x)\n\n edge_index1 = data['paper', 'paper'].edge_index\n edge_index2 = out['paper', 'paper'].edge_index\n assert edge_index1.tolist() == edge_index2.tolist()\n assert torch.allclose(\n data['paper', 'paper'].edge_weight,\n out['paper', 'paper'].edge_weight,\n )\n assert torch.allclose(\n data['paper', 'paper'].edge_attr,\n out['paper', 'paper'].edge_attr,\n )\n\n edge_index1 = data['paper', 'author'].edge_index\n edge_index2 = out['paper', 'author'].edge_index\n assert edge_index1.tolist() == edge_index2.tolist()\n assert torch.allclose(\n data['paper', 'author'].edge_weight,\n out['paper', 'author'].edge_weight,\n )\n assert torch.allclose(\n data['paper', 'author'].edge_attr,\n out['paper', 'author'].edge_attr,\n )\n\n edge_index1 = data['author', 'paper'].edge_index\n edge_index2 = out['author', 'paper'].edge_index\n assert edge_index1.tolist() == edge_index2.tolist()\n assert torch.allclose(\n data['author', 'paper'].edge_weight,\n out['author', 'paper'].edge_weight,\n )\n assert torch.allclose(\n 
data['author', 'paper'].edge_attr,\n out['author', 'paper'].edge_attr,\n )\n\n out = data.to_homogeneous()\n node_type = out.node_type\n edge_type = out.edge_type\n del out.node_type\n del out.edge_type\n del out._edge_type_names\n del out._node_type_names\n out = out.to_heterogeneous(node_type, edge_type)\n assert len(out) == 4\n assert torch.allclose(data['paper'].x, out['0'].x)\n assert torch.allclose(data['author'].x, out['1'].x)\n\n edge_index1 = data['paper', 'paper'].edge_index\n edge_index2 = out['0', '0'].edge_index\n assert edge_index1.tolist() == edge_index2.tolist()\n assert torch.allclose(\n data['paper', 'paper'].edge_weight,\n out['0', '0'].edge_weight,\n )\n assert torch.allclose(\n data['paper', 'paper'].edge_attr,\n out['0', '0'].edge_attr,\n )\n\n edge_index1 = data['paper', 'author'].edge_index\n edge_index2 = out['0', '1'].edge_index\n assert edge_index1.tolist() == edge_index2.tolist()\n assert torch.allclose(\n data['paper', 'author'].edge_weight,\n out['0', '1'].edge_weight,\n )\n assert torch.allclose(\n data['paper', 'author'].edge_attr,\n out['0', '1'].edge_attr,\n )\n\n edge_index1 = data['author', 'paper'].edge_index\n edge_index2 = out['1', '0'].edge_index\n assert edge_index1.tolist() == edge_index2.tolist()\n assert torch.allclose(\n data['author', 'paper'].edge_weight,\n out['1', '0'].edge_weight,\n )\n assert torch.allclose(\n data['author', 'paper'].edge_attr,\n out['1', '0'].edge_attr,\n )\n\n data = HeteroData()\n\n data['paper'].num_nodes = 100\n data['author'].num_nodes = 200\n\n out = data.to_homogeneous(add_node_type=False)\n assert len(out) == 1\n assert out.num_nodes == 300\n\n out = data.to_homogeneous().to_heterogeneous()\n assert len(out) == 1\n assert out['paper'].num_nodes == 100\n assert out['author'].num_nodes == 200\n",
"import torch\nfrom torch_sparse import SparseTensor\n\nfrom torch_geometric.nn import HGTConv\n\n\ndef test_hgt_conv_same_dimensions():\n x_dict = {\n 'author': torch.randn(4, 16),\n 'paper': torch.randn(6, 16),\n }\n\n index1 = torch.randint(0, 4, (20, ), dtype=torch.long)\n index2 = torch.randint(0, 6, (20, ), dtype=torch.long)\n\n edge_index_dict = {\n ('author', 'writes', 'paper'): torch.stack([index1, index2]),\n ('paper', 'written_by', 'author'): torch.stack([index2, index1]),\n }\n\n adj_t_dict = {}\n for edge_type, edge_index in edge_index_dict.items():\n src_type, _, dst_type = edge_type\n adj_t_dict[edge_type] = SparseTensor(\n row=edge_index[0], col=edge_index[1],\n sparse_sizes=(x_dict[src_type].size(0),\n x_dict[dst_type].size(0))).t()\n\n metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))\n\n conv = HGTConv(16, 16, metadata, heads=2)\n assert str(conv) == 'HGTConv(16, heads=2)'\n out_dict1 = conv(x_dict, edge_index_dict)\n assert len(out_dict1) == 2\n assert out_dict1['author'].size() == (4, 16)\n assert out_dict1['paper'].size() == (6, 16)\n out_dict2 = conv(x_dict, adj_t_dict)\n assert len(out_dict1) == len(out_dict2)\n for node_type in out_dict1.keys():\n assert torch.allclose(out_dict1[node_type], out_dict2[node_type],\n atol=1e-6)\n\n # TODO: Test JIT functionality. We need to wait on this one until PyTorch\n # allows indexing `ParameterDict` mappings :(\n\n\ndef test_hgt_conv_different_dimensions():\n x_dict = {\n 'author': torch.randn(4, 16),\n 'paper': torch.randn(6, 32),\n }\n\n index1 = torch.randint(0, 4, (20, ), dtype=torch.long)\n index2 = torch.randint(0, 6, (20, ), dtype=torch.long)\n\n edge_index_dict = {\n ('author', 'writes', 'paper'): torch.stack([index1, index2]),\n ('paper', 'written_by', 'author'): torch.stack([index2, index1]),\n }\n\n adj_t_dict = {}\n for edge_type, edge_index in edge_index_dict.items():\n src_type, _, dst_type = edge_type\n adj_t_dict[edge_type] = SparseTensor(\n row=edge_index[0], col=edge_index[1],\n sparse_sizes=(x_dict[src_type].size(0),\n x_dict[dst_type].size(0))).t()\n\n metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))\n\n conv = HGTConv(in_channels={\n 'author': 16,\n 'paper': 32\n }, out_channels=32, metadata=metadata, heads=2)\n assert str(conv) == 'HGTConv(32, heads=2)'\n out_dict1 = conv(x_dict, edge_index_dict)\n assert len(out_dict1) == 2\n assert out_dict1['author'].size() == (4, 32)\n assert out_dict1['paper'].size() == (6, 32)\n out_dict2 = conv(x_dict, adj_t_dict)\n assert len(out_dict1) == len(out_dict2)\n for node_type in out_dict1.keys():\n assert torch.allclose(out_dict1[node_type], out_dict2[node_type],\n atol=1e-6)\n\n\ndef test_hgt_conv_lazy():\n x_dict = {\n 'author': torch.randn(4, 16),\n 'paper': torch.randn(6, 32),\n }\n\n index1 = torch.randint(0, 4, (20, ), dtype=torch.long)\n index2 = torch.randint(0, 6, (20, ), dtype=torch.long)\n\n edge_index_dict = {\n ('author', 'writes', 'paper'): torch.stack([index1, index2]),\n ('paper', 'written_by', 'author'): torch.stack([index2, index1]),\n }\n\n adj_t_dict = {}\n for edge_type, edge_index in edge_index_dict.items():\n src_type, _, dst_type = edge_type\n adj_t_dict[edge_type] = SparseTensor(\n row=edge_index[0], col=edge_index[1],\n sparse_sizes=(x_dict[src_type].size(0),\n x_dict[dst_type].size(0))).t()\n\n metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))\n\n conv = HGTConv(-1, 32, metadata, heads=2)\n assert str(conv) == 'HGTConv(32, heads=2)'\n out_dict1 = conv(x_dict, edge_index_dict)\n assert len(out_dict1) == 
2\n assert out_dict1['author'].size() == (4, 32)\n assert out_dict1['paper'].size() == (6, 32)\n out_dict2 = conv(x_dict, adj_t_dict)\n\n assert len(out_dict1) == len(out_dict2)\n for node_type in out_dict1.keys():\n assert torch.allclose(out_dict1[node_type], out_dict2[node_type],\n atol=1e-6)\n",
"import torch\nfrom torch.nn import Linear as Lin\nfrom torch.nn import ReLU\nfrom torch.nn import Sequential as Seq\nfrom torch_sparse import SparseTensor\n\nfrom torch_geometric.nn import DynamicEdgeConv, EdgeConv\n\n\ndef test_edge_conv_conv():\n x1 = torch.randn(4, 16)\n x2 = torch.randn(2, 16)\n edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])\n row, col = edge_index\n adj = SparseTensor(row=row, col=col, sparse_sizes=(4, 4))\n\n nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32))\n conv = EdgeConv(nn)\n assert conv.__repr__() == (\n 'EdgeConv(nn=Sequential(\\n'\n ' (0): Linear(in_features=32, out_features=16, bias=True)\\n'\n ' (1): ReLU()\\n'\n ' (2): Linear(in_features=16, out_features=32, bias=True)\\n'\n '))')\n out1 = conv(x1, edge_index)\n assert out1.size() == (4, 32)\n assert conv((x1, x1), edge_index).tolist() == out1.tolist()\n assert conv(x1, adj.t()).tolist() == out1.tolist()\n assert conv((x1, x1), adj.t()).tolist() == out1.tolist()\n\n adj = adj.sparse_resize((4, 2))\n out2 = conv((x1, x2), edge_index)\n assert out2.size() == (2, 32)\n assert conv((x1, x2), adj.t()).tolist() == out2.tolist()\n\n t = '(Tensor, Tensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, edge_index).tolist() == out1.tolist()\n\n t = '(PairTensor, Tensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x1), edge_index).tolist() == out1.tolist()\n assert jit((x1, x2), edge_index).tolist() == out2.tolist()\n\n adj = adj.sparse_resize((4, 4))\n t = '(Tensor, SparseTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, adj.t()).tolist() == out1.tolist()\n\n t = '(PairTensor, SparseTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x1), adj.t()).tolist() == out1.tolist()\n adj = adj.sparse_resize((4, 2))\n assert jit((x1, x2), adj.t()).tolist() == out2.tolist()\n\n\ndef test_dynamic_edge_conv_conv():\n x1 = torch.randn(8, 16)\n x2 = torch.randn(4, 16)\n batch1 = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])\n batch2 = torch.tensor([0, 0, 1, 1])\n\n nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32))\n conv = DynamicEdgeConv(nn, k=2)\n assert conv.__repr__() == (\n 'DynamicEdgeConv(nn=Sequential(\\n'\n ' (0): Linear(in_features=32, out_features=16, bias=True)\\n'\n ' (1): ReLU()\\n'\n ' (2): Linear(in_features=16, out_features=32, bias=True)\\n'\n '), k=2)')\n out11 = conv(x1)\n assert out11.size() == (8, 32)\n\n out12 = conv(x1, batch1)\n assert out12.size() == (8, 32)\n\n out21 = conv((x1, x2))\n assert out21.size() == (4, 32)\n\n out22 = conv((x1, x2), (batch1, batch2))\n assert out22.size() == (4, 32)\n\n t = '(Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1).tolist() == out11.tolist()\n assert jit(x1, batch1).tolist() == out12.tolist()\n\n t = '(PairTensor, Optional[PairTensor]) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2)).tolist() == out21.tolist()\n assert jit((x1, x2), (batch1, batch2)).tolist() == out22.tolist()\n\n torch.jit.script(conv.jittable()) # Test without explicit typing.\n"
] | [
[
"torch.randn",
"torch.tensor"
],
[
"torch.is_tensor"
],
[
"torch.Tensor",
"torch.tensor"
],
[
"torch.stack",
"torch.randn",
"torch.allclose",
"torch.randint"
],
[
"torch.stack",
"torch.randn",
"torch.allclose",
"torch.randint"
],
[
"torch.nn.Linear",
"torch.randn",
"torch.nn.ReLU",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LucilleN/MH-Stigma-in-Masked-LMs | [
"9e30817cb32358a8e6004faf7ef88ad69d6148f6"
] | [
"main/pronouns.py"
] | [
"# Comparing He vs She\nfrom transformers import pipeline\nimport pandas as pd\nfrom utils import diagnoses, get_target_probability, get_top_k, print_stats, plot_male_and_female\nimport matplotlib.pylab as plt\nimport seaborn as sns\nimport argparse\n\ntemplates = [\n \"<mask> has [diagnosis]\",\n \"<mask> is diagnosed with [diagnosis]\",\n \"<mask> struggles with [diagnosis]\",\n \"<mask> suffers from [diagnosis]\",\n \"<mask> is in recovery from [diagnosis]\",\n \"<mask> is in treatment for [diagnosis]\",\n \"<mask> is being treated for [diagnosis]\",\n \"<mask> sees a psychiatrist for [diagnosis]\",\n \"<mask> sees a therapist for [diagnosis]\",\n \"<mask> is going to therapy for [diagnosis]\",\n \"<mask> takes medication for [diagnosis]\"\n]\n# templates = [\n# \"[MASK] has [diagnosis]\",\n# \"[MASK] is diagnosed with [diagnosis]\",\n# \"[MASK] struggles with [diagnosis]\",\n# \"[MASK] suffers from [diagnosis]\",\n# \"[MASK] is in recovery from [diagnosis]\",\n# \"[MASK] is in treatment for [diagnosis]\",\n# \"[MASK] is being treated for [diagnosis]\",\n# \"[MASK] sees a psychiatrist for [diagnosis]\",\n# \"[MASK] sees a therapist for [diagnosis]\",\n# \"[MASK] is going to therapy for [diagnosis]\",\n# \"[MASK] takes medication for [diagnosis]\"\n# ]\n\n# Aggregates for the following She/He experiments\nTOP_K = 20\nfemale_untreated, female_treated, female_total = {}, {}, {}\nmale_untreated, male_treated, male_total = {}, {}, {}\nall_df = None\n\ndef update_aggregates(male_mean, female_mean, template, treated=False):\n female_total[template] = female_mean\n male_total[template] = male_mean\n\n if treated:\n female_treated[template] = female_mean\n male_treated[template] = male_mean\n else:\n female_untreated[template] = female_mean\n male_untreated[template] = male_mean\n\n\ndef add_to_df(male, female, template): # , current_df=None):\n global all_df\n new_add = pd.DataFrame({'probability': male+female, 'gender': ['male']*11+[\n 'female']*11, 'diagnosis': diagnoses*2, 'prompt': [template]*22})\n all_df = new_add if (all_df is None) else pd.concat([all_df, new_add])\n\n\n\ndef run_experiment(template):\n male_mask = \"He\"\n female_mask = \"She\"\n\n print(f\"TOP {TOP_K} OUTPUTS FOR THE TEMPLATE {template}\")\n print(get_top_k(template, nlp_fill, TOP_K))\n\n female_outputs = get_target_probability(template, female_mask, nlp_fill)\n female_scores = [element['score'] for element in female_outputs]\n\n print(\"FEMALE SCORES:\")\n print(female_scores)\n\n male_outputs = get_target_probability(template, male_mask, nlp_fill)\n male_scores = [element['score'] for element in male_outputs]\n\n male_mean, female_mean = print_stats(male=male_scores, female=female_scores)\n\n if args.scatter_plot:\n update_aggregates(male_mean, female_mean, template, treated=False)\n plot_male_and_female(template, male_mask, female_mask, male_scores, female_scores)\n\n if args.box_plot:\n add_to_df(male_scores, female_scores, template)\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n usage=\"To run all experiments, execute this script without any additional arguments. 
\\\n To specify specific experiments, and to turn on outputting graphs, use the options below.\")\n\n parser.add_argument(\"-exp0\", \"--has\",\n help=\"Run experiment 0: She/He has X.\", action=\"store_true\")\n parser.add_argument(\"-exp1\", \"--is_diagnosed_with\",\n help=\"Run experiment 1: She/He is diagnosed with X.\", action=\"store_true\")\n parser.add_argument(\"-exp2\", \"--struggles_with\",\n help=\"Run experiment 2: She/He struggles with X.\", action=\"store_true\")\n parser.add_argument(\"-exp3\", \"--suffers_from\",\n help=\"Run experiment 3: She/He suffers from X.\", action=\"store_true\")\n parser.add_argument(\"-exp4\", \"--is_in_recovery_from\",\n help=\"Run experiment 4: She/He is in recovery from X.\", action=\"store_true\")\n parser.add_argument(\"-exp5\", \"--is_in_treatment_for\",\n help=\"Run experiment 5: She/He is in treatment for X.\", action=\"store_true\")\n parser.add_argument(\"-exp6\", \"--is_being_treated_for\",\n help=\"Run experiment 6: She/He is being treated for X.\", action=\"store_true\")\n parser.add_argument(\"-exp7\", \"--sees_a_psychiatrist_for\",\n help=\"Run experiment 7: She/He sees a psychiatrist for X.\", action=\"store_true\")\n parser.add_argument(\"-exp8\", \"--sees_a_therapist_for\",\n help=\"Run experiment 8: She/He sees a therapist for X.\", action=\"store_true\")\n parser.add_argument(\"-exp9\", \"--is_going_to_therapy_for\",\n help=\"Run experiment 9: She/He is going to therapy for X.\", action=\"store_true\")\n parser.add_argument(\"-exp10\", \"--takes_medication_for\",\n help=\"Run experiment 10: She/He takes medication for X.\", action=\"store_true\")\n parser.add_argument(\"-bp\", \"--box_plot\",\n help=\"Generate a box and whisker plot to summarize all the experiments that were run.\", action=\"store_true\")\n parser.add_argument(\"-sp\", \"--scatter_plot\",\n help=\"Generate a scatter plot for each experiment that was run.\", action=\"store_true\")\n\n args = parser.parse_args()\n\n exps_to_run = []\n i = 0\n for arg in vars(args):\n if getattr(args, arg):\n exps_to_run.append(i)\n i += 1\n if i == 10:\n break\n if len(exps_to_run) == 0:\n exps_to_run = list(range(11))\n\n nlp_fill = pipeline('fill-mask', top_k=TOP_K, model=\"roberta-large\")\n # nlp_fill = pipeline('fill-mask', model=\"mental/mental-roberta-base\")\n # nlp_fill = pipeline('fill-mask', model=\"emilyalsentzer/Bio_ClinicalBERT\")\n # nlp_fill = pipeline('fill-mask', model=\"yikuan8/Clinical-Longformer\")\n # nlp_fill = pipeline('fill-mask', model=\"Tsubasaz/clinical-pubmed-bert-base-512\")\n # nlp_fill = pipeline('fill-mask', model=\"nlp4good/psych-search\")\n\n\n for exp_number in exps_to_run:\n print(f'running experiment {exp_number}')\n template = templates[exp_number]\n run_experiment(template)\n\n if args.scatter_plot:\n female_total_sum = sum_dictionary(female_total)\n female_untreated_sum = sum_dictionary(female_untreated)\n female_treated_sum = sum_dictionary(female_treated)\n\n male_total_sum = sum_dictionary(male_total)\n male_untreated_sum = sum_dictionary(male_untreated)\n male_treated_sum = sum_dictionary(male_treated)\n\n print(\n f\"FEMALE: total={female_total_sum}, untreated={female_untreated_sum}, treated={female_treated_sum}\")\n print(\n f\"MALE: total={male_total_sum}, untreated={male_untreated_sum}, treated={male_treated_sum}\")\n\n if args.box_plot:\n ax = sns.boxplot(x=\"prompt\", y=\"probability\", hue=\"gender\",\n data=all_df, width=0.3, showfliers=False)\n sns.despine(offset=10)\n sns.set(rc={'figure.figsize': (18, 6)}, 
font_scale=1.2)\n\n plt.xticks(rotation=45, ha='right', fontsize=12)\n ax.set_ylim([0, 0.6])\n plt.title(\"Probabilities of predicting gendered pronouns\")\n plt.savefig(\"../plots/boxplot_pronouns_roberta.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_mentalroberta.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_clinicalbert.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_clinicallongformer.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_clinicalpubmedbert.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_psychsearch.pdf\", bbox_inches=\"tight\")"
] | [
[
"pandas.concat",
"pandas.DataFrame",
"matplotlib.pylab.title",
"matplotlib.pylab.xticks",
"matplotlib.pylab.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
cww97/visual-language-grasping | [
"f96404c9997ef55ede07293ce319ca19a39ae5ec"
] | [
"envs/simulation/robot.py"
] | [
"import os\nimport time\n\nimport numpy as np\nimport yaml\n\nimport utils\nfrom . import vrep\nfrom ..robot import Robot as BaseRobot\nfrom ..robot import Reward\nfrom ..data import Data as TextData\nimport random\nfrom bisect import bisect_right\nimport cv2\nimport os\n\n\nclass SimRobot(BaseRobot):\n\tdef __init__(self, obj_mesh_dir, num_obj, *args):\n\t\tBaseRobot.__init__(self, *args)\n\t\tself.text_data = TextData()\n\n\t\t# Define colors for object meshes (Tableau palette)\n\t\tself.color_name = ['blue', 'green', 'brown', 'orange', 'yellow', 'gray', 'red', 'purple', 'cyan', 'pink']\n\t\tself.color_space = np.asarray([[78.0, 121.0, 167.0], # blue\n\t\t\t\t\t\t\t\t\t\t[89.0, 161.0, 79.0], # green\n\t\t\t\t\t\t\t\t\t\t[156, 117, 95], # brown\n\t\t\t\t\t\t\t\t\t\t[242, 142, 43], # orange\n\t\t\t\t\t\t\t\t\t\t[237.0, 201.0, 72.0], # yellow\n\t\t\t\t\t\t\t\t\t\t[186, 176, 172], # gray\n\t\t\t\t\t\t\t\t\t\t[255.0, 87.0, 89.0], # red\n\t\t\t\t\t\t\t\t\t\t[176, 122, 161], # purple\n\t\t\t\t\t\t\t\t\t\t[118, 183, 178], # cyan\n\t\t\t\t\t\t\t\t\t\t[255, 157, 167]]) / 255.0 # pink\n\n\t\t# Read files in object mesh directory\n\t\tself.obj_mesh_dir = obj_mesh_dir\n\t\tself.num_obj = num_obj\n\t\tself.mesh_list = list(filter(lambda x: x.endswith('.obj'), os.listdir(self.obj_mesh_dir)))\n\n\t\ttry:\n\t\t\twith open(os.path.join(obj_mesh_dir, 'blocks.yml')) as f:\n\t\t\t\tyaml_dict = yaml.safe_load(f)\n\t\t\tself.groups = yaml_dict['groups']\n\t\t\tself.mesh_name = yaml_dict['names']\n\t\t\tfor obj in self.mesh_list:\n\t\t\t\tif obj not in self.mesh_name.keys():\n\t\t\t\t\traise Exception\n\t\texcept Exception:\n\t\t\tprint('Failed to read block names/groups')\n\t\t\texit(1)\n\n\t\t# Make sure to have the server side running in V-REP:\n\t\t# in a child script of a V-REP scene, add following command\n\t\t# to be executed just once, at simulation start:\n\t\t#\n\t\t# simExtRemoteApiStart(19999)\n\t\t#\n\t\t# then start simulation, and run this program.\n\t\t#\n\t\t# IMPORTANT: for each successful call to simxStart, there\n\t\t# should be a corresponding call to simxFinish at the end!\n\n\t\t# MODIFY remoteApiConnections.txt\n\n\t\t# Connect to simulator\n\t\tvrep.simxFinish(-1) # Just in case, close all opened connections\n\t\t# Connect to V-REP on port 19997\n\t\tself.sim_client = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)\n\t\tif self.sim_client == -1:\n\t\t\tprint('Failed to connect to simulation (V-REP remote API server). 
Exiting.')\n\t\t\texit()\n\t\telse:\n\t\t\tprint('Connected to simulation.')\n\t\t\t# self.restart_sim()\n\t\tself.MODE = vrep.simx_opmode_blocking\n\n\t\t# Setup virtual camera in simulation\n\t\tself.setup_sim_camera()\n\t\tself.object_handles = []\n\t\tself.object_left_handles = []\n\t\tself.target_handle = None\n\n\t\t# Add objects to simulation environment\n\t\t# self.add_objects()\n\n\tdef setup_sim_camera(self):\n\n\t\t# Get handle to camera\n\t\tsim_ret, self.cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_persp', self.MODE)\n\t\t_, self.up_cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_ortho', self.MODE)\n\n\t\t# Get camera pose and intrinsics in simulation\n\t\tsim_ret, cam_position = vrep.simxGetObjectPosition(self.sim_client, self.cam_handle, -1, self.MODE)\n\t\tsim_ret, cam_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.cam_handle, -1, self.MODE)\n\t\tcam_trans = np.eye(4, 4)\n\t\tcam_trans[0:3, 3] = np.asarray(cam_position)\n\t\tcam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]\n\t\tcam_rotm = np.eye(4, 4)\n\t\tcam_rotm[0:3, 0:3] = np.linalg.inv(utils.euler2rotm(cam_orientation))\n\t\t# Compute rigid transformation representing camera pose\n\t\tself.cam_pose = np.dot(cam_trans, cam_rotm)\n\t\tself.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])\n\t\tself.cam_depth_scale = 1\n\n\t\t# Get background image\n\t\tself.bg_color_img, self.bg_depth_img = self.get_camera_data()\n\t\tself.bg_depth_img = self.bg_depth_img * self.cam_depth_scale\n\n\tdef add_objects(self, mesh_idx=-1, mesh_color=-1):\n\t\t# TODO\n\t\t# handle <-> ind <-> obj -> name\n\t\t# Just for debug\n\t\t# print([self.mesh_list[ind] for ind in self.obj_mesh_ind])\n\t\t# self.obj_mesh_ind = np.array(range(len(self.mesh_list)))\n\t\t# self.obj_mesh_color = self.color_space[np.asarray(range(self.num_obj)) % 10, :]\n\t\t# Randomly choose objects to add to scene\n\n\t\tif mesh_idx == -1:\n\t\t\tgroup_chosen = np.random.choice(self.groups, size=self.num_obj, replace=False)\n\t\t\tself.obj_mesh_ind = np.array([self.mesh_list.index(np.random.choice(obj)) for obj in group_chosen])\n\t\t\tself.obj_mesh_color = self.color_space[np.random.choice(np.arange(self.color_space.shape[0]), size=self.num_obj, replace=False)]\n\t\telse:\n\t\t\tself.obj_mesh_ind = np.array([mesh_idx])\n\t\t\tself.obj_mesh_color = np.array([mesh_color])\n\t\t\t# import pdb; pdb.set_trace()\n\n\t\t# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)\n\t\tself.object_handles = []\n\t\tfor object_idx in range(len(self.obj_mesh_ind)):\n\t\t\tcurr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])\n\t\t\tcurr_shape_name = 'shape_%02d' % object_idx\n\t\t\tdrop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1\n\t\t\tdrop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1\n\t\t\tobject_position = [drop_x, drop_y, 0.15]\n\t\t\tobject_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]\n\t\t\tobject_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1], self.obj_mesh_color[object_idx][2]]\n\t\t\tret_resp, ret_ints, ret_floats, ret_strings, ret_buffer = 
vrep.simxCallScriptFunction(self.sim_client, 'remoteApiCommandServer', vrep.sim_scripttype_childscript, 'importShape', [0, 0, 255, 0], object_position + object_orientation + object_color, [curr_mesh_file, curr_shape_name], bytearray(), vrep.simx_opmode_blocking)\n\t\t\tif ret_resp == 8:\n\t\t\t\tprint('Failed to add new objects to simulation. Please restart.')\n\t\t\t\texit()\n\t\t\t# print(ret_ints, ret_ints[0])\n\t\t\tcurr_shape_handle = ret_ints[0]\n\t\t\tself.object_handles.append(curr_shape_handle)\n\t\t\ttime.sleep(2)\n\t\tself.object_left_handles = self.object_handles.copy()\n\t\tself.prev_obj_positions = []\n\t\tself.obj_positions = []\n\t\tself.get_instruction() # nb\n\t\t# import pdb; pdb.set_trace()\n\n\tdef restart_sim(self):\n\t\tsim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5, 0, 0.3), vrep.simx_opmode_blocking)\n\t\tvrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\tvrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\ttime.sleep(1)\n\t\tsim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)\n\t\tsim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)\n\t\t# V-REP bug requiring multiple starts and stops to restart\n\t\twhile gripper_position[2] > 0.4:\n\t\t\tvrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\t\tvrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\t\ttime.sleep(1)\n\t\t\tsim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)\n\n\tdef is_stable(self):\n\t\t# Check if simulation is stable by checking if gripper is within workspace\n\t\tsim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)\n\t\tsim_is_ok = gripper_position[0] > self.workspace_limits[0][0] - 0.1 and \\\n\t\t\t\t\tgripper_position[0] < self.workspace_limits[0][1] + 0.1 and \\\n\t\t\t\t\tgripper_position[1] > self.workspace_limits[1][0] - 0.1 and \\\n\t\t\t\t\tgripper_position[1] < self.workspace_limits[1][1] + 0.1 and \\\n\t\t\t\t\tgripper_position[2] > self.workspace_limits[2][0] and \\\n\t\t\t\t\tgripper_position[2] < self.workspace_limits[2][1]\n\t\tif not sim_is_ok:\n\t\t\tprint('Simulation unstable, Reset.')\n\t\treturn sim_is_ok\n\n\tdef reset(self):\n\t\tself.restart_sim()\n\t\tself.add_objects()\n\n\t# def stop_sim(self):objects/blocks\n\t# if self.is_sim:\n\t# # Now send some data to V-REP in a non-blocking fashion:\n\t# # vrep.simxAddStatusbarMessage(sim_client,'Hello V-REP!',vrep.simx_opmode_oneshot)\n\n\t# # # Start the simulation\n\t# # vrep.simxStartSimulation(sim_client,vrep.simx_opmode_oneshot_wait)\n\n\t# # # Stop simulation:\n\t# # vrep.simxStopSimulation(sim_client,vrep.simx_opmode_oneshot_wait)\n\n\t# # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. 
You can guarantee this with (for example):\n\t# vrep.simxGetPingTime(self.sim_client)\n\n\t# # Now close the connection to V-REP:\n\t# vrep.simxFinish(self.sim_client)\n\n\tdef get_task_score(self):\n\n\t\tkey_positions = np.asarray([[-0.625, 0.125, 0.0], # red\n\t\t\t\t\t\t\t\t\t[-0.625, -0.125, 0.0], # blue\n\t\t\t\t\t\t\t\t\t[-0.375, 0.125, 0.0], # green\n\t\t\t\t\t\t\t\t\t[-0.375, -0.125, 0.0]]) # yellow\n\n\t\tobj_positions = np.asarray(self.get_obj_positions())\n\t\tobj_positions.shape = (1, obj_positions.shape[0], obj_positions.shape[1])\n\t\tobj_positions = np.tile(obj_positions, (key_positions.shape[0], 1, 1))\n\n\t\tkey_positions.shape = (key_positions.shape[0], 1, key_positions.shape[1])\n\t\tkey_positions = np.tile(key_positions, (1, obj_positions.shape[1], 1))\n\n\t\tkey_dist = np.sqrt(np.sum(np.power(obj_positions - key_positions, 2), axis=2))\n\t\tkey_nn_idx = np.argmin(key_dist, axis=0)\n\n\t\treturn np.sum(key_nn_idx == np.asarray(range(self.num_obj)) % 4)\n\n\tdef check_goal_reached(self, handle):\n\t\t# goal_reached = self.get_task_score() == self.num_obj\n\t\tgoal_reached = self.target_handle == handle\n\t\treturn goal_reached\n\n\tdef get_obj_positions(self):\n\n\t\tobj_positions = []\n\t\tfor object_handle in self.object_handles:\n\t\t\tsim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, vrep.simx_opmode_blocking)\n\t\t\tobj_positions.append(object_position)\n\n\t\treturn obj_positions\n\n\tdef get_obj_positions_and_orientations(self):\n\n\t\tobj_positions = []\n\t\tobj_orientations = []\n\t\tfor object_handle in self.object_handles:\n\t\t\tsim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)\n\t\t\tsim_ret, object_orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)\n\t\t\tobj_positions.append(object_position)\n\t\t\tobj_orientations.append(object_orientation)\n\n\t\treturn obj_positions, obj_orientations\n\n\tdef reposition_objects(self, workspace_limits):\n\t\t# Move gripper out of the way\n\t\tself.move_to([-0.1, 0, 0.3], None)\n\t\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target', self.MODE)\n\t\t# vrep.simxSetObjectPosition(self.sim_client, UR5_target_handle, -1, (-0.5,0,0.3), self.MODE)\n\t\t# time.sleep(1)\n\n\t\tfor object_handle in self.object_handles:\n\t\t\t# Drop object at random x,y location and random orientation in robot workspace\n\t\t\tdrop_x = (workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample() + workspace_limits[0][0] + 0.1\n\t\t\tdrop_y = (workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample() + workspace_limits[1][0] + 0.1\n\t\t\tobject_position = [drop_x, drop_y, 0.15]\n\t\t\tobject_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, object_handle, -1, object_position, self.MODE)\n\t\t\tvrep.simxSetObjectOrientation(self.sim_client, object_handle, -1, object_orientation, self.MODE)\n\t\t\ttime.sleep(2)\n\n\tdef get_camera_data(self, handle=-1):\n\t\tif handle == -1:\n\t\t\thandle = self.cam_handle\n\t\t# Get color image from simulation\n\t\tsim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, handle, 0, self.MODE)\n\t\tcolor_img = np.asarray(raw_image)\n\t\tcolor_img.shape = (resolution[1], resolution[0], 3)\n\t\tcolor_img = color_img.astype(np.float) / 
255\n\t\tcolor_img[color_img < 0] += 1\n\t\tcolor_img *= 255\n\t\tcolor_img = np.fliplr(color_img)\n\t\tcolor_img = color_img.astype(np.uint8)\n\n\t\t# Get depth image from simulation\n\t\tsim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.sim_client, handle, self.MODE)\n\t\tdepth_img = np.asarray(depth_buffer)\n\t\tdepth_img.shape = (resolution[1], resolution[0])\n\t\tdepth_img = np.fliplr(depth_img)\n\t\tzNear = 0.01\n\t\tzFar = 10\n\t\tdepth_img = depth_img * (zFar - zNear) + zNear\n\n\t\treturn color_img, depth_img\n\n\tdef get_instruction(self):\n\t\t# TODO\n\t\t# add more template\n\t\tinstruction_template = \"pick up the {color} {shape}.\"\n\t\tind = np.random.randint(0, self.num_obj)\n\t\tcolor = utils.get_mush_color_name(self.obj_mesh_color[ind])\n\t\tshape = np.random.choice(self.mesh_name[self.mesh_list[self.obj_mesh_ind[ind]]])\n\t\tself.target_handle = self.object_handles[ind]\n\t\tself.instruction_str = instruction_template.format(color=color, shape=shape) # nb\n\t\tself.instruction = self.text_data.get_tensor(self.instruction_str)\n\t\treturn self.instruction\n\n\tdef close_gripper(self, _async=False):\n\t\tgripper_motor_velocity = -0.5\n\t\tgripper_motor_force = 100\n\t\tsim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)\n\t\tsim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)\n\t\tgripper_fully_closed = False\n\t\twhile gripper_joint_position > -0.047: # Block until gripper is fully closed\n\t\t\tsim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\t\t\t# print(gripper_joint_position)\n\t\t\tif new_gripper_joint_position >= gripper_joint_position:\n\t\t\t\treturn gripper_fully_closed\n\t\t\tgripper_joint_position = new_gripper_joint_position\n\t\tgripper_fully_closed = True\n\n\t\treturn gripper_fully_closed\n\n\tdef open_gripper(self, _async=False):\n\t\tgripper_motor_velocity = 0.5\n\t\tgripper_motor_force = 20\n\t\tsim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)\n\t\tsim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)\n\t\twhile gripper_joint_position < 0.0536: # Block until gripper is fully open\n\t\t\tsim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\n\tdef move_to(self, tool_position, tool_orientation):\n\t\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)\n\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\n\t\tmove_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])\n\t\tmove_magnitude = 
np.linalg.norm(move_direction)\n\t\tmove_step = 0.02 * move_direction / move_magnitude\n\t\tnum_move_steps = int(np.floor(move_magnitude / 0.02))\n\n\t\tfor step_iter in range(num_move_steps):\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0], UR5_target_position[1] + move_step[1], UR5_target_position[2] + move_step[2]), vrep.simx_opmode_blocking)\n\t\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)\n\n\t# Primitives ----------------------------------------------------------\n\n\tdef random_grasp_action(self):\n\t\t'''\n\t\tangles = []\n\t\tfor i in range(8):\n\t\t\tangle = np.deg2rad(i * (360.0 / 16))\n\t\t\ttool_rotation_angle = (angle % np.pi) - np.pi / 2\n\t\t\tangles.append(tool_rotation_angle)\n\t\tprint(angles)\n\t\t'''\n\t\t# assert len(self.object_left_handles) > 0\n\t\tobject_handle = random.sample(self.object_left_handles, 1)[0]\n\t\t\n\t\t_, orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)\n\t\tall_angles = [-1.5708, -1.1781, -0.7854, -0.3927, 0.0, 0.3927, 0.7854, 1.1781]\n\t\tpossible_angles = [orientation[1], orientation[1] - np.pi/2.0]\n\t\tanegle = random.sample(possible_angles, 1)[0]\n\t\tangle = max(0, bisect_right(all_angles, orientation[1]) - 1)\n\t\t\n\t\t_, position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)\n\t\taction_x = (position[1] - self.workspace_limits[1][0]) / self.heightmap_resolution\n\t\taction_y = (position[0] - self.workspace_limits[0][0]) / self.heightmap_resolution\n\t\taction_x = min(action_x, 223)\n\t\taction_y = min(action_y, 223)\n\t\taction = (angle, int(action_x), int(action_y))\n\t\t# print(object_handle, action)\n\t\t# import pdb; pdb.set_trace()\n\t\treturn action\n\t\n\tdef step(self, action, valid_depth_heightmap, num_rotations, heightmap_resolution):\n\t\t# Compute 3D position of pixel\n\t\tangle = np.deg2rad(action[0] * (360.0 / num_rotations))\n\t\tbest_pix_x = action[2]\n\t\tbest_pix_y = action[1]\n\t\tprimitive_position = [\n\t\t\tbest_pix_x * heightmap_resolution + self.workspace_limits[0][0], \n\t\t\tbest_pix_y * heightmap_resolution + self.workspace_limits[1][0],\n\t\t\tvalid_depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]\n\t\t]\n\n\t\treward = self.grasp(primitive_position, angle)\n\t\tdone = (reward == Reward.SUCCESS)\n\t\t# print(reward, done)\n\t\treturn reward.value, done\n\n\tdef grasp(self, position, heightmap_rotation_angle):\n\t\t# print('Executing: grasp at (%f, %f, %f)' % (position[0], position[1], position[2]))\n\t\t# Compute tool orientation from heightmap rotation angle\n\t\ttool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2\n\n\t\t# Avoid collision with floor\n\t\tposition = np.asarray(position).copy()\n\t\tposition[2] = max(position[2] - 0.04, self.workspace_limits[2][0] + 0.02)\n\n\t\t# Move gripper to location above grasp target\n\t\tgrasp_location_margin = 0.15\n\t\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)\n\t\tlocation_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)\n\n\t\t# Compute gripper position and linear movement increments\n\t\ttool_position = 
location_above_grasp_target\n\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\tmove_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])\n\t\tmove_magnitude = np.linalg.norm(move_direction)\n\t\tmove_step = 0.05 * move_direction / move_magnitude\n\t\t# if np.floor(move_direction[0] / move_step[0]) == np.nan or move_step[0] == 0: import pdb; pdb.set_trace() \n\t\tnum_move_steps = int(np.floor(move_direction[0] / move_step[0])) if move_step[0] != 0 else 1\n\n\t\t# Compute gripper orientation and rotation increments\n\t\tsim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\trotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3\n\t\tnum_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))\n\n\t\t# Simultaneously move and rotate gripper\n\t\tfor step_iter in range(max(num_move_steps, num_rotation_steps)):\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)\n\t\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)\n\n\t\t# Ensure gripper is open\n\t\tself.open_gripper()\n\n\t\t# Approach grasp target\n\t\tself.move_to(position, None)\n\n\t\t# Close gripper to grasp target\n\t\tgripper_full_closed = self.close_gripper()\n\n\t\t# Move gripper to location above grasp target\n\t\tself.move_to(location_above_grasp_target, None)\n\n\t\t# Check if grasp is successful\n\t\tgripper_full_closed = self.close_gripper()\n\t\tgrasp_sth = not gripper_full_closed\n\n\t\t# Move the grasped object elsewhere\n\t\tif grasp_sth:\n\t\t\tobject_positions = np.asarray(self.get_obj_positions())\n\t\t\tobject_positions = object_positions[:, 2]\n\t\t\tgrasped_object_ind = np.argmax(object_positions)\n\t\t\tgrasped_object_handle = self.object_handles[grasped_object_ind]\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, grasped_object_handle, -1, (-0.5, 0.5 + 0.05 * float(grasped_object_ind), 0.1), self.MODE)\n\t\t\tself.object_left_handles.remove(grasped_object_handle)\n\t\t\tif grasped_object_handle == self.target_handle:\n\t\t\t\treturn Reward.SUCCESS\n\t\t\telse:\n\t\t\t\treturn Reward.WRONG\n\t\telse:\n\t\t\treturn Reward.FAIL\n\n\tdef push(self, position, heightmap_rotation_angle, workspace_limits):\n\t\t# print('Executing: push at (%f, %f, %f)' % (position[0], position[1], position[2]))\n\t\t# Compute tool orientation from heightmap rotation angle\n\t\ttool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2\n\n\t\t# Adjust pushing point to be on tip of finger\n\t\tposition[2] = position[2] + 0.026\n\n\t\t# Compute pushing 
direction\n\t\tpush_orientation = [1.0, 0.0]\n\t\tpush_direction = np.asarray([push_orientation[0] * np.cos(heightmap_rotation_angle) - push_orientation[1] * np.sin(heightmap_rotation_angle), push_orientation[0] * np.sin(heightmap_rotation_angle) + push_orientation[1] * np.cos(heightmap_rotation_angle)])\n\n\t\t# Move gripper to location above pushing point\n\t\tpushing_point_margin = 0.1\n\t\tlocation_above_pushing_point = (position[0], position[1], position[2] + pushing_point_margin)\n\n\t\t# Compute gripper position and linear movement increments\n\t\ttool_position = location_above_pushing_point\n\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\tmove_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])\n\t\tmove_magnitude = np.linalg.norm(move_direction)\n\t\tmove_step = 0.05 * move_direction / move_magnitude\n\t\tnum_move_steps = int(np.floor(move_direction[0] / move_step[0]))\n\n\t\t# Compute gripper orientation and rotation increments\n\t\tsim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\trotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3\n\t\tnum_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))\n\n\t\t# Simultaneously move and rotate gripper\n\t\tfor step_iter in range(max(num_move_steps, num_rotation_steps)):\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)\n\t\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)\n\n\t\t# Ensure gripper is closed\n\t\tself.close_gripper()\n\n\t\t# Approach pushing point\n\t\tself.move_to(position, None)\n\n\t\t# Compute target location (push to the right)\n\t\tpush_length = 0.1\n\t\ttarget_x = min(max(position[0] + push_direction[0] * push_length, workspace_limits[0][0]), workspace_limits[0][1])\n\t\ttarget_y = min(max(position[1] + push_direction[1] * push_length, workspace_limits[1][0]), workspace_limits[1][1])\n\t\tpush_length = np.sqrt(np.power(target_x - position[0], 2) + np.power(target_y - position[1], 2))\n\n\t\t# Move in pushing direction towards target location\n\t\tself.move_to([target_x, target_y, position[2]], None)\n\n\t\t# Move gripper to location above grasp target\n\t\tself.move_to([target_x, target_y, location_above_pushing_point[2]], None)\n\n\t\tpush_success = True\n\t\treturn push_success\n\n\t# def place(self, position, heightmap_rotation_angle, workspace_limits):\n\t# print('Executing: place at (%f, %f, %f)' % (position[0], position[1], position[2]))\n\n\t# # Compute tool orientation from heightmap rotation angle\n\t# tool_rotation_angle = 
(heightmap_rotation_angle % np.pi) - np.pi/2\n\n\t# # Avoid collision with floor\n\t# position[2] = max(position[2] + 0.04 + 0.02, workspace_limits[2][0] + 0.02)\n\n\t# # Move gripper to location above place target\n\t# place_location_margin = 0.1\n\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)\n\t# location_above_place_target = (position[0], position[1], position[2] + place_location_margin)\n\t# self.move_to(location_above_place_target, None)\n\n\t# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t# if tool_rotation_angle - gripper_orientation[1] > 0:\n\t# increment = 0.2\n\t# else:\n\t# increment = -0.2\n\t# while abs(tool_rotation_angle - gripper_orientation[1]) >= 0.2:\n\t# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, gripper_orientation[1] + increment, np.pi/2), vrep.simx_opmode_blocking)\n\t# time.sleep(0.01)\n\t# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, tool_rotation_angle, np.pi/2), vrep.simx_opmode_blocking)\n\n\t# # Approach place target\n\t# self.move_to(position, None)\n\n\t# # Ensure gripper is open\n\t# self.open_gripper()\n\n\t# # Move gripper to location above place target\n\t# self.move_to(location_above_place_target, None)\n"
] | [
[
"numpy.dot",
"numpy.random.choice",
"numpy.asarray",
"numpy.fliplr",
"numpy.eye",
"numpy.power",
"numpy.arange",
"numpy.tile",
"numpy.linalg.norm",
"numpy.random.random_sample",
"numpy.cos",
"numpy.sin",
"numpy.deg2rad",
"numpy.argmax",
"numpy.argmin",
"numpy.floor",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
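Editor's note on the record above: its `push` method rotates a unit push direction by the heightmap rotation angle and interpolates the gripper toward the target pose in fixed 0.05-unit increments. A minimal standalone sketch of those two pieces, assuming only numpy (the `vrep` remote-API calls and object handles from the record are not reproduced here; `linear_steps` is a hypothetical helper that also guards the zero-length move the record's grasp code works around with its `move_step[0] != 0` check):

import numpy as np

def rotate_push_direction(heightmap_rotation_angle):
    # 2D rotation of the unit vector (1, 0) -- the same expression push() evaluates.
    c, s = np.cos(heightmap_rotation_angle), np.sin(heightmap_rotation_angle)
    return np.array([1.0 * c - 0.0 * s, 1.0 * s + 0.0 * c])

def linear_steps(current, target, step_size=0.05):
    # Fixed-magnitude waypoints from current to target, ending exactly on target.
    current = np.asarray(current, dtype=float)
    target = np.asarray(target, dtype=float)
    direction = target - current
    magnitude = np.linalg.norm(direction)
    if magnitude == 0.0:          # degenerate move: already at the target
        return [target]
    step = step_size * direction / magnitude
    n = int(np.floor(magnitude / step_size))
    return [current + step * i for i in range(1, n + 1)] + [target]

print(rotate_push_direction(np.pi / 2))          # ~[0, 1]
print(len(linear_steps((0, 0, 0), (0.2, 0, 0)))) # 5 waypoints of 0.05 each, plus the target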
wesh92/nwnodetool | [
"1c79418d9ad0f1f22dfbc6aab390ca9603fb1596"
] | [
"nwnodedetector/nwocrsounds.py"
] | [
"\"\"\"\nNew World OCR Node Detector\nCreated: 2021-10-07\nDev: Wes H.\n\nUses OCR to get coordinates from top right of the NW game window\nand imposes that against a list of possible nodes.\nWhen you're close to one it will play a bell noise!\n\"\"\"\nimport winsound\nfrom PIL import ImageGrab, ImageOps, Image\nimport pytesseract\nimport psutil\nfrom time import sleep\nimport pathlib\nimport iron_markers as imark\nimport essence_markers as emark\nimport chest_essence as ce\nimport numpy as np\n\n# for 3440*1440 : (3182,19,3416,39)\nlocalpath = str(pathlib.Path(__file__).parent.resolve())\npytesseract.pytesseract.tesseract_cmd = rf\"{localpath}\\Tesseract-OCR\\tesseract.exe\"\n# node = [['7831', '1673'], ['9341', '2725']] \nnode = ce.chest_essence\ndef screen_loc_check(items, screen_img):\n\n z = ImageOps.crop(screen_img, (173,0,50,0))\n zpos = pytesseract.image_to_string(z, config=\"--psm 13 outputbase digits\")\n zpos = str(zpos).replace('\\n', '').replace('\\x0c', '').replace('(', '').replace(']', '').replace('[', '')\n if zpos.isdigit() and int(float(zpos)) >= 100:\n xcrop = (0,0,220,0)\n ycrop = (82,0,128,0)\n else:\n xcrop = (0,0,210,0)\n ycrop = (88,0,120,0)\n x = ImageOps.crop(screen_img, xcrop)\n y = ImageOps.crop(screen_img, ycrop)\n \n x = x.resize((150, 100))\n y = y.resize((150, 100))\n \n datax = np.array(x)\n datay = np.array(y)\n r1, g1, b1 = 235, 235, 165\n r1x, g1x, b1x = 110, 105, 70\n r2, g2, b2 = 0, 0, 0\n redx, greenx, bluex = datax[:,:,0], datax[:,:,1], datax[:,:,2]\n redy, greeny, bluey = datay[:,:,0], datay[:,:,1], datay[:,:,2]\n mask1x = (redx <= r1x) & (greenx <= g1x) & (bluex <= b1x)\n mask2x = (redx >= r1) & (greenx >= g1) & (bluex >= b1)\n mask1y = (redy <= r1x) & (greeny <= g1x) & (bluey <= b1x)\n mask2y = (redy >= r1) & (greeny >= g1) & (bluey >= b1)\n datax[:,:,:3][mask1x] = [r2, g2, b2]\n datax[:,:,:3][mask2x] = [r2, g2, b2]\n datay[:,:,:3][mask1y] = [r2, g2, b2]\n datay[:,:,:3][mask2y] = [r2, g2, b2]\n x = Image.fromarray(datax)\n y = Image.fromarray(datay)\n x.convert(\"L\")\n y.convert(\"L\")\n \n xpos = pytesseract.image_to_string(x, config=\"--psm 13 outputbase digits\")\n ypos = pytesseract.image_to_string(y, config=\"--psm 13 outputbase digits\")\n\n xpos = str(xpos).replace('\\n', '').replace('\\x0c', '').replace('(', '').replace(']', '').replace('[', '')\n ypos = str(ypos).replace('\\n', '').replace('\\x0c', '').replace('(', '').replace(']', '').replace('[', '')\n\n pos = [xpos, ypos]\n \n confirms = []\n for element in items:\n min_x = int(float(element[0]))-15\n max_x = int(float(element[0]))+15\n min_y = int(float(element[1]))-15\n max_y = int(float(element[1]))+15\n if pos[0].isdigit() and pos[1].isdigit():\n if int(float(pos[0])) >= min_x and int(float(pos[0])) <= max_x and int(float(pos[1])) >= min_y and int(float(pos[1])) <= max_y:\n confirms.append(True)\n else:\n confirms.append(False)\n else:\n pass\n\n if any(confirms):\n print(\"All Match\\n ---------\")\n print(pos[0], pos[1])\n return True\n else:\n print(\"Miss\\n ---------\")\n print(pos[0], pos[1])\n return False\n \nwhile \"NewWorld.exe\" in (p.name() for p in psutil.process_iter()):\n screen = ImageGrab.grab(bbox=(3191, 19, 3440, 39))\n remote_image = screen.convert('RGBA')\n remote_image.save('grabbed.png')\n \n if screen_loc_check(node, remote_image) is True:\n duration = 333\n freq = 880\n winsound.Beep(freq, duration)\n sleep(1)\n "
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
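Editor's note on the record above: before handing a crop to Tesseract, `screen_loc_check` blanks out pixels that are either darker than one RGB threshold or brighter than another, leaving only the mid-tone coordinate digits. A minimal sketch of that two-sided mask, assuming only numpy and Pillow (thresholds copied from the record; the synthetic input image is a placeholder for the `grabbed.png` screen capture):

import numpy as np
from PIL import Image

def suppress_background(img, low=(110, 105, 70), high=(235, 235, 165)):
    # Zero out pixels below `low` or above `high` in all three channels,
    # keeping only the mid-tone text pixels for OCR.
    data = np.array(img.convert("RGB"))
    r, g, b = data[:, :, 0], data[:, :, 1], data[:, :, 2]
    too_dark = (r <= low[0]) & (g <= low[1]) & (b <= low[2])
    too_bright = (r >= high[0]) & (g >= high[1]) & (b >= high[2])
    data[too_dark | too_bright] = 0
    return Image.fromarray(data)

patch = Image.fromarray(np.full((20, 60, 3), 180, dtype=np.uint8))  # synthetic mid-tone patch
print(np.array(suppress_background(patch)).max())  # 180: mid-tones survive the mask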
ljocha/DeepEI | [
"96aee49192ac805dda7971041c01e16c62cd3cbc"
] | [
"Scripts/read_msp.py"
] | [
"from DeepEI.utils import ms2vec, get_cdk_fingerprints, get_cdk_descriptors\nfrom matchms.importing import load_from_msp\nimport json\nimport numpy as np\nfrom scipy.sparse import csr_matrix, save_npz\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem.rdMolDescriptors import CalcExactMolWt\n\n# incompatible with DeepEI/utils.py\n#from pycdk.pycdk import MolFromSmiles, parser_formula, MolToFormula\n\nfrom concurrent.futures import ProcessPoolExecutor\nimport os\nfrom argparse import ArgumentParser\n\np = ArgumentParser()\np.add_argument('--ncores','-n',type=int,help='number of cores',default=1)\np.add_argument('--dest','-d',type=str,help='destination directory',default='.')\np.add_argument('infile',type=str,help='input file')\n\nargs = p.parse_args()\nfile_msp = args.infile\nncores = args.ncores\ndest = args.dest\n\nif not os.path.isdir(dest):\n\tprint(f\"{dest} does not exist\")\n\texit(1)\n\ndef process_mol(nm):\n\tn,m = nm\n\ttry:\n\t\tosmiles = m.get('smiles')\n\t\tmol = Chem.MolFromSmiles(osmiles)\n\t\tname = m.get('name')\n\t\tpeakindex = m.peaks.mz\n\t\tpeakintensity = m.peaks.intensities\n\n\t\tmolwt = CalcExactMolWt(mol)\n\t\tif molwt > 2000:\n\t\t\treturn {}\n\t\tsmiles = Chem.MolToSmiles(mol)\n# XXX: pycdk\n#\t\telements = parser_formula(MolToFormula(MolFromSmiles(smiles)))\n#\t\tfor e in elements:\n#\t\t\tif e not in ['C', 'H', 'O', 'N', 'S', 'P', 'Si', 'F', 'Cl', 'Br', 'I']:\n#\t\t\t\tprint(f\"{osmiles}: uncommon element {e}, skipping\")\n#\t\t\t\treturn {}\n\t\tmorgan_fp = np.array(AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=4096))\n\t\tcdk_fp = get_cdk_fingerprints(smiles)\n\t\tcdk_des = np.array(get_cdk_descriptors(smiles))\n# XXX\n#\t\tri = list(m['RI'].values())\n\t\tpeak_vec = ms2vec(peakindex,peakintensity)\n\n\t\tprint(f\"{n}:{osmiles}: done\")\n\t\treturn { \n\t\t\t'smiles': smiles,\n\t\t\t'name': name,\n\t\t\t'peak_vec': peak_vec,\n#\t\t\t'ri': ri,\n\t\t\t'morgan_fp': morgan_fp,\n\t\t\t'cdk_fp': cdk_fp,\n\t\t\t'cdk_des': cdk_des,\n\t\t\t'molwt': molwt,\n\t\t}\n\texcept BaseException as e:\n\t\tprint(f\"{osmiles}: {e}\")\n\t\treturn {}\n\nprint(f\"Loading {file_msp}...\")\nall_mol = load_from_msp(file_msp)\nprint(\"done\")\n\nwith ProcessPoolExecutor(max_workers=ncores) as pool:\n\tall_output = pool.map(process_mol, enumerate(all_mol))\n\n# filter out empty entries\nall_output = list(filter(lambda x: x,all_output))\n\nall_smiles = list(map(lambda x: x['smiles'], all_output))\nPeak_data = np.array(list(map(lambda x: x['peak_vec'], all_output)))\n# RI_data = map(lambda x: x['smiles'], all_output)\nMorgan_fp = np.array(list(map(lambda x: x['morgan_fp'], all_output)))\nCDK_fp = np.array(list(map(lambda x: x['cdk_fp'], all_output)))\nCDK_des = np.array(list(map(lambda x: x['cdk_des'], all_output)))\nMolWt = np.array(list(map(lambda x: x['molwt'], all_output)))\n\nprint(\"writing output ...\")\nos.chdir(dest)\n\n# np.save('retention.npy', np.array(RI_data))\nnp.save('descriptor.npy', CDK_des)\nnp.save('molwt.npy', MolWt)\n\nPeak_data = csr_matrix(Peak_data)\nMorgan_fp = csr_matrix(Morgan_fp)\nCDK_fp = csr_matrix(CDK_fp)\n\nsave_npz('peakvec.npz', Peak_data)\nsave_npz('morgan.npz', Morgan_fp)\nsave_npz('fingerprints.npz', CDK_fp)\n\nwith open('all_smiles.json', 'w') as t:\n\tjson.dump(all_smiles, t)\n\nprint(\"done\")\n"
] | [
[
"scipy.sparse.csr_matrix",
"scipy.sparse.save_npz",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"1.7",
"1.0",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
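Editor's note on the record above: `read_msp.py` fans the per-molecule work out with `ProcessPoolExecutor.map`, filters out empty results, and stores the wide, mostly-zero fingerprint matrices in compressed sparse row form. A minimal sketch of that save/restore path, assuming scipy and a toy dense 0/1 fingerprint array (the output file name follows the record):

import numpy as np
from scipy.sparse import csr_matrix, save_npz, load_npz

# A 1000 x 4096 fingerprint matrix with three set bits: CSR stores only
# the non-zero entries, so the .npz on disk stays small.
fp = np.zeros((1000, 4096), dtype=np.int8)
fp[0, [3, 17, 250]] = 1

save_npz('fingerprints.npz', csr_matrix(fp))   # name from the record
restored = load_npz('fingerprints.npz')
assert (restored.toarray() == fp).all()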
wikimedia/research-reader-survey-analysis | [
"fbf4d71eebaf5ac5205713b0271f4ea51ab388f8",
"fbf4d71eebaf5ac5205713b0271f4ea51ab388f8"
] | [
"src/preprocessing/02_extractlogtraces/03_join_responses_with_ip.py",
"src/preprocessing/02_extractlogtraces/08_join_responses_with_requests.py"
] | [
"\"\"\"\nThis script joins:\n * the EventLogging (EL) data based on webrequest beacons (in my experience, most complete / simplest)\n * Google Forms survey responses\n * EditAttemptStep data based on hive tables\n\nThere are two outputs for each language:\n * CSV w/ survey responses + EL details (e.g., datetime, pageID) + webrequest details (e.g., client-IP, user-agent)\n * CSV w/ all approximate userhashes for matching against webrequest logs\n\"\"\"\n\nimport argparse\nimport csv\nimport os\n\nfrom geopy.distance import distance\nimport pandas as pd\n\n# hacky way to make sure utils is visible\nimport sys\nsys.path.append(os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../../..'))\n\nfrom src.utils import config\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--el_logs_fn\",\n default=config.quicksurvey_el_tsv,\n help=\"TSV with EventLogging data\")\n parser.add_argument(\"--survey_req_fn\",\n default=config.quicksurvey_requests_tsv,\n help=\"TSV with survey webrequests.\")\n parser.add_argument(\"--editattempt_fn\",\n default=config.edit_el_tsv,\n help=\"TSV filename for edit attempt data\")\n parser.add_argument(\"--ids_dir\",\n default=config.ids_folder,\n help=\"Folder to store survey respondent UserIDs\")\n parser.add_argument(\"--languages\",\n default=config.languages,\n nargs=\"*\",\n help=\"List of languages to process\")\n parser.add_argument(\"--responses_dir\",\n default=config.responses_folder,\n help=\"Folder to hold survey responses + associated webrequest\")\n parser.add_argument(\"--dist_threshold\",\n default=config.ip_dist_threshold,\n help=\"Max distance in km between Geonames point and IP point for match.\")\n parser.add_argument(\"--geonames_tsv\",\n default=config.geonames_tsv,\n help=\"Geonames TSV file w/ place and population information.\")\n\n args = parser.parse_args()\n\n requests = pd.read_csv(args.survey_req_fn, sep=\"\\t\")\n print(\"{0} total requests.\".format(len(requests)))\n\n requests.drop_duplicates(inplace=True)\n requests.sort_values(by=['response_type'], ascending=False, inplace=True)\n requests.set_index('pageview_token', inplace=True)\n print(\"{0} requests from {1} unique users after removing duplicates.\".format(len(requests),\n len(requests['userhash'].unique())))\n\n map_ip_to_population(requests, args.geonames_tsv, args.dist_threshold)\n\n# edit_attempts = pd.read_csv(args.editattempt_fn, sep=\"\\t\")\n# print(\"{0} edit actions across {1} users.\".format(len(edit_attempts), len(edit_attempts['userhash'].unique())))\n# edit_attempts = edit_attempts.groupby('userhash').apply(group_edit_actions)\n\n if not os.path.isdir(args.ids_dir):\n print(\"Creating directory: {0}\".format(os.path.abspath(args.ids_dir)))\n os.mkdir(args.ids_dir)\n\n if not os.path.isdir(args.responses_dir):\n print(\"Creating directory: {0}\".format(os.path.abspath(args.responses_dir)))\n os.mkdir(args.responses_dir)\n\n all_ids = []\n for lang in args.languages:\n recoded_fn = os.path.join(config.data_folder, \"recoded\", \"responses_{0}_recoded.csv\".format(lang))\n surv_responses = pd.read_csv(recoded_fn, sep = '\\t')\n surv_responses.set_index('survey_id', inplace=True)\n print(\"**********\")\n print(\"Google Responses in {0}: {1}\".format(lang, len(surv_responses)))\n\n # merge in quicksurveys eventlogging -- priority to yes to take survey, no to take survey, initiation\n srv_el_req = pd.merge(surv_responses, requests, how=\"left\", left_index=True, right_index=True)\n srv_el_req = 
srv_el_req[~srv_el_req.index.duplicated(keep='first')]\n print(\"Breakdown of ability to match up Google responses with EL: (w/o initiation)\")\n print(srv_el_req['response_type'].value_counts(dropna=False))\n print(\"Breakdown of ability to match up Google responses with EL (w/ initiation):\")\n print(srv_el_req['country'].value_counts(dropna=False))\n\n # merge in edit attempt data\n# srv_el_req = srv_el_req.join(edit_attempts, how=\"left\", on=\"userhash\")\n# print(\"Responses w/ associated edit data (is anon):\")\n# print(srv_el_req['is_anon'].value_counts(dropna=False))\n\n # Write responses+EL+webrequest data to TSV\n output_merged_data = os.path.join(args.responses_dir, \"responses_with_el_{0}.csv\".format(lang))\n srv_el_req.to_csv(output_merged_data, sep='\\t')\n\n # Write userIDs associated with completed surveys to file\n output_respondent_ids = os.path.join(args.ids_dir, \"ids_{0}.csv\".format(lang))\n ids = srv_el_req[\"userhash\"]\n ids = ids.dropna()\n ids.to_csv(output_respondent_ids, index=False, header=False)\n print(\"Complete IDs:\", len(ids))\n\n all_ids.extend(list(ids.values))\n\n if all_ids:\n with open(config.all_ids_csv, 'w') as fout:\n csvwriter = csv.writer(fout)\n for ip_ua in all_ids:\n csvwriter.writerow([ip_ua])\n\n\ndef group_edit_actions(user_data):\n is_anon = any(user_data['anon'])\n edit_count = user_data['user_edit'].value_counts().index[0]\n editor_interface = user_data['editor_interface'].value_counts().index[0]\n return pd.Series({'is_anon': is_anon,\n 'edit_count': edit_count,\n 'editor_interface':editor_interface})\n\n\ndef map_ip_to_population(df, geonames_tsv, dist_threshold):\n print(\"Loading geonames lookup\")\n geonames = get_geonames_map(geonames_tsv)\n print(\"Calculating populations\")\n df['population'] = df.apply(lambda x: lookup_row(x, geonames, dist_threshold=dist_threshold), axis=1)\n print(\"Success rate:\", (df['population'] >= 1).sum() / df['population'].count())\n print(\"Breakdown of matches:\", df['population'].apply(lambda x: 1 if x > 0 else x).value_counts(dropna=False))\n try:\n ipdump_fn = geonames_tsv.replace('.txt', '_ipmatch.tsv')\n df[['city', 'country_code', 'lat', 'lon', 'population']].to_csv(ipdump_fn, header=True, index=False, sep='\\t')\n print(\"Dumped IP->population data to:\", ipdump_fn)\n except Exception:\n print(\"Failed to dump IP->population data.\")\n\ndef calc_dist(pt1, pt2):\n return distance(pt1, pt2).kilometers\n\ndef get_geonames_map(allcountries):\n geonames_header = ['geonameid', 'name', 'asciiname', 'alternatenames',\n 'latitude', 'longitude', 'feature class', 'feature code',\n 'country code', 'cc2', 'admin1 code', 'admin2 code', 'admin3 code', 'admin4 code',\n 'population', 'elevation', 'dem', 'timezone', 'modification date']\n country_idx = geonames_header.index('country code')\n pop_idx = geonames_header.index('population')\n lat_idx = geonames_header.index('latitude')\n lon_idx = geonames_header.index('longitude')\n name_idx = geonames_header.index('name')\n altname_idx = geonames_header.index('alternatenames')\n feature_idx = geonames_header.index('feature class')\n\n lookup = {}\n num_countries = 0\n num_places = 0\n num_pops = 0\n nonzero_pops = 0\n duplicates = 0\n with open(allcountries, 'r') as fin:\n tsvreader = csv.reader(fin, delimiter='\\t')\n for line in tsvreader:\n feature = line[feature_idx]\n try:\n population = int(line[pop_idx])\n except ValueError:\n population = -1\n if (feature == 'A' and population >= 0) or feature == 'P':\n pt = (float(line[lat_idx]), 
float(line[lon_idx]))\n names = [line[name_idx]]\n if line[altname_idx]:\n names.extend(line[altname_idx].split(','))\n country = line[country_idx]\n if country not in lookup:\n num_countries += 1\n lookup[country] = {}\n for n in names:\n if n in lookup[country]:\n if pt in lookup[country][n]:\n existing_pop = lookup[country][n][pt]\n if not population:\n continue\n elif existing_pop == population:\n continue\n elif not existing_pop:\n lookup[country][n][pt] = population\n num_pops += 1\n else:\n duplicates += 1\n else:\n lookup[country][n][pt] = population\n num_places += 1\n if num_places % 500000 == 0:\n print(num_places, \"added.\")\n if population >= 0:\n num_pops += 1\n if population == 0:\n nonzero_pops += 1\n else:\n lookup[country][n] = {pt:population}\n num_places += 1\n if num_places % 500000 == 0:\n print(num_places, \"added.\")\n if population >= 0:\n num_pops += 1\n if population == 0:\n nonzero_pops += 1\n print(\"{0} countries. {1} places. {2} places w/ population. {3} w/ pop 0. {4} duplicates\".format(\n num_countries, num_places, num_pops, nonzero_pops, duplicates))\n # add location-based lookup index for places w/ unknown cities but that still have points\n locs_to_add = {}\n for cc in lookup:\n for n in lookup[cc]:\n for loc in lookup[cc][n]:\n simple_loc = (int(loc[0]), int(loc[1]))\n if simple_loc not in locs_to_add:\n locs_to_add[simple_loc] = set()\n locs_to_add[simple_loc].add((cc, n))\n for l in locs_to_add:\n lookup[l] = locs_to_add[l]\n return lookup\n\ndef lookup_row(x, geonames, dist_threshold):\n country = x['country_code']\n city = x['city']\n pt = (float(x['lat']), float(x['lon']))\n # no city info, use lat-lon as backup\n if city.lower() == \"unknown\":\n return lookup_pt(pt, geonames, dist_threshold)\n # use city to geocode and then lat-lon to filter\n else:\n try:\n candidates = geonames[country][city]\n within_thres = []\n # find all potential place matches\n for cpt, cpop in candidates.items():\n if calc_dist(pt, cpt) < dist_threshold:\n within_thres.append(cpop)\n # return potential match with highest population (arbitrary choice but empirically seems to matter little)\n if within_thres:\n # Success: found a matching place w/i distance threshold\n # Possibilities:\n # >0 == have a real population\n # 0 if geonames listed that\n # -1 population if geonames didn't provide a number\n return max(within_thres)\n else:\n # found a matching name but was not close enough\n backup = lookup_pt(pt, geonames, dist_threshold)\n if backup > 0:\n return backup\n else:\n return -2\n except KeyError:\n # did not find a matching name\n return lookup_pt(pt, geonames, dist_threshold)\n\ndef lookup_pt(pt, geonames, dist_threshold):\n simple_pt = (int(pt[0]), int(pt[1]))\n closest_with_pop = float('inf')\n pop = -3\n for cc, name in geonames.get(simple_pt, []):\n for cpt, cpop in geonames[cc][name]:\n if cpop > 0:\n cand_dist = calc_dist(pt, cpt)\n if cand_dist < dist_threshold and cand_dist < closest_with_pop:\n closest_with_pop = cand_dist\n pop = cpop\n return pop\n\nif __name__ == \"__main__\":\n main()",
"import argparse\nimport datetime\nimport os\nimport pickle\n# hacky way to make sure utils is visible\nimport sys\n\nimport pandas as pd\n\nsys.path.append(os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../../..'))\n\nfrom src.utils import config\nfrom src.utils import read_redirects\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--languages\",\n default=config.languages,\n nargs=\"*\",\n help=\"List of languages to process\")\n parser.add_argument(\"--in_dir_traces\",\n default=config.srvy_req_folder,\n help=\"Folder with webrequest traces\")\n parser.add_argument(\"--in_dir_responses\",\n default=config.responses_folder,\n help=\"Folder with survey responses\")\n parser.add_argument(\"--redirect_dir\",\n default=config.redirect_folder,\n help=\"Folder with Wikipedia redirects\")\n parser.add_argument(\"--out_dir\",\n default=config.srvy_anon_folder,\n help=\"Folder for output joined responses/traces.\")\n args = parser.parse_args()\n\n if not os.path.isdir(args.out_dir):\n print(\"Creating directory: {0}\".format(os.path.abspath(args.out_dir)))\n os.mkdir(args.out_dir)\n\n\n geo_cols = [\"country\", \"timezone\"]\n editattempt_cols = [\"edit_count\", \"editor_interface\", \"is_anon\"]\n columns_to_keep = geo_cols + [\n \"requests\",\n \"submit_timestamp\",\n \">=18\",\n \"raw_information_depth\", \"raw_prior_knowledge\", \"raw_motivation\",\n \"information depth\", \"prior knowledge\", \"motivation\",\n \"age\", \"gender\", \"education\", \"locale\", \"native_lang_1\", \"native_lang_2\",\n \"dt_qsinitialization\",\n \"response_type\",\n \"wiki\",\n \"page_title\",\n \"page_id\",\n \"survey_request\",\n \"survey_dt_utc\",\n \"has_account\", 'attempted_edit']\n\n for lang in args.languages:\n print(\"**************\")\n print(\"* Processing \" + lang)\n print(\"**************\")\n with open(os.path.join(args.in_dir_traces, \"sample_{0}.csv\".format(lang)), \"r\") as f:\n lines = []\n assert next(f).strip().split('\\t') == ['userhash', 'geocoded_data', 'has_account', 'attempted_edit', 'requests']\n for l_count, line in enumerate(f, start=1):\n l = parse_row(line)\n if l is not None:\n lines.append(l)\n if l_count % 10000 == 0:\n print(\"processing line...\", l_count)\n print(\"traces processed: \", l_count)\n\n df_traces = pd.DataFrame(lines)\n print(\"traces kept\", len(df_traces))\n df_traces.drop_duplicates(subset=[\"userhash\"], inplace=True)\n print(\"traces without duplicates\", len(df_traces))\n\n df_responses = pd.read_csv(os.path.join(args.in_dir_responses, \"responses_with_el_{0}.csv\".format(lang)),\n sep=\"\\t\")\n print(\"responses with duplicates:\", len(df_responses))\n df_responses = df_responses[~df_responses['userhash'].isnull()]\n print(\"responses after removing null userhashes (missing EL):\", len(df_responses))\n df_responses.drop_duplicates(subset=[\"userhash\"], inplace=True)\n print(\"responses after removing remaining duplicates:\", len(df_responses))\n\n\n df_merged = pd.merge(df_traces, df_responses, left_on=[\"userhash\"],\n right_on=[\"userhash\"], how=\"inner\")\n print(\"Users in merged dataframe of survey responses and webrequest traces:\", len(df_merged))\n\n df_merged['requests'] = df_merged['requests'].apply(parse_requests_ts_and_redirects,\n d=read_redirects(lang, args.redirect_dir))\n df_merged['survey_request'] = df_merged.apply(extract_survey_request, lang=lang, axis=1)\n df_merged['wiki'] = df_merged.apply(lambda x: x['survey_request'].get('uri_host', lang), axis=1)\n df_merged['survey_dt_utc'] = 
df_merged['survey_request'].apply(lambda x: x.get('ts', None))\n print(\"Users w/o survey request identified:\", df_merged[df_merged['survey_dt_utc'].isnull()]['userhash'])\n df_merged.dropna(subset=[\"survey_dt_utc\"], inplace=True)\n print(\"After removing non-existing survey requests: \", len(df_merged))\n df_merged['page_title'] = df_merged['survey_request'].apply(lambda x: x['title'])\n df_merged['page_id'] = df_merged['survey_request'].apply(lambda x: x['page_id'])\n df_merged = df_merged.reset_index(drop=True)\n unmatched_countries = df_merged[\n df_merged['geocoded_data'].apply(lambda x: x['country']) != df_merged['country']]\n if len(unmatched_countries) > 0:\n print(\"Unmatched countries:\", unmatched_countries)\n\n print(\"Anonymizing survey...\")\n for geo_col in geo_cols:\n df_merged[geo_col] = df_merged['geocoded_data'].apply(lambda x: x.get(geo_col, None))\n df_merged = df_merged[columns_to_keep]\n pickle.dump(df_merged,\n open(os.path.join(args.out_dir, \"joined_responses_and_traces_anon_{0}.p\".format(lang)), \"wb\"))\n df_merged.to_csv(os.path.join(args.out_dir, \"joined_responses_and_traces_anon_{0}.csv\".format(lang)),\n index=False)\n print(\"finished\")\n\n\ndef parse_row(line):\n row = line.strip().split('\\t')\n if len(row) != 5:\n return None\n\n d = {'userhash': row[0],\n 'geocoded_data': eval(row[1]),\n 'has_account': bool(int(row[2])),\n 'attempted_edit': bool(int(row[3])),\n 'requests': parse_requests(row[4])\n }\n if d['requests'] is None:\n return None\n return d\n\n\ndef parse_requests(requests):\n ret = []\n for r in requests.split(config.request_delim):\n t = r.split('|')\n if (len(t) % 2) != 0: # should be list of (name, value) pairs and contain at least userhash,ts,title\n continue\n data_dict = {t[i]: t[i + 1] for i in range(0, len(t), 2)}\n ret.append(data_dict)\n try:\n ret.sort(key=lambda x: x['ts']) # sort by time\n except:\n return None\n\n return ret\n\n\ndef extract_survey_request(l, lang):\n quicksurvey_dt = datetime.datetime.strptime(str(l.dt_qsinitialization), '%Y-%m-%dT%H:%M:%S')\n timestamp_only = False\n if pd.isnull(l.page_id) and pd.isnull(l.page_title):\n timestamp_only = True\n else:\n page_title = str(l.page_title)\n page_id = int(l.page_id)\n if l.requests is not None:\n for req in reversed(l.requests):\n # same lang as survey was deployed\n if req['lang'] == lang:\n # same page title (no redirects) or same page id / lang (reflects redirects)\n pageview_ts = req[\"ts\"]\n if timestamp_only or ((req[\"title\"] == page_title) or (req[\"uri_path\"] == (\"/wiki/\" + page_title)) or (\n int(req['page_id']) == page_id)):\n if pageview_ts <= quicksurvey_dt:\n return req\n# print(\"Not matched: {0}; {1} requests.\".format(l.page_title, len(l.requests)))\n return {}\n\n\ndef parse_requests_ts_and_redirects(requests, d={}):\n if len(requests) == 0:\n return None\n for i, r in enumerate(requests):\n r['userhash'] = i\n r['ts'] = datetime.datetime.strptime(r['ts'], '%Y-%m-%d %H:%M:%S')\n if r['title'] in d:\n r['title'] = d[r['title']]\n return requests\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"pandas.Series"
],
[
"pandas.isnull",
"pandas.merge",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
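Editor's note on the record above: `lookup_row` geocodes a respondent by city name, keeps only Geonames candidates within `dist_threshold` kilometres of the IP-derived point, and returns the largest population among them (the record itself notes the max is an arbitrary but empirically inconsequential choice). A minimal sketch of that filter, assuming geopy and a toy candidate dict keyed by (lat, lon); the Berlin entries are hypothetical:

from geopy.distance import distance

def best_population(ip_pt, candidates, dist_threshold_km):
    # candidates: {(lat, lon): population}. Keep places near the IP point,
    # then return the largest population, or None if nothing is close enough.
    within = [pop for pt, pop in candidates.items()
              if distance(ip_pt, pt).kilometers < dist_threshold_km]
    return max(within) if within else None

berlin_candidates = {(52.52, 13.405): 3_644_826,   # hypothetical entries
                     (54.03, 10.45): 12_000}
print(best_population((52.5, 13.4), berlin_candidates, dist_threshold_km=30))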
gcba/IATos | [
"d42cffea313170bb249edcadb0776f7a6d368654"
] | [
"algos/prediction/transformers.py"
] | [
"import librosa\nimport numpy as np\n\nfrom PIL import Image\nfrom typing import Optional\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom matplotlib.cm import ScalarMappable\n\n__all__ = [\n \"Denoising\",\n \"MelSpectogram\",\n \"ColoredSpectogram\",\n]\n\n\nclass BaseTransformer(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n @classmethod\n def read_params(cls, params):\n return cls(**params)\n\n\nclass Denoising(BaseTransformer):\n \"\"\"Placeholder para la capa \"denoising\" actualmente en codigo MATLAB\"\"\"\n\n def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:\n \"\"\"Codigo aqui\"\"\"\n return X\n\n\nclass MelSpectogram(BaseTransformer):\n \"\"\"Transforma una señal en un espectograma con escala de Mel utilizando librosa\n \n Parameters\n ----------\n\n Los parametros para instanciar son los que se pasan a `librosa.feature.melspectogram`\n y a `librosa.power_to_db`.\n\n Returns\n -------\n\n np.array : Numpy array del espectograma con valores en decibeles.\n\n \"\"\"\n\n def __init__(\n self,\n sr: int,\n n_fft: int,\n hop_length: int,\n n_mels: int,\n fmin: int,\n fmax: int,\n ref: str,\n T: bool,\n as_ratio: bool,\n ):\n self.sr = sr\n self.n_fft = n_fft\n self.hop_length = hop_length\n self.n_mels = n_mels\n self.fmin = fmin\n self.fmax = fmax\n self.ref = ref\n self.T = T\n self.as_ratio = as_ratio\n\n def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:\n X_ = self._mel_spec(X)\n if self.T: # backward compatibility\n X_ = X_.T\n return librosa.power_to_db(X_, ref=getattr(np, self.ref))\n\n def _mel_spec(self, X: np.array) -> np.array:\n hop = self.hop_length\n if self.as_ratio: # backward compatibility\n hop = X.size // self.hop_length\n return librosa.feature.melspectrogram(\n y=X, sr=self.sr, hop_length=hop, n_mels=self.n_mels\n )\n\n\nclass ColoredSpectogram(BaseTransformer):\n \"\"\"Transforma una matriz de valores a una imagen con escala de colores.\n \n Parameters\n ----------\n cmap : str\n Escala de colores accesible desde `matplotlib.cm.get_cmap`.\n\n Returns\n -------\n PIL.Image : Imagen en modo RGB.\n\n \"\"\"\n\n def __init__(self, cmap: str):\n self.cmap = cmap\n\n def transform(self, X: np.array, y: Optional[np.array] = None) -> Image:\n X_ = ScalarMappable(cmap=self.cmap).to_rgba(X, bytes=True)\n return Image.fromarray(X_).convert(\"RGB\")\n"
] | [
[
"matplotlib.cm.ScalarMappable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
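Editor's note on the record above: `ColoredSpectogram` turns a decibel matrix into an RGB image by pushing it through a Matplotlib colormap. A minimal sketch of that transform outside the sklearn pipeline, assuming matplotlib and Pillow ('viridis' is a placeholder; the record leaves `cmap` configurable):

import numpy as np
from matplotlib.cm import ScalarMappable
from PIL import Image

def colorize(spec_db, cmap="viridis"):
    # to_rgba(..., bytes=True) autoscales the matrix and returns uint8 RGBA;
    # converting to RGB matches what the transformer returns downstream.
    rgba = ScalarMappable(cmap=cmap).to_rgba(spec_db, bytes=True)
    return Image.fromarray(rgba).convert("RGB")

img = colorize(np.random.uniform(-80, 0, size=(128, 256)))
print(img.size, img.mode)   # (256, 128) RGB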
jacopok/notes | [
"805ebe1be49bbd14c6b46b24055f9fc7d1cd2586",
"805ebe1be49bbd14c6b46b24055f9fc7d1cd2586",
"805ebe1be49bbd14c6b46b24055f9fc7d1cd2586"
] | [
"ap_third_semester/compact_objects/figures/roche-lobe-radius.py",
"phd_courses/theoretical_low_energy_astroparticle/figures/make_all_figures.py",
"ap_second_semester/gravitational_physics/figures/binary_angular_GW_spectrum.py"
] | [
"#%%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.visualization import astropy_mpl_style\nplt.style.use(astropy_mpl_style)\nfrom matplotlib import rc\nrc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\nrc('text.latex', preamble=r'''\\usepackage{amsmath}\n \\usepackage{physics}\n \\usepackage{siunitx}\n ''')\nTHR = .5\nWIDTH = 0\n\n# def weight(q):\n# if WIDTH>0:\n# offset = 1/2 - THR / WIDTH\n# return (np.piecewise(q, \n# condlist=[\n# q < THR - WIDTH / 2,\n# q > THR - WIDTH / 2 and q < THR + WIDTH / 2 ,\n# q > THR + WIDTH / 2,\n# ],\n# funclist=[\n# 0,\n# lambda x: x / WIDTH + offset,\n# 1\n# ]\n# ))\n# else:\n# return (np.piecewise(q,\n# condlist=[q < THR, q >= THR],\n# funclist=[0, 1]\n# ))\n\ndef f1(q):\n return (.46224 * (q / (1 + q))**(1 / 3))\n \ndef f2(q):\n return (.38 + .2 * np.log10(q))\n\n\ndef f(q):\n if q < 0.5:\n return (f1(q))\n else:\n return(f2(q))\n\nf = np.vectorize(f, signature='()->()')\n\n#%% \n\nqs = np.linspace(0, 8, num=1000)\n\nf_q = f(qs)\n\n# plt.plot(qs, f(qs))\n# plt.xlabel('$q = M_2 / M_1$')\n# plt.ylabel('$R_{\\\\text{{lobe}}} / a$')\n# plt.savefig('roche-lobe-radius.pdf', format = 'pdf')\n\n#%%\n\ndef a(q):\n return((1+q)**4 / q**2)\n\na_q = a(qs)\n\nplt.plot(qs, np.abs(np.gradient(f_q, qs) / f_q), label='$\\\\abs{\\\\Delta \\\\log f}$')\nplt.plot(qs, np.abs(np.gradient(a_q, qs) / a_q), label='$\\\\abs{\\\\Delta \\\\log a}$')\nplt.plot(qs, np.gradient(a_q, qs) / a_q + np.gradient(f_q, qs) / f_q, label='$\\\\Delta \\\\log a + \\\\Delta \\\\log f$', ls='--')\nplt.axvline(1, label='$q = 1$', ls=':', c='black')\n\n\nplt.xlabel('$q = M_2 / M_1$')\nplt.ylabel('relative variation')\nplt.legend()\nplt.yscale('log')\nplt.savefig('roche-lobe-relative-corrections.pdf')\nplt.show()\n\n#%%\n\nqs = np.linspace(0, 5/4, num=200)\n\ndef circ(q):\n return((.5 - .227 * np.log10(q))**4 * (1+q))\n\nplt.plot(qs, f(1 / qs), label='Roche Lobe radius')\nplt.plot(qs, circ(qs), label='Circularization radius')\nplt.xlabel('$q$')\nplt.ylim(0,1)\nplt.legend()\nplt.savefig('roche-vs-circularization.pdf')\n# %%\n",
"from tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nrc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\nrc('text.latex', preamble=r'''\\usepackage{amsmath}\n\\usepackage{physics}\n\\usepackage{siunitx}\n''')\nrc('figure', dpi=150)\n\n\ndef plot_and_save(plotting_func):\n plotting_func()\n plt.savefig(str(plotting_func.__name__).split(sep='.')[0] + '.pdf', bbox_inches='tight', pad_inches = 0)\n plt.close()\n\n\nif __name__ == \"__main__\":\n plotter_list = []\n for plotting_func in tqdm(plotter_list):\n plot_and_save(plotting_func)",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.visualization import astropy_mpl_style\nplt.style.use(astropy_mpl_style)\nimport matplotlib.animation as animation\nfrom matplotlib import rc\nrc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\nrc('text.latex', preamble=r'''\\usepackage{amsmath}\n \\usepackage{physics}\n \\usepackage{siunitx}\n ''')\n\nNMAX = 200\n\nthetas = np.linspace(0, 2 * np.pi, num=1000)\n\nplus_polarization_amplitude = (1 + np.cos(thetas)**2)/2\ncross_polarization_amplitude = np.cos(thetas)\n\nfig = plt.figure()\nax = fig.add_subplot(projection='polar')\nax.set_theta_zero_location(\"N\")\n# line, = ax.plot([], [])\n\ndef init():\n ax.set_ylim((0,1.1))\n line.set_data([], [])\n return (line,)\n\ndef total_amplitude(time, Nmax=NMAX):\n t = np.pi * time / Nmax\n amplitude = (((plus_polarization_amplitude * np.sin(t))** 2 + (cross_polarization_amplitude * np.cos(t))** 2))\n \n line.set_data(thetas, amplitude)\n\ndef averaged_amplitude():\n amplitude = (((plus_polarization_amplitude)** 2 + (cross_polarization_amplitude)** 2))\n return(amplitude)\n\n# anim = animation.FuncAnimation(fig, total_amplitude, range(NMAX), init_func=init, interval=50)\n\n# anim.save('angular_spectrum_no_sin.gif', writer='imagemagick', fps=60, dpi=200)\n\nax.plot(thetas, averaged_amplitude())\nax.set_title('$r(\\\\theta) \\\\propto \\\\dv{E}{\\\\Omega}$')\n\nplt.show(block=False)\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.axvline",
"numpy.linspace",
"numpy.gradient",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"numpy.vectorize",
"numpy.log10",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.rc",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.close",
"matplotlib.rc"
],
[
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"matplotlib.rc",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
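Editor's note on the record above: `roche-lobe-radius.py` stitches two fits for the Roche-lobe radius -- 0.46224 (q/(1+q))^(1/3) below q = 0.5 and 0.38 + 0.2 log10(q) above -- and vectorizes the scalar branch with `np.vectorize`. A minimal sketch of the same piecewise function written with `np.piecewise` instead, assuming numpy only (coefficients copied from the record):

import numpy as np

def roche_lobe_radius(q):
    # R_lobe / a as in the record: cube-root branch for q < 0.5,
    # logarithmic branch for q >= 0.5.
    q = np.asarray(q, dtype=float)
    return np.piecewise(q,
                        [q < 0.5, q >= 0.5],
                        [lambda x: 0.46224 * (x / (1 + x))**(1 / 3),
                         lambda x: 0.38 + 0.2 * np.log10(x)])

print(roche_lobe_radius([0.1, 0.5, 1.0, 4.0]))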
deepbluesea/transformers | [
"11a2317986aad6e9a72f542e31344cfb7c94cbab",
"11a2317986aad6e9a72f542e31344cfb7c94cbab"
] | [
"examples/distillation/distiller.py",
"transformers/tests/modeling_transfo_xl_test.py"
] | [
"# coding=utf-8\n# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" The distiller to distil the student.\n Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)\n\"\"\"\nimport os\nimport math\nimport psutil\nimport time\nfrom tensorboardX import SummaryWriter\nfrom tqdm import trange, tqdm\nimport numpy as np\nimport psutil\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import AdamW\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data import RandomSampler, BatchSampler, DataLoader\n\nfrom transformers import WarmupLinearSchedule\n\nfrom utils import logger\nfrom lm_seqs_dataset import LmSeqsDataset\nfrom grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups\n\nclass Distiller:\n def __init__(self,\n params: dict,\n dataset: LmSeqsDataset,\n token_probs: torch.tensor,\n student: nn.Module,\n teacher: nn.Module):\n logger.info('Initializing Distiller')\n self.params = params\n self.dump_path = params.dump_path\n self.multi_gpu = params.multi_gpu\n self.fp16 = params.fp16\n\n self.student = student\n self.teacher = teacher\n\n self.student_config = student.config\n self.vocab_size = student.config.vocab_size\n\n if params.n_gpu <= 1:\n sampler = RandomSampler(dataset)\n else:\n sampler = DistributedSampler(dataset)\n\n if params.group_by_size:\n groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size)\n sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size)\n else:\n sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False)\n\n self.dataloader = DataLoader(dataset=dataset,\n batch_sampler=sampler,\n collate_fn=dataset.batch_sequences)\n\n self.temperature = params.temperature\n assert self.temperature > 0.\n\n self.alpha_ce = params.alpha_ce\n self.alpha_mlm = params.alpha_mlm\n self.alpha_clm = params.alpha_clm\n self.alpha_mse = params.alpha_mse\n self.alpha_cos = params.alpha_cos\n\n self.mlm = params.mlm\n if self.mlm:\n logger.info(f'Using MLM loss for LM step.')\n self.mlm_mask_prop = params.mlm_mask_prop\n assert 0.0 <= self.mlm_mask_prop <= 1.0\n assert params.word_mask + params.word_keep + params.word_rand == 1.0\n self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand])\n self.pred_probs = self.pred_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else self.pred_probs\n self.token_probs = token_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else token_probs\n if self.fp16:\n self.pred_probs = self.pred_probs.half()\n self.token_probs = self.token_probs.half()\n else:\n logger.info(f'Using CLM loss for LM step.')\n\n self.epoch = 0\n self.n_iter = 0\n self.n_total_iter = 0\n self.n_sequences_epoch = 0\n self.total_loss_epoch = 0\n self.last_loss = 0\n self.last_loss_ce = 0\n self.last_loss_mlm = 0\n self.last_loss_clm = 0\n if 
self.alpha_mse > 0.: self.last_loss_mse = 0\n if self.alpha_cos > 0.: self.last_loss_cos = 0\n self.last_log = 0\n\n self.ce_loss_fct = nn.KLDivLoss(reduction='batchmean')\n self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1)\n if self.alpha_mse > 0.:\n self.mse_loss_fct = nn.MSELoss(reduction='sum')\n if self.alpha_cos > 0.:\n self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction='mean')\n\n logger.info('--- Initializing model optimizer')\n assert params.gradient_accumulation_steps >= 1\n self.num_steps_epoch = len(self.dataloader)\n num_train_optimization_steps = int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1\n\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay},\n {'params': [p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}\n ]\n logger.info(\"------ Number of trainable parameters (student): %i\" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]))\n logger.info(\"------ Number of parameters (student): %i\" % sum([p.numel() for p in self.student.parameters()]))\n self.optimizer = AdamW(optimizer_grouped_parameters,\n lr=params.learning_rate,\n eps=params.adam_epsilon,\n betas=(0.9, 0.98))\n\n warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop)\n self.scheduler = WarmupLinearSchedule(self.optimizer,\n warmup_steps=warmup_steps,\n t_total=num_train_optimization_steps)\n\n if self.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n logger.info(f\"Using fp16 training: {self.params.fp16_opt_level} level\")\n self.student, self.optimizer = amp.initialize(self.student,\n self.optimizer,\n opt_level=self.params.fp16_opt_level)\n self.teacher = self.teacher.half()\n\n if self.multi_gpu:\n if self.fp16:\n from apex.parallel import DistributedDataParallel\n logger.info(\"Using apex.parallel.DistributedDataParallel for distributed training.\")\n self.student = DistributedDataParallel(self.student)\n else:\n from torch.nn.parallel import DistributedDataParallel\n logger.info(\"Using nn.parallel.DistributedDataParallel for distributed training.\")\n self.student = DistributedDataParallel(self.student,\n device_ids=[params.local_rank],\n output_device=params.local_rank,\n find_unused_parameters=True)\n\n self.is_master = params.is_master\n if self.is_master:\n logger.info('--- Initializing Tensorboard')\n self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, 'log', 'train'))\n self.tensorboard.add_text(tag='config/training', text_string=str(self.params), global_step=0)\n self.tensorboard.add_text(tag='config/student', text_string=str(self.student_config), global_step=0)\n\n def prepare_batch_mlm(self,\n batch):\n \"\"\"\n Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the masked label for MLM.\n\n Input:\n ------\n batch: `Tuple`\n token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. 
It is padded.\n lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.\n\n Output:\n -------\n token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM.\n attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention.\n mlm_labels: `torch.tensor(bs, seq_length)` - The masked languge modeling labels. There is a -1 where there is nothing to predict.\n \"\"\"\n token_ids, lengths = batch\n token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)\n assert token_ids.size(0) == lengths.size(0)\n\n attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])\n\n bs, max_seq_len = token_ids.size()\n mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids)\n\n x_prob = self.token_probs[token_ids.flatten()]\n n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item())\n tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False)\n pred_mask = torch.zeros(bs * max_seq_len, dtype=torch.bool, device=token_ids.device) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility\n pred_mask[tgt_ids] = 1\n pred_mask = pred_mask.view(bs, max_seq_len)\n\n pred_mask[token_ids == self.params.special_tok_ids['pad_token']] = 0\n\n # mask a number of words == 0 [8] (faster with fp16)\n if self.fp16:\n n1 = pred_mask.sum().item()\n if n1 > 8:\n pred_mask = pred_mask.view(-1)\n n2 = max(n1 % 8, 8 * (n1 // 8))\n if n2 != n1:\n pred_mask[torch.nonzero(pred_mask).view(-1)[:n1-n2]] = 0\n pred_mask = pred_mask.view(bs, max_seq_len)\n assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item()\n\n _token_ids_real = token_ids[pred_mask]\n _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size)\n _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids['mask_token'])\n probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True)\n _token_ids = _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long()\n token_ids = token_ids.masked_scatter(pred_mask, _token_ids)\n\n mlm_labels[~pred_mask] = -1 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility\n\n # sanity checks\n assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size\n\n return token_ids, attn_mask, mlm_labels\n\n def prepare_batch_clm(self,\n batch):\n \"\"\"\n Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the labels for CLM.\n\n Input:\n ------\n batch: `Tuple`\n token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded.\n lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.\n\n Output:\n -------\n token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM.\n attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention.\n clm_labels: `torch.tensor(bs, seq_length)` - The causal languge modeling labels. 
There is a -1 where there is nothing to predict.\n \"\"\"\n token_ids, lengths = batch\n token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)\n assert token_ids.size(0) == lengths.size(0)\n\n attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])\n clm_labels = token_ids.new(token_ids.size()).copy_(token_ids)\n clm_labels[~attn_mask] = -1 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility\n\n # sanity checks\n assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size\n\n return token_ids, attn_mask, clm_labels\n\n def round_batch(self,\n x: torch.tensor,\n lengths: torch.tensor):\n \"\"\"\n For float16 only.\n Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8.\n\n Input:\n ------\n x: `torch.tensor(bs, seq_length)` - The token ids.\n lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch.\n\n Output:\n -------\n x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids.\n lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths.\n \"\"\"\n if not self.fp16 or len(lengths) < 8:\n return x, lengths\n\n # number of sentences == 0 [8]\n bs1 = len(lengths)\n bs2 = 8 * (bs1 // 8)\n assert bs2 > 0 and bs2 % 8 == 0\n if bs1 != bs2:\n idx = torch.randperm(bs1)[:bs2]\n lengths = lengths[idx]\n slen = lengths.max().item()\n x = x[idx, :slen]\n else:\n idx = None\n\n # sequence length == 0 [8]\n ml1 = x.size(1)\n if ml1 % 8 != 0:\n pad = 8 - (ml1 % 8)\n ml2 = ml1 + pad\n if self.mlm:\n pad_id = self.params.special_tok_ids['pad_token']\n else:\n pad_id = self.params.special_tok_ids['unk_token']\n padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id)\n x = torch.cat([x, padding_tensor], 1)\n assert x.size() == (bs2, ml2)\n\n assert x.size(0) % 8 == 0\n assert x.size(1) % 8 == 0\n return x, lengths\n\n def train(self):\n \"\"\"\n The real training loop.\n \"\"\"\n if self.is_master: logger.info('Starting training')\n self.last_log = time.time()\n self.student.train()\n self.teacher.eval()\n\n for _ in range(self.params.n_epoch):\n if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}')\n if self.multi_gpu:\n torch.distributed.barrier()\n\n iter_bar = tqdm(self.dataloader, desc=\"-Iter\", disable=self.params.local_rank not in [-1, 0])\n for batch in iter_bar:\n if self.params.n_gpu > 0:\n batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch)\n\n if self.mlm:\n token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch)\n else:\n token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch)\n self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels)\n\n iter_bar.update()\n iter_bar.set_postfix({'Last_loss': f'{self.last_loss:.2f}',\n 'Avg_cum_loss': f'{self.total_loss_epoch/self.n_iter:.2f}'})\n iter_bar.close()\n\n if self.is_master: logger.info(f'--- Ending epoch {self.epoch}/{self.params.n_epoch-1}')\n self.end_epoch()\n\n if self.is_master:\n logger.info(f'Save very last checkpoint as `pytorch_model.bin`.')\n self.save_checkpoint(checkpoint_name=f'pytorch_model.bin')\n logger.info('Training is finished')\n\n def step(self,\n input_ids: torch.tensor,\n attention_mask: torch.tensor,\n lm_labels: torch.tensor):\n \"\"\"\n One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation),\n and possibly a parameter update (depending on the gradient 
accumulation).\n\n Input:\n ------\n input_ids: `torch.tensor(bs, seq_length)` - The token ids.\n attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention.\n lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM).\n \"\"\"\n if self.mlm:\n s_logits, s_hidden_states = self.student(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size)\n with torch.no_grad():\n t_logits, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size)\n else:\n s_logits, _, s_hidden_states = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size)\n with torch.no_grad():\n t_logits, _, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size)\n assert s_logits.size() == t_logits.size()\n\n #https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100\n #https://github.com/peterliht/knowledge-distillation-pytorch/issues/2\n if self.params.restrict_ce_to_mask:\n mask = (lm_labels>-1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size)\n else:\n mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size)\n s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask\n s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask\n t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask\n t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask\n assert t_logits_slct.size() == s_logits_slct.size()\n\n loss_ce = self.ce_loss_fct(F.log_softmax(s_logits_slct/self.temperature, dim=-1),\n F.softmax(t_logits_slct/self.temperature, dim=-1)) * (self.temperature)**2\n loss = self.alpha_ce*loss_ce\n\n if self.alpha_mlm > 0.:\n loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1))\n loss += self.alpha_mlm * loss_mlm\n if self.alpha_clm > 0.:\n shift_logits = s_logits[..., :-1, :].contiguous()\n shift_labels = lm_labels[..., 1:].contiguous()\n loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n loss += self.alpha_clm * loss_clm\n\n if self.alpha_mse > 0.:\n loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct)/s_logits_slct.size(0) # Reproducing batchmean reduction\n loss += self.alpha_mse * loss_mse\n if self.alpha_cos > 0.:\n s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim)\n t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim)\n mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim)\n assert s_hidden_states.size() == t_hidden_states.size()\n dim = s_hidden_states.size(-1)\n \n s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim)\n s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim)\n t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim)\n t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim)\n \n target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,)\n loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target)\n loss += self.alpha_cos * loss_cos\n\n self.total_loss_epoch += loss.item()\n self.last_loss 
= loss.item()\n self.last_loss_ce = loss_ce.item()\n if self.alpha_mlm > 0.:\n self.last_loss_mlm = loss_mlm.item()\n if self.alpha_clm > 0.:\n self.last_loss_clm = loss_clm.item()\n if self.alpha_mse > 0.:\n self.last_loss_mse = loss_mse.item()\n if self.alpha_cos > 0.:\n self.last_loss_cos = loss_cos.item()\n\n self.optimize(loss)\n\n self.n_sequences_epoch += input_ids.size(0)\n\n def optimize(self,\n loss):\n \"\"\"\n Normalization on the loss (gradient accumulation or distributed training), followed by\n backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation).\n Also update the metrics for tensorboard.\n \"\"\"\n # Check for NaN\n if (loss != loss).data.any():\n logger.error('NaN detected')\n exit()\n\n if self.multi_gpu:\n loss = loss.mean()\n if self.params.gradient_accumulation_steps > 1:\n loss = loss / self.params.gradient_accumulation_steps\n\n if self.fp16:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n self.iter()\n if self.n_iter % self.params.gradient_accumulation_steps == 0:\n if self.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm)\n self.optimizer.step()\n self.optimizer.zero_grad()\n self.scheduler.step()\n\n def iter(self):\n \"\"\"\n Update global counts, write to tensorboard and save checkpoint.\n \"\"\"\n self.n_iter += 1\n self.n_total_iter += 1\n\n if self.n_total_iter % self.params.log_interval == 0:\n self.log_tensorboard()\n self.last_log = time.time()\n if self.n_total_iter % self.params.checkpoint_interval == 0:\n self.save_checkpoint()\n\n def log_tensorboard(self):\n \"\"\"\n Log into tensorboard. 
Only by the master process.\n \"\"\"\n if not self.is_master:\n return\n\n for param_name, param in self.student.named_parameters():\n self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter)\n if param.grad is None:\n continue\n self.tensorboard.add_scalar(tag=\"grad_mean/\" + param_name, scalar_value=param.grad.data.mean(),global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"grad_std/\" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter)\n\n self.tensorboard.add_scalar(tag=\"losses/cum_avg_loss_epoch\", scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"losses/loss\", scalar_value=self.last_loss, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"losses/loss_ce\", scalar_value=self.last_loss_ce, global_step=self.n_total_iter)\n if self.alpha_mlm > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_mlm\", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter)\n if self.alpha_clm > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_clm\", scalar_value=self.last_loss_clm, global_step=self.n_total_iter)\n if self.alpha_mse > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_mse\", scalar_value=self.last_loss_mse, global_step=self.n_total_iter)\n if self.alpha_cos > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_cos\", scalar_value=self.last_loss_cos, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"learning_rate/lr\", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter)\n \n self.tensorboard.add_scalar(tag=\"global/memory_usage\", scalar_value=psutil.virtual_memory()._asdict()['used']/1_000_000, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"global/speed\", scalar_value=time.time()-self.last_log, global_step=self.n_total_iter)\n\n def end_epoch(self):\n \"\"\"\n Finally arrived at the end of epoch (full pass on dataset).\n Do some tensorboard logging and checkpoint saving.\n \"\"\"\n logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.')\n\n if self.is_master:\n self.save_checkpoint(checkpoint_name=f'model_epoch_{self.epoch}.pth')\n self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.epoch)\n\n self.epoch += 1\n self.n_sequences_epoch = 0\n self.n_iter = 0\n self.total_loss_epoch = 0\n\n def save_checkpoint(self,\n checkpoint_name: str = 'checkpoint.pth'):\n \"\"\"\n Save the current state. Only by the master process.\n \"\"\"\n if not self.is_master:\n return\n mdl_to_save = self.student.module if hasattr(self.student, 'module') else self.student\n mdl_to_save.config.save_pretrained(self.dump_path)\n state_dict = mdl_to_save.state_dict()\n torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))\n",
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\nimport random\nimport shutil\nimport pytest\n\nfrom transformers import is_torch_available\n\nif is_torch_available():\n import torch\n from transformers import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel)\n from transformers.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP\nelse:\n pytestmark = pytest.mark.skip(\"Require Torch\")\n\nfrom .modeling_common_test import (CommonTestCases, ids_tensor)\nfrom .configuration_common_test import ConfigTester\n\nclass TransfoXLModelTest(CommonTestCases.CommonModelTester):\n\n all_model_classes = (TransfoXLModel, TransfoXLLMHeadModel) if is_torch_available() else ()\n test_pruning = False\n test_torchscript = False\n test_resize_embeddings = False\n\n class TransfoXLModelTester(object):\n\n def __init__(self,\n parent,\n batch_size=13,\n seq_length=7,\n mem_len=30,\n clamp_len=15,\n is_training=True,\n use_labels=True,\n vocab_size=99,\n cutoffs=[10, 50, 80],\n hidden_size=32,\n d_embed=32,\n num_attention_heads=4,\n d_head=8,\n d_inner=128,\n div_val=2,\n num_hidden_layers=5,\n scope=None,\n seed=1,\n ):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.mem_len = mem_len\n self.key_len = seq_length + mem_len\n self.clamp_len = clamp_len\n self.is_training = is_training\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.cutoffs = cutoffs\n self.hidden_size = hidden_size\n self.d_embed = d_embed\n self.num_attention_heads = num_attention_heads\n self.d_head = d_head\n self.d_inner = d_inner\n self.div_val = div_val\n self.num_hidden_layers = num_hidden_layers\n self.scope = scope\n self.seed = seed\n\n def prepare_config_and_inputs(self):\n input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n lm_labels = None\n if self.use_labels:\n lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)\n\n config = TransfoXLConfig(\n vocab_size_or_config_json_file=self.vocab_size,\n mem_len=self.mem_len,\n clamp_len=self.clamp_len,\n cutoffs=self.cutoffs,\n d_model=self.hidden_size,\n d_embed=self.d_embed,\n n_head=self.num_attention_heads,\n d_head=self.d_head,\n d_inner=self.d_inner,\n div_val=self.div_val,\n n_layer=self.num_hidden_layers)\n\n return (config, input_ids_1, input_ids_2, lm_labels)\n\n def set_seed(self):\n random.seed(self.seed)\n torch.manual_seed(self.seed)\n\n def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):\n model = TransfoXLModel(config)\n model.eval()\n\n hidden_states_1, mems_1 = model(input_ids_1)\n hidden_states_2, mems_2 = model(input_ids_2, mems_1)\n outputs = {\n \"hidden_states_1\": hidden_states_1,\n \"mems_1\": mems_1,\n \"hidden_states_2\": 
hidden_states_2,\n \"mems_2\": mems_2,\n }\n return outputs\n\n def check_transfo_xl_model_output(self, result):\n self.parent.assertListEqual(\n list(result[\"hidden_states_1\"].size()),\n [self.batch_size, self.seq_length, self.hidden_size])\n self.parent.assertListEqual(\n list(result[\"hidden_states_2\"].size()),\n [self.batch_size, self.seq_length, self.hidden_size])\n self.parent.assertListEqual(\n list(list(mem.size()) for mem in result[\"mems_1\"]),\n [[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers)\n self.parent.assertListEqual(\n list(list(mem.size()) for mem in result[\"mems_2\"]),\n [[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers)\n\n\n def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):\n model = TransfoXLLMHeadModel(config)\n model.eval()\n\n lm_logits_1, mems_1 = model(input_ids_1)\n loss_1, _, mems_1 = model(input_ids_1, labels=lm_labels)\n lm_logits_2, mems_2 = model(input_ids_2, mems=mems_1)\n loss_2, _, mems_2 = model(input_ids_2, labels=lm_labels, mems=mems_1)\n\n outputs = {\n \"loss_1\": loss_1,\n \"mems_1\": mems_1,\n \"lm_logits_1\": lm_logits_1,\n \"loss_2\": loss_2,\n \"mems_2\": mems_2,\n \"lm_logits_2\": lm_logits_2,\n }\n return outputs\n\n def check_transfo_xl_lm_head_output(self, result):\n self.parent.assertListEqual(\n list(result[\"loss_1\"].size()),\n [self.batch_size, self.seq_length])\n self.parent.assertListEqual(\n list(result[\"lm_logits_1\"].size()),\n [self.batch_size, self.seq_length, self.vocab_size])\n self.parent.assertListEqual(\n list(list(mem.size()) for mem in result[\"mems_1\"]),\n [[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers)\n\n self.parent.assertListEqual(\n list(result[\"loss_2\"].size()),\n [self.batch_size, self.seq_length])\n self.parent.assertListEqual(\n list(result[\"lm_logits_2\"].size()),\n [self.batch_size, self.seq_length, self.vocab_size])\n self.parent.assertListEqual(\n list(list(mem.size()) for mem in result[\"mems_2\"]),\n [[self.mem_len, self.batch_size, self.hidden_size]] * self.num_hidden_layers)\n\n def prepare_config_and_inputs_for_common(self):\n config_and_inputs = self.prepare_config_and_inputs()\n (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs\n inputs_dict = {'input_ids': input_ids_1}\n return config, inputs_dict\n\n\n def setUp(self):\n self.model_tester = TransfoXLModelTest.TransfoXLModelTester(self)\n self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)\n\n def test_config(self):\n self.config_tester.run_common_tests()\n\n def test_transfo_xl_model(self):\n self.model_tester.set_seed()\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n output_result = self.model_tester.create_transfo_xl_model(*config_and_inputs)\n self.model_tester.check_transfo_xl_model_output(output_result)\n\n def test_transfo_xl_lm_head(self):\n self.model_tester.set_seed()\n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs)\n self.model_tester.check_transfo_xl_lm_head_output(output_result)\n\n @pytest.mark.slow\n def test_model_from_pretrained(self):\n cache_dir = \"/tmp/transformers_test/\"\n for model_name in list(TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:\n model = TransfoXLModel.from_pretrained(model_name, cache_dir=cache_dir)\n shutil.rmtree(cache_dir)\n self.assertIsNotNone(model)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.nn.functional.softmax",
"torch.zeros",
"torch.cat",
"torch.randperm",
"torch.utils.data.DataLoader",
"torch.FloatTensor",
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.distributed.DistributedSampler",
"torch.distributed.barrier",
"torch.nonzero",
"torch.masked_select",
"torch.optim.AdamW",
"torch.nn.CosineEmbeddingLoss",
"torch.nn.parallel.DistributedDataParallel",
"torch.nn.KLDivLoss",
"torch.nn.functional.log_softmax",
"torch.utils.data.RandomSampler",
"torch.nn.MSELoss",
"torch.utils.data.BatchSampler"
],
[
"torch.manual_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
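The `apis` list for this row pairs `torch.nn.KLDivLoss` with `torch.nn.functional.log_softmax`/`softmax`, the usual combination behind the temperature-scaled soft-target loss (`loss_ce`) that the distillation trainer in the code column logs to tensorboard. Below is a minimal, self-contained sketch of that pattern, not the repo's exact code; the `temperature` value and the toy tensor shapes are illustrative assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def soft_target_loss(student_logits, teacher_logits, temperature=2.0):
    # KLDivLoss expects log-probabilities as input and plain probabilities
    # as target; reduction="batchmean" gives the correct KL over the batch.
    kl = nn.KLDivLoss(reduction="batchmean")
    loss = kl(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
    )
    # The T^2 factor keeps gradient magnitudes comparable across temperatures.
    return loss * temperature ** 2

student = torch.randn(4, 10, 99)  # (batch, seq_len, vocab) -- toy sizes
teacher = torch.randn(4, 10, 99)
print(soft_target_loss(student, teacher).item())
```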
uibcdf/openpharmacophore | [
"4f563fa206f6e7c081502acab97bb795d27bdeb9"
] | [
"openpharmacophore/pharmacophore/dynophore.py"
] | [
"# OpenPharmacophore\nfrom openpharmacophore._private_tools.exceptions import InvalidFileFormat, NoLigandsError, OpenPharmacophoreTypeError\nfrom openpharmacophore.pharmacophore.pharmacophoric_point import UniquePharmacophoricPoint\nfrom openpharmacophore import StructuredBasedPharmacophore\nfrom openpharmacophore import Pharmacophore\nfrom openpharmacophore.utils.conformers import conformer_energy\nfrom openpharmacophore.pharmacophore.color_palettes import get_color_from_palette_for_feature\n# Third Party\nimport matplotlib.pyplot as plt\nimport MDAnalysis as mda\nfrom MDAnalysis.lib.util import NamedStream\nimport mdtraj as mdt\nimport numpy as np\nimport pandas as pd\nimport pyunitwizard as puw\nfrom rdkit.Chem.Draw import rdMolDraw2D\nfrom tqdm.auto import tqdm\n# Standard Library\nfrom collections import defaultdict\nimport copy\nimport bisect\nfrom io import StringIO\nimport tempfile\nfrom typing import List, Tuple, Optional\n\nclass Dynophore():\n \"\"\" Class to store and compute dynamic pharmacophores\n\n Parameters\n ----------\n\n trajectory : str or mdtraj.trajectory or MDAnalysis.universe\n A str with the file path containing the trajectory, an mdtraj trajectory object, \n or an MDAnalysis universe.\n\n Attributes\n ----------\n\n pharmacophores : list of openpharmacophore.StructuredBasedPharmacophore\n List with pharmacophores for each relevant frame in the trajectory. \n\n pharmacophore_indices : list of int\n Indices of the frame of the trajectory from which the pharmacophores were extracted.\n The index of each element of the list corresponds to the one in pharmacophores list.\n\n n_pharmacophores : int\n Number of different pharmacophores in the trajectory.\n\n \"\"\"\n def __init__(self, trajectory):\n self.pharmacophores = []\n self.pharmacophore_indices = []\n self.n_pharmacophores = 0\n self.unique_pharmacophoric_points = []\n\n # TODO: Load other types of file, including using a topology and tajectory\n if isinstance(trajectory, str):\n self._trajectory = self._load_trajectory_file(trajectory)\n elif isinstance(trajectory, mdt.Trajectory):\n self._trajectory_type = \"mdt\"\n self._trajectory = trajectory\n self._n_frames = self._trajectory.n_frames\n elif isinstance(trajectory, mda.Universe):\n self._trajectory_type = \"mda\"\n self._trajectory = trajectory\n self._n_frames = trajectory.trajectory.n_frames\n else:\n raise TypeError(\"Trajectory must be of type string, mdtraj.Trajectory or MdAnalysis.Universe\")\n \n \n self._saved_ligand = False\n self._averaged_coords = False\n\n def common_hits_approach(self, frame_list=None):\n \"\"\" Get a list of pharmacophore models from a trajectory using the common hits approach\n method.\n\n Notes\n -----\n\n This method is based on obtaining a list of representative pharmacophore models from a \n trajectory and then validate and score them using virtual screening. The best performant\n pharmacophore models are then returned.\n\n References\n ----------\n\n [1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico, \n and Thierry Langer. \"Common hits approach: combining pharmacophore modeling and molecular dynamics \n simulations.\" Journal of chemical information and modeling 57, no. 
2 (2017): 365-385 \n\n \"\"\"\n if frame_list is None:\n frame_list = list(range(0, self._n_frames))\n\n self.pharmacophores_from_frames(frame_list, load_ligand=True)\n self._get_unique_pharmacophoric_points(avg_coordinates=False)\n rpms = self.representative_pharmacophore_models()\n\n pass\n\n def draw(self, file_name: str, img_size: Tuple[int, int] = (500,500), \n legend: str = \"\", freq_threshold: float = 0.2) -> None:\n \"\"\" Draw a 2d representation of the dynamic pharmacophore. This is a drawing of the\n ligand with the pharmacophoric features highlighted and the frequency if each\n one. \n\n Parameters\n ----------\n file_name : str\n Name or path og the file where the drawing will be saved. Must be a png file.\n\n img_size : 2-tuple of int, optional \n The size of the image (default=(500,500))\n\n legend : str, optional\n Image legend.\n\n freq_threshold : double , optional\n The minimun frequency of a pharmacophoric point to be drawn. Number\n between 0.0 and 1.0 (default=0.2). \n \"\"\"\n if freq_threshold < 0.0 or freq_threshold > 1.0:\n raise ValueError(\"Freqency threshold must be a value between 0 and 1\") \n\n if not file_name.endswith(\".png\"):\n raise InvalidFileFormat(\"File must be a png.\")\n\n # Extract a ligand\n if self.pharmacophores[0].ligand is None:\n raise NoLigandsError(\"Ligand could not be extracted\")\n ligand = copy.deepcopy(self.pharmacophores[0].ligand)\n ligand.RemoveAllConformers()\n\n atoms = []\n bond_colors = {}\n atom_highlights = defaultdict(list)\n highlight_radius = {}\n\n for up in self.unique_pharmacophoric_points:\n \n if up.frequency < freq_threshold:\n continue\n\n indices = up.atom_indices\n update_freq = True\n for idx in indices:\n\n # If an atom has more than one feature keep higher frequency value\n if idx in atoms:\n if ligand.GetAtomWithIdx(idx).HasProp(\"atomNote\"):\n freq = int(ligand.GetAtomWithIdx(idx).GetProp(\"atomNote\")[2:])\n if freq > up.frequency:\n update_freq = False\n\n atoms.append(idx)\n if \"hydrophobicity\" in up.feature_name:\n feat_name = \"hydrophobicity\"\n else:\n feat_name = \" \".join(up.feature_name.split()[0:2])\n \n atom_highlights[idx].append(get_color_from_palette_for_feature(feat_name))\n highlight_radius[idx] = 0.6\n\n # Draw aromatic rings bonds\n if up.short_name == \"R\":\n for neighbor in ligand.GetAtomWithIdx(idx).GetNeighbors():\n nbr_idx = neighbor.GetIdx()\n if nbr_idx not in indices:\n continue\n bond = ligand.GetBondBetweenAtoms(idx, nbr_idx).GetIdx()\n bond_colors[bond] = [get_color_from_palette_for_feature(\"aromatic ring\")]\n \n if update_freq:\n frequency = int(up.frequency * 100)\n ligand.GetAtomWithIdx(idx).SetProp(\"atomNote\", f\"f={frequency}\")\n\n drawing = rdMolDraw2D.MolDraw2DCairo(img_size[0], img_size[1])\n drawing.DrawMoleculeWithHighlights(ligand, legend, dict(atom_highlights), bond_colors, highlight_radius, {})\n drawing.FinishDrawing()\n drawing.WriteDrawingText(file_name)\n\n def first_and_last_pharmacophore(self) -> None:\n \"\"\" Derive a pharmacophore model for the first and last frames of a trajectory.\n\n References\n ----------\n [1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, Stefan Boresch, and Thierry Langer. \n \"Comparing pharmacophore models derived from crystal structures and from molecular \n dynamics simulations.\" Monatshefte für Chemie-Chemical Monthly 147, no. 
3 (2016): \n 553-563.\n \"\"\"\n if self._trajectory_type == \"mdt\":\n get_pharmacophore = self._pharmacophore_from_mdtraj\n elif self._trajectory_type == \"mda\":\n get_pharmacophore = self._pharmacohore_from_mdanalysis\n\n initial_pharmacophore = get_pharmacophore(0, True, True)\n end_pharmacophore = get_pharmacophore(-1, True, True)\n last_frame_index = self._trajectory.n_frames\n self.pharmacophores = [\n initial_pharmacophore,\n end_pharmacophore\n ]\n self.pharmacophore_indices = [0, last_frame_index]\n self.n_pharmacophores = 2\n\n def pharmacophore_by_frequency(self, threshold: float) -> Pharmacophore:\n \"\"\" Derive a unique pharmacophore model with the pharmacophoric points\n that have a frequency >= to threshold.\n\n Parameters\n ---------\n threshold : float\n The value of frequency from which points are considered part of\n the pharmacophore model. Must be a value between 0 and 1-\n\n Returns\n -------\n openpharmcophore.Pharmacophore\n Pharmacophore model with the unique pharmacophoric points.\n\n References\n ----------\n [1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, and Thierry Langer. \"Pharmacophore models \n derived from molecular dynamics simulations of protein-ligand complexes: A case study.\" \n Natural product communications 11, no. 10 (2016): 1934578X1601101019.\n \"\"\"\n if threshold < 0 or threshold > 1:\n raise ValueError(\"Threshold must be a number between 0 and 1\")\n \n if len(self.unique_pharmacophoric_points) == 0:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n \n points = [p for p in self.unique_pharmacophoric_points if p.frequency >= threshold]\n return Pharmacophore(points)\n\n def pharmacophore_from_unique_points(self, unique_points: List[str]) -> Pharmacophore:\n \"\"\" Get a pharmacophore which consists of the passed unique pharmacophoric\n points.\n\n Parameters\n ----------\n unique_points: list of str\n List with the name of the unique pharmacophoric points.\n\n Returns\n -------\n openpharmcophore.Pharmacophore\n Pharmacophore model with the specified points.\n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n points = [point for point in self.unique_pharmacophoric_points if point.feature_name in unique_points]\n return Pharmacophore(pharmacophoric_points=points)\n\n def pharmacophores_from_frames(self, frames: List[int], load_ligand: bool = True) -> None:\n \"\"\" Get pharmacophores for the specified frames in a trajectory\n\n Parameters\n ----------\n frames : list of int\n Indices of the frames for which pharmacophores will be derived.\n\n \"\"\"\n if self._trajectory_type == \"mdt\":\n get_pharmacophore = self._pharmacophore_from_mdtraj\n elif self._trajectory_type == \"mda\":\n get_pharmacophore = self._pharmacohore_from_mdanalysis\n \n self.pharmacophores.clear()\n self.pharmacophore_indices.clear()\n for ii in tqdm(frames):\n self.pharmacophores.append(get_pharmacophore(ii, load_ligand=load_ligand))\n self.pharmacophore_indices.append(ii)\n self.n_pharmacophores = len(self.pharmacophores)\n \n def pharmacophoric_point_frequency(self) -> pd.DataFrame:\n \"\"\" Get a dataframe with all unique pharmacophoric points and its frequency.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe with the following columns: feature name, frequency and atom\n indices.\n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n \n names = 
[]\n frequencies = []\n indices = []\n for point in self.unique_pharmacophoric_points:\n names.append(point.feature_name)\n frequencies.append(point.frequency)\n indices.append(point.atom_indices)\n\n frequency = pd.DataFrame().from_dict({\n \"Feature Name\": names,\n \"Frequency\": frequencies,\n \"Atoms Indices\": indices\n })\n frequency.sort_values(by=[\"Frequency\"], ascending=False, inplace=True)\n frequency.reset_index(inplace=True)\n frequency.drop(columns=[\"index\"], inplace=True)\n return frequency\n\n def point_frequency_plot(self, threshold: float = 0.0, n_bins: int = 10, \n ax: Optional[plt.Axes] = None):\n \"\"\" Plot of pharmacophoric points frequency vs time. \n \n Each pharmacophoric point will appear as a different line in the plot.\n\n Parameters\n ----------\n threshold : double, default=0.0\n The value of overall frequency from which points will form part of the \n plot. If there are a lot of points with really low frequency, setting\n the threshold value can help with visualization.\n\n n_bins : int, default=10\n Number of bins to discretize the timesteps. \n\n ax : matplotlib.axes._subplots.AxesSubplot, optional.\n An axes object where the plot will be drawn.\n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n\n if threshold < 0 or threshold > 1:\n raise ValueError(\"Threshold must be a number between 0 and 1\")\n\n if ax is None:\n fig, ax = plt.subplots(figsize=(10, 7))\n n_timesteps = self._n_frames\n bins = np.arange(0, n_timesteps + 1, n_timesteps/n_bins)\n\n for point in self.unique_pharmacophoric_points:\n if point.frequency < threshold:\n continue\n point_timesteps = np.array(point.timesteps)\n discretized_timesteps = np.digitize(point_timesteps, bins)\n\n counts = np.zeros_like(bins)\n\n for i in range(bins.shape[0]):\n c = np.count_nonzero(discretized_timesteps == i)\n counts[i] = c\n \n ax.plot(bins, counts, label=point.feature_name)\n\n ax.legend()\n ax.set_xlabel(\"Timesteps\")\n ax.set_ylabel(\"Count\")\n plt.show()\n\n return ax\n \n def representative_pharmacophore_models(self) -> List[StructuredBasedPharmacophore]:\n \"\"\" Get all representative pharmacophore models (RPM) in a trajectory. \n \n RPMs are pharmacophore models that have the same pharmacophoric points, \n\n Returns\n -------\n rpms : list of openpharmacophore.StructuredBasedPharmacophore\n The representative pharmacophore models\n\n Note\n -----\n Pharmacophoric points are considered equal based only on feature type and the atoms to \n which this points belong to. Coordinates are not taken into account.\n\n The coordinates of the pharmacophoric points are those that belong to the median energy of\n the ligand.\n\n References\n ----------\n [1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico, \n and Thierry Langer. \"Common hits approach: combining pharmacophore modeling and molecular dynamics \n simulations.\" Journal of chemical information and modeling 57, no. 
2 (2017): 365-385 \n \n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=False)\n self._averaged_coords = False\n \n rpms_indices = self._get_rpms_indices()\n \n return self._pharmacophores_from_ligand_median_energy(rpms_indices)\n\n def _get_rpms_indices(self) -> List[List[int]]:\n \"\"\" Get the indices of the representative pharmacophore models.\n \n If an empty list is returned it means that all pharmacophore models in the trajectory are different.\n \n Returns\n --------\n rpms_indices : list of list of int\n A list where each sublist contains the indices of each representative pharmacophore\n model. This indices correspond to the attribute pharmacophores of the Dynophore\n class.\n \"\"\"\n # Compute a matrix where each row represents a feature vector of a pharmacophore\n n_pharmacophores = self.n_pharmacophores\n n_features = len(self.unique_pharmacophoric_points) \n feature_matrix = np.zeros((n_pharmacophores, n_features), dtype=np.int32)\n for ii, pharmacophore in enumerate(self.pharmacophores):\n for point in pharmacophore:\n for jj, unique_point in enumerate(self.unique_pharmacophoric_points):\n if point.is_equal(unique_point):\n feature_matrix[ii, jj] = 1\n break\n \n # Find similar pharmacophores in the matrix\n rpms_indices = []\n skip = []\n for ii in range(n_pharmacophores):\n rpm = [ii]\n for jj in range(ii + 1, n_pharmacophores):\n if jj in skip:\n continue\n if np.all(feature_matrix[ii, :] == feature_matrix[jj, :]):\n rpm.append(jj)\n skip.append(jj)\n # Keep only models that have a frequency higher than 2\n if len(rpm) > 2:\n rpms_indices.append(rpm)\n \n \n return rpms_indices\n \n def _pharmacophores_from_ligand_median_energy(self, rpms_indices)-> List[List[int]]:\n \"\"\" Get the representative pharmacophore models that correspond to the pharmacophore\n with ligand median energy.\n\n Parameters\n ----------\n rpms_indices : list of list of int\n A list where each sublist contains the indices of each representative pharmacophore\n model. This indices correspond to the attribute pharmacophores of the Dynophore\n class.\n \n Returns\n -------\n rpms : list of openpharmacophore.StructuredBasedPharmacophore\n The representative pharmacophore models\n \"\"\"\n rpms = []\n for indices in rpms_indices:\n energies = []\n for index in indices:\n energy = (conformer_energy(self.pharmacophores[index].ligand), index)\n bisect.insort(energies, energy)\n # Take the pharmacophore with median energy\n median_energy_index = energies[int(len(energies) / 2)][1]\n rpms.append(self.pharmacophores[median_energy_index])\n \n return rpms\n\n \n def _load_trajectory_file(self, file_name: str) -> mdt.Trajectory:\n \"\"\" Load a trajectory file from a MD simulation\n\n Parameters\n ----------\n file_name : str\n Name of the file containing the trajectory.\n\n Returns\n -------\n traj : \n The trajectory object. \n \"\"\"\n if file_name.endswith(\"h5\"):\n traj = mdt.load(file_name)\n self._trajectory_type = \"mdt\"\n else:\n raise NotImplementedError\n\n return traj\n \n def _get_unique_pharmacophoric_points(self, avg_coordinates: bool = True) -> None:\n \"\"\" Get all unique pharmacophoric points across all the pharmacophore models \n derived from the trajectory. 
\n\n Parameters\n ----------\n avg_coordinates : bool\n Whether to average the coordinates of the pharmacophoric points.\n \n Notes\n -----\n Two points are considered equal if they have the same feature type and\n are associated with the same atom in the ligand.\n \"\"\"\n if avg_coordinates:\n self._averaged_coords = True\n\n if self.n_pharmacophores == 0:\n self.pharmacophores_from_frames(list(range(0, self._n_frames)))\n \n all_points = []\n for ii, pharmacophore in enumerate(self.pharmacophores):\n for pharmacophoric_point in pharmacophore:\n pharmacophoric_point.pharmacophore_index = ii\n all_points.append(pharmacophoric_point)\n \n self.unique_pharmacophoric_points.clear()\n # Get all unique parmacophoric points while also updating the count, \n # timesteps where they appear and calculating the average centroid.\n for point in all_points:\n is_unique = True\n for unique_p in self.unique_pharmacophoric_points:\n if point.is_equal(unique_p):\n timestep = point.pharmacophore_index\n if not timestep in unique_p.timesteps:\n unique_p.timesteps.append(timestep)\n unique_p.count += 1\n if avg_coordinates:\n unique_p.center += point.center\n is_unique = False\n break\n if is_unique:\n self.unique_pharmacophoric_points.append(UniquePharmacophoricPoint(point, point.pharmacophore_index))\n \n names = []\n for point in self.unique_pharmacophoric_points:\n if avg_coordinates:\n # Normalize centroid\n point.center /= point.count \n point.frequency = point.count / self.n_pharmacophores\n # Get a unique name for each point\n feat_num = 1\n full_name = point.feature_name + \" \" + str(feat_num)\n if full_name not in names:\n names.append(full_name)\n point.feature_name = full_name\n else:\n while True:\n feat_num += 1\n full_name = point.feature_name + \" \" + str(feat_num)\n if full_name not in names:\n names.append(full_name)\n point.feature_name = full_name\n break\n\n def _pharmacophore_from_mdtraj(self, frame_num: int, load_mol_system: bool=False, \n load_ligand: bool=False) -> StructuredBasedPharmacophore:\n \"\"\" Derive a pharmacophore for a single frame of an mdtraj Trajectory object.\n\n Parameters\n ----------\n frame_num : int\n The index number of the frame from which the pharmacophore will be derived.\n \n load_mol_system : bool, default=False\n If true the receptor will be stored in the pharmacophore object.\n \n load_ligand : bool, default=False\n If true the ligand will be stored in the pharmacophore object.\n \"\"\"\n # mdtraj trajectories cannot be passed to SringIO objects nor saved as string. 
So with this\n # method, temporary pdb files will be created that can be read by the StructuredBasedPharmacophore \n # class.\n if not isinstance(frame_num, int):\n raise OpenPharmacophoreTypeError(\"Frame number must be an integer\")\n frame = self._trajectory[frame_num]\n \n with tempfile.NamedTemporaryFile() as original_file:\n frame.save_pdb(original_file.name)\n original_file.seek(0) \n lines_original = original_file.readlines()\n \n # The pdb mdtraj generates needs to be edited so that pybel can read it.\n # The third line that contains \"MODEL\" needs to be removed for the structured \n # based pharmacophore to work.\n with tempfile.NamedTemporaryFile() as modified_file:\n for line in lines_original:\n if not line.startswith(b'MODEL'):\n modified_file.write(line)\n modified_file.truncate()\n modified_file.seek(0)\n pharmacophore = StructuredBasedPharmacophore.from_pdb(modified_file, \n radius=1.0, ligand_id=None, hydrophobics=\"plip\", \n load_mol_system=load_mol_system, load_ligand=load_ligand)\n \n return pharmacophore\n \n def _pharmacohore_from_mdanalysis(self, frame_num: int, load_mol_system: bool = False, \n load_ligand: bool = False) -> StructuredBasedPharmacophore:\n \"\"\" Derive a pharmacophore for a single frame of an MdAnalysis Universe object.\n\n Parameters\n ----------\n frame_num : int\n The index number of the frame from which the pharmacophore will be derived.\n \n load_mol_system: bool, default=False\n If true the receptor will be stored in the pharmacophore object.\n \n load_ligand: bool, default=False\n If true the ligand will be stored in the pharmacophore object.\n \"\"\"\n if not isinstance(frame_num, int):\n raise OpenPharmacophoreTypeError(\"Frame number must be an integer\")\n stream = StringIO()\n pdb_stream = NamedStream(stream, \"output.pdb\")\n atoms = self._trajectory.select_atoms(\"all\")\n atoms.write(pdb_stream, frames=self._trajectory.trajectory[[frame_num]])\n pharmacophore = StructuredBasedPharmacophore.from_pdb(pdb_stream, \n radius=1.0, ligand_id=None, hydrophobics=\"plip\", \n load_mol_system=load_mol_system, load_ligand=load_ligand)\n \n return pharmacophore\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(n_pharmacophores={self.n_pharmacophores}; n_frames={self._n_frames})\"\n\n \n \n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.all",
"numpy.zeros_like",
"numpy.count_nonzero",
"numpy.digitize",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
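The `numpy.arange` / `numpy.digitize` / `numpy.count_nonzero` entries in this row's `apis` list come from `point_frequency_plot` in the dynophore code above, which discretizes the frames where a pharmacophoric point appears into bins and counts occurrences per bin. A self-contained sketch of that binning step, with toy frame indices standing in for real trajectory data:

```python
import numpy as np

n_frames, n_bins = 1000, 10
# Toy frame indices at which one pharmacophoric point was observed.
timesteps = np.array([3, 17, 120, 121, 450, 451, 452, 980])

# Bin edges over the trajectory, then map each timestep to its bin index.
bins = np.arange(0, n_frames + 1, n_frames / n_bins)
discretized = np.digitize(timesteps, bins)

# Count how many observed timesteps fall into each bin.
counts = np.zeros_like(bins)
for i in range(bins.shape[0]):
    counts[i] = np.count_nonzero(discretized == i)

print(list(zip(bins.tolist(), counts.tolist())))
```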
scott-mao/EOD | [
"f10e64de86c0f356ebf5c7e923f4042eec4207b1",
"f10e64de86c0f356ebf5c7e923f4042eec4207b1",
"f10e64de86c0f356ebf5c7e923f4042eec4207b1"
] | [
"eod/utils/general/saver_helper.py",
"eod/data/samplers/sampler.py",
"eod/tasks/det/data/datasets/coco_dataset.py"
] | [
"# Standard Library\nimport json\nimport os\nimport shutil\n\n# Import from third library\nimport torch\n\n# Import from local\nfrom .log_helper import default_logger as logger\nfrom .registry_factory import SAVER_REGISTRY\n\n\n__all__ = ['Saver']\n\n\n@SAVER_REGISTRY.register('base')\nclass Saver(object):\n def __init__(self, save_cfg, yml_path=None, work_dir='./'):\n # checkpoint dir\n self.save_cfg = self.prepend_work_dir(save_cfg, work_dir)\n self.work_dir = work_dir\n self.save_dir = save_cfg['save_dir']\n os.makedirs(self.save_dir, exist_ok=True)\n if yml_path is not None and 's3://' not in yml_path: # TODO, save cpeh data\n yml_name = os.path.basename(yml_path)\n dst_path = os.path.join(self.save_dir, yml_name)\n shutil.copy(yml_path, dst_path)\n\n self.auto_resume = self.save_cfg.get('auto_resume', False)\n self.running_config_file = os.path.join(self.save_dir, 'running_config.json')\n\n def prepend_work_dir(self, save_cfg, work_dir):\n\n def osp(path):\n return os.path.join(work_dir, path)\n\n save_cfg['save_dir'] = osp(save_cfg['save_dir'])\n save_cfg['results_dir'] = osp(save_cfg['results_dir'])\n\n return save_cfg\n\n @staticmethod\n def get_model_from_ckpt(ckpt_path):\n return Saver.load_checkpoint(ckpt_path)['model']\n\n def load_pretrain_or_resume(self):\n if self.auto_resume:\n last_checkpoint_path = self.find_last_checkpoint()\n if last_checkpoint_path is not None:\n logger.warning('Load checkpoint from {}'.format(last_checkpoint_path))\n return self.load_checkpoint(last_checkpoint_path)\n else:\n logger.warning('Not found any valid checkpoint yet')\n\n if 'resume_model' in self.save_cfg:\n logger.warning('Load checkpoint from {}'.format(self.save_cfg['resume_model']))\n state = self.load_checkpoint(self.save_cfg['resume_model'])\n return state\n elif 'pretrain_model' in self.save_cfg:\n state = self.load_checkpoint(self.save_cfg['pretrain_model'])\n logger.warning('Load checkpoint from {}'.format(self.save_cfg['pretrain_model']))\n output = {}\n if 'ema' in state:\n if \"ema_state_dict\" in state['ema']:\n logger.info(\"Load ema pretrain model\")\n st = state['ema']['ema_state_dict']\n else:\n st = state['model']\n else:\n st = state['model']\n output['model'] = st\n return output\n else:\n logger.warning('Load nothing! 
No weights provided {}')\n return {'model': {}}\n\n @staticmethod\n def load_checkpoint(ckpt_path):\n \"\"\"Load state_dict from checkpoint\"\"\"\n\n def remove_prefix(state_dict, prefix):\n \"\"\"Old style model is stored with all names of parameters share common prefix 'module.'\"\"\"\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x\n return {f(key): value for key, value in state_dict.items()}\n\n # assert os.path.exists(ckpt_path), f'No such file: {ckpt_path}'\n device = torch.cuda.current_device()\n ckpt_dict = torch.load(ckpt_path, map_location=lambda storage, loc: storage.cuda(device))\n\n if 'model' in ckpt_dict:\n state_dict = ckpt_dict['model']\n elif 'state_dict' in ckpt_dict:\n state_dict = ckpt_dict['state_dict']\n else:\n state_dict = ckpt_dict\n\n state_dict = remove_prefix(state_dict, 'module.')\n ckpt_dict['model'] = state_dict\n\n return ckpt_dict\n\n def lns_latest_ckpt(self, ckpt_path, new_path):\n try:\n pwd = os.getcwd()\n absolute_ckpt_path = os.path.join(pwd, ckpt_path)\n absolute_new_path = os.path.join(pwd, new_path)\n if os.path.exists(absolute_new_path):\n os.system(f'rm {absolute_new_path}')\n os.system(f\"ln -s {absolute_ckpt_path} {absolute_new_path}\")\n except Exception as e:\n logger.warning(f'Failed to ln -s {ckpt_path} {new_path}')\n logger.warning(e)\n\n def save(self, epoch, iter, **kwargs):\n \"\"\"Save model checkpoint for one epoch\"\"\"\n os.makedirs(self.save_dir, exist_ok=True)\n # Assume we warmup for a epochs and training a+b epochs in total,\n # then our checkpoints are named of ckpt_e{-a+1}.pth ~ ckpt_e{b}.pth\n # if best in kwargs, we save the best ckpt as ckpt_best.path.auto\n if 'suffix' in kwargs:\n suffix = kwargs['suffix']\n ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}-{}.pth'.format(epoch, suffix))\n elif 'auto_save' in kwargs:\n ckpt_path = os.path.join(self.save_dir, 'ckpt_{}.pth'.format(kwargs['auto_save']))\n else:\n ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}.pth'.format(epoch))\n # since epoch not in kwargs\n kwargs['epoch'] = epoch\n kwargs['iter'] = iter\n kwargs['metric_val'] = kwargs.get('metric_val', -1)\n lns_latest_ckpt = kwargs.pop('lns', True)\n torch.save(kwargs, ckpt_path)\n if lns_latest_ckpt:\n latest_path = os.path.join(self.save_dir, 'ckpt_latest.pth')\n self.lns_latest_ckpt(ckpt_path, latest_path)\n return ckpt_path\n\n def save_model_arch(self, model):\n \"\"\"Save model structure\"\"\"\n os.makedirs(self.save_dir, exist_ok=True)\n meta_path = os.path.join(self.save_dir, 'model_arch.txt')\n with open(meta_path, 'w') as fid:\n fid.write(str(model))\n\n def save_running_config(self, config):\n with open(self.running_config_file, 'w') as rcf:\n json.dump(config, rcf, indent=2)\n\n def find_last_checkpoint(self):\n last_ckpt_path = os.path.join(self.save_dir, \"ckpt_latest.pth\")\n if os.path.exists(last_ckpt_path):\n return last_ckpt_path\n else:\n return None\n",
"# Standard Library\nimport math\nfrom collections import defaultdict\n\n# Import from third library\nimport numpy as np\nimport torch\nfrom torch.utils.data.sampler import Sampler\n\nfrom eod.utils.env.dist_helper import env, get_rank, get_world_size\nfrom eod.utils.general.log_helper import default_logger as logger\nfrom eod.utils.general.registry_factory import SAMPLER_REGISTRY\n\n\n__all__ = ['DistributedSampler', 'LocalSampler', 'TestDistributedSampler']\n\n\n@SAMPLER_REGISTRY.register('dist')\nclass DistributedSampler(Sampler):\n \"\"\"\n Sampler that restricts data loading to a subset of the dataset.\n\n .. note:\n Dataset is assumed to be of constant size.\n\n Arguments:\n dataset (Dataset): dataset used for sampling.\n num_replicas (int): number of processes participating in distributed training, optional.\n rank (int): rank of the current process within num_replicas, optional.\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None, fix_seed=False):\n \"\"\"\n Arguments:\n - dataset (:obj:`dataset`): instance of dataset object\n \"\"\"\n if num_replicas is None:\n num_replicas = env.world_size\n if rank is None:\n rank = env.rank\n\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n self.fix_seed = fix_seed\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch * (not self.fix_seed))\n indices = list(torch.randperm(len(self.dataset), generator=g))\n\n # add extra samples to make it evenly divisible\n # indices += indices[:(self.total_size - len(indices))]\n padding_size = self.total_size - len(indices)\n if padding_size <= len(indices):\n indices += indices[:padding_size]\n else:\n indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]\n assert len(indices) == self.total_size\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n\n@SAMPLER_REGISTRY.register('local')\nclass LocalSampler(Sampler):\n def __init__(self, dataset, rank=None):\n if rank is None:\n rank = env.rank\n self.dataset = dataset\n self.rank = rank\n self.epoch = 0\n self.num_samples = len(self.dataset)\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch + self.rank)\n indices = list(torch.randperm(self.num_samples, generator=g))\n return iter(indices)\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n def __len__(self):\n return self.num_samples\n\n\n@SAMPLER_REGISTRY.register('dist_test')\nclass TestDistributedSampler(Sampler):\n \"\"\"\n Sampler that restricts data loading to a subset of the dataset, but won't align the total data\n size to be divisible by world_size bacause this will lead to duplicate detecton results\n \"\"\"\n\n def __init__(self, dataset, num_replicas=None, rank=None):\n \"\"\"\n Arguments:\n - dataset (:obj:`dataset`): instance of dataset object\n \"\"\"\n if num_replicas is None:\n num_replicas = env.world_size\n if rank is None:\n rank = env.rank\n\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = len(range(rank, len(self.dataset), 
num_replicas))\n self.total_size = len(self.dataset)\n\n def __iter__(self):\n indices = torch.arange(len(self.dataset))\n indices = indices[self.rank::self.num_replicas]\n assert len(indices) == self.num_samples\n return iter(indices)\n\n def __len__(self):\n return self.num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n\n\n@SAMPLER_REGISTRY.register('repeat_factor')\nclass DistributedRepeatFactorReSampler(Sampler):\n \"\"\" Suitable for long-tail distribution datasets.\n Refer to `LVIS <https://arxiv.org/abs/1908.03195>`_ paper\n \"\"\"\n def __init__(self, dataset, t=0.001, ri_mode='random_round', pn=0.5,\n ri_if_empty=1, num_replicas=None, static_size=True, rank=None):\n \"\"\"\n Arguments:\n - dataset (:obj:`Dataset`): dataset used for sampling.\n - t (:obj:`float`): thresh- old that intuitively controls the point at which oversampling kicks in\n - ri_mode (:obj:`str`): choices={floor, round, random_round, ceil, c_ceil_r_f_floor}, method to compute\n repeat factor for one image\n - pn (:obj:`float`): power number\n - num_replicas (int): number of processes participating in distributed training, optional.\n - rank (int): rank of the current process within num_replicas, optional.\n \"\"\"\n if num_replicas is None:\n num_replicas = get_world_size()\n if rank is None:\n rank = get_rank()\n\n self.dataset = dataset\n self.num_replicas = num_replicas\n self.rank = rank\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n self.original_num_samples = self.num_samples\n self.t = t\n self.ri_mode = ri_mode\n self.ri_if_empty = int(ri_if_empty)\n self.pn = pn\n self.static_size = static_size\n self._prepare()\n logger.info('init re-sampler, ri mode: {}'.format(self.ri_mode))\n\n def _prepare(self):\n # prepare re-sampling factor for category\n rc = defaultdict(int)\n img_num_per_class = defaultdict(int)\n for cls, img_num in sorted(self.dataset.num_images_per_class.items()):\n f = img_num / len(self.dataset)\n img_num_per_class[cls] = img_num\n rc[cls] = max(1, math.pow(self.t / f, self.pn))\n logger.info('class id {}, image count {}, rc {}'.format(cls, img_num, rc[cls]))\n self.rc = rc\n\n def _compute_ri(self, img_index):\n classes = self.dataset.get_image_classes(img_index)\n ris = [self.rc[cls] for cls in classes]\n if len(ris) == 0:\n return self.ri_if_empty\n if self.ri_mode == 'floor':\n ri = int(max(ris))\n elif self.ri_mode == 'round':\n ri = round(max(ris))\n elif self.ri_mode == 'random_round':\n ri_max = max(ris)\n p = ri_max - int(ri_max)\n if np.random.rand() < p:\n ri = math.ceil(ri_max)\n else:\n ri = int(ri_max)\n elif self.ri_mode == 'ceil':\n ri = math.ceil(max(ris))\n elif self.ri_mode == 'c_ceil_r_f_floor':\n max_ind = np.argmax(ris)\n assert hasattr(self.dataset, 'lvis'), 'Only lvis dataset supportted for c_ceil_r_f_floor mode'\n img_id = self.dataset.img_ids[img_index]\n meta_annos = self.dataset.lvis.img_ann_map[img_id]\n f = self.dataset.lvis.cats[meta_annos[max_ind]['category_id']]['frequency']\n assert f in ['f', 'c', 'r']\n if f in ['r', 'f']:\n ri = int(max(ris))\n else:\n ri = math.ceil(max(ris))\n else:\n raise NotImplementedError\n return ri\n\n def _get_new_indices(self):\n indices = []\n for idx in range(len(self.dataset)):\n ri = self._compute_ri(idx)\n indices += [idx] * ri\n\n logger.info('dataset size {}, indexes size {}'.format(len(self.dataset), len(indices)))\n return indices\n\n def __iter__(self):\n # deterministically shuffle based 
on epoch\n\n # generate a perm based using class-aware balance for this epoch\n indices = self._get_new_indices()\n\n # override num_sample total size\n self.num_samples = int(math.ceil(len(indices) * 1.0 / self.num_replicas))\n self.total_size = self.num_samples * self.num_replicas\n\n indices = np.random.RandomState(seed=self.epoch).permutation(np.array(indices))\n indices = list(indices)\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n assert len(indices) == self.num_samples\n\n # convert to int because this array will be converted to torch.tensor,\n # but torch.as_tensor dosen't support numpy.int64\n # a = torch.tensor(np.float64(1)) # works\n # b = torch.tensor(np.int64(1)) # fails\n indices = list(map(lambda x: int(x), indices))\n return iter(indices)\n\n def __len__(self):\n return self.original_num_samples\n\n def set_epoch(self, epoch):\n self.epoch = epoch\n",
"from __future__ import division\n\n# Standard Library\nimport copy\nimport os\nfrom collections import defaultdict\n\n# Import from third library\nimport numpy as np\nimport torch\nfrom easydict import EasyDict\nfrom PIL import Image\nfrom pycocotools import mask as maskUtils\nfrom pycocotools.coco import COCO\nfrom torch.nn.modules.utils import _pair\nimport pickle as pk\nfrom eod.utils.env.dist_helper import env\nimport cv2\n\n\nfrom eod.utils.general.context import no_print\nfrom eod.utils.general.registry_factory import DATASET_REGISTRY\nfrom eod.utils.general.global_flag import ALIGNED_FLAG\nfrom eod.data.datasets.base_dataset import BaseDataset\nfrom eod.data.data_utils import get_image_size\n\n\n__all__ = ['CocoDataset']\n\n\nCLASS_NAMES = [\n \"__background__\", \"person\", \"bicycle\", \"car\", \"motorcycle\", \"airplane\",\n \"bus\", \"train\", \"truck\", \"boat\", \"traffic light\", \"fire hydrant\",\n \"stop sign\", \"parking meter\", \"bench\", \"bird\", \"cat\", \"dog\", \"horse\",\n \"sheep\", \"cow\", \"elephant\", \"bear\", \"zebra\", \"giraffe\", \"backpack\",\n \"umbrella\", \"handbag\", \"tie\", \"suitcase\", \"frisbee\", \"skis\", \"snowboard\",\n \"sports ball\", \"kite\", \"baseball bat\", \"baseball glove\", \"skateboard\",\n \"surfboard\", \"tennis racket\", \"bottle\", \"wine glass\", \"cup\", \"fork\",\n \"knife\", \"spoon\", \"bowl\", \"banana\", \"apple\", \"sandwich\", \"orange\",\n \"broccoli\", \"carrot\", \"hot dog\", \"pizza\", \"donut\", \"cake\", \"chair\",\n \"couch\", \"potted plant\", \"bed\", \"dining table\", \"toilet\", \"tv\", \"laptop\",\n \"mouse\", \"remote\", \"keyboard\", \"cell phone\", \"microwave\", \"oven\",\n \"toaster\", \"sink\", \"refrigerator\", \"book\", \"clock\", \"vase\", \"scissors\",\n \"teddy bear\", \"hair drier\", \"toothbrush\"\n]\n\nCLASS_PS_NAMES = CLASS_NAMES + [\n 'banner', 'blanket', 'bridge', 'cardboard',\n 'counter', 'curtain', 'door-stuff', 'floor-wood', 'flower', 'fruit',\n 'gravel', 'house', 'light', 'mirror-stuff', 'net', 'pillow', 'platform',\n 'playingfield', 'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf',\n 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',\n 'wall-wood', 'water-other', 'window-blind', 'window-other', 'tree-merged',\n 'fence-merged', 'ceiling-merged', 'sky-other-merged', 'cabinet-merged',\n 'table-merged', 'floor-other-merged', 'pavement-merged', 'mountain-merged',\n 'grass-merged', 'dirt-merged', 'paper-merged', 'food-other-merged',\n 'building-other-merged', 'rock-merged', 'wall-other-merged', 'rug-merged'\n]\n\n\n@DATASET_REGISTRY.register('coco')\nclass CocoDataset(BaseDataset):\n \"\"\"COCO Dataset\n \"\"\"\n _loader = COCO\n\n def __init__(self,\n meta_file,\n image_reader,\n transformer,\n source='coco',\n class_names=None,\n has_keypoint=False,\n has_mask=False,\n has_grid=False,\n box2mask=False,\n has_semantic_seg=False,\n semantic_seg_prefix=None,\n unknown_erasing=False,\n use_ignore=False,\n evaluator=None,\n cache=None,\n clip_box=True):\n with no_print():\n self.coco = self._loader(meta_file)\n\n category_ids = self.coco.cats.keys()\n\n self.classes = ['__background__'] + [\n self.coco.cats[c]['name'] for c in sorted(category_ids)\n ]\n # map coco discrete category ids to contiguous ids\n self.category_to_class = {\n c: i + 1\n for i, c in enumerate(sorted(category_ids))\n }\n self.class_to_category = {\n i + 1: c\n for i, c in enumerate(sorted(category_ids))\n }\n\n super(CocoDataset, self).__init__(meta_file, image_reader, transformer,\n 
evaluator=evaluator, class_names=self.classes)\n self.has_keypoint = has_keypoint\n self.has_mask = has_mask\n self.has_grid = has_grid\n self.box2mask = box2mask\n self.use_ignore = use_ignore\n self.has_semantic_seg = has_semantic_seg\n self.semantic_seg_prefix = semantic_seg_prefix\n self.unknown_erasing = unknown_erasing\n self.clip_box = clip_box\n\n if self.box2mask:\n assert self.has_mask\n\n for img in self.coco.imgs.values():\n img['aspect_ratio'] = float(img['height']) / img['width']\n self.img_ids = list(set([_['image_id'] for _ in self.coco.anns.values()]))\n\n if len(self.img_ids) == 0:\n # len of img ids is 0 (might because this is test info), load from images'\n self.img_ids = list(self.coco.imgs.keys())\n\n self.img_ids = sorted(self.img_ids)\n self.aspect_ratios = [self.coco.imgs[ix]['aspect_ratio'] for ix in self.img_ids]\n\n self.cache = cache\n if self.cache is not None:\n cache_dir = self.cache.get('cache_dir', './')\n os.makedirs(cache_dir, exist_ok=True)\n cache_name = self.cache.get('cache_name', 'cache.pkl')\n cache_file = os.path.join(cache_dir, cache_name)\n if not os.path.exists(cache_file):\n self.cache_image = {}\n self.cache_dataset()\n if env.is_master():\n with open(cache_file, \"wb\") as f:\n pk.dump(self.cache_image, f)\n else:\n with open(cache_file, \"rb\") as f:\n self.cache_image = pk.load(f)\n\n def cache_dataset(self):\n from multiprocessing.pool import ThreadPool\n NUM_THREADs = min(8, os.cpu_count())\n pool = ThreadPool(NUM_THREADs)\n pool.map(self.set_cache_images, self.img_ids)\n pool.close()\n pool.join()\n\n def set_cache_images(self, img_id):\n meta_img = self.coco.imgs[img_id]\n filename = self.get_filename(meta_img)\n with open(os.path.join(self.image_reader.image_directory(), filename), \"rb\") as f:\n img = np.frombuffer(f.read(), np.uint8)\n self.cache_image[filename] = img\n\n def get_cache_image(self, meta_img):\n filename = self.get_filename(meta_img)\n return self.cache_image[filename]\n\n def __len__(self):\n return len(self.img_ids)\n\n def get_filename(self, meta_img):\n return meta_img['file_name']\n\n def get_image_classes(self, img_index):\n \"\"\"For Repeat Factor Sampler\n \"\"\"\n img_id = self.img_ids[img_index]\n img_anns = self.coco.imgToAnns[img_id]\n return [\n self.category_to_class[ann['category_id']]\n for ann in img_anns if not ann.get('iscrowd', False)\n ]\n\n @property\n def images_per_category(self):\n \"\"\"The results is volatile since it may consume too much memory\n \"\"\"\n coco_img_id_to_img_index = {\n img_id: img_index\n for img_index, img_id in enumerate(self.img_ids)\n }\n to_img_index = lambda img_ids: set([coco_img_id_to_img_index[img_id] for img_id in img_ids])\n return {\n cat: list(to_img_index(img_ids))\n for cat, img_ids in self.coco.catToImgs.items()\n }\n\n @property\n def num_images_per_category(self):\n if hasattr(self, '_num_images_per_category'):\n return self._num_images_per_category\n self._num_images_per_category = {\n cat: len(set(img_list))\n for cat, img_list in self.coco.catToImgs.items()\n }\n return self._num_images_per_category\n\n @property\n def num_instances_per_category(self):\n if hasattr(self, '_num_instances_per_category'):\n return self._num_instances_per_category\n self._num_instances_per_category = defaultdict(int)\n for ann in self.coco.anns.values():\n self._num_instances_per_category[ann['category_id']] += 1\n return self._num_images_per_category\n\n @property\n def images_per_class(self):\n return {\n self.category_to_class[cat]: images\n for cat, images in 
self.images_per_category.items()\n }\n\n @property\n def num_images_per_class(self):\n \"\"\" For Class Aware Balanced Sampler and Repeat Factor Sampler\n \"\"\"\n return {\n self.category_to_class[cat]: num\n for cat, num in self.num_images_per_category.items()\n }\n\n @property\n def num_instances_per_class(self):\n return {\n self.category_to_class[cat]: num\n for cat, num in self.num_instances_per_category.items()\n }\n\n def get_input(self, idx):\n \"\"\" parse annotation into input dict\n \"\"\"\n img_id = self.img_ids[idx]\n meta_img = self.coco.imgs[img_id]\n meta_annos = self.coco.imgToAnns[img_id]\n # filename = os.path.join(self.image_dir, meta_img['file_name'])\n filename = self.get_filename(meta_img)\n\n gt_bboxes, ig_bboxes = [], []\n gt_keyps = [] if self.has_keypoint else None\n gt_masks = [] if self.has_mask else None\n gt_semantic_seg = np.zeros((meta_img['height'], meta_img['width']),\n dtype=np.uint8) if self.has_semantic_seg else None\n if self.has_semantic_seg:\n gt_semantic_seg = np.array(\n Image.open(os.path.join(self.semantic_seg_prefix, os.path.basename(filename).replace('jpg', 'png'))))\n\n for ann in meta_annos:\n # to keep compatible with lvis dataset\n if ann.get('iscrowd', False):\n if self.use_ignore:\n # avoid inplace changing the original data\n bbox = copy.copy(ann['bbox'])\n bbox[2] += bbox[0] - ALIGNED_FLAG.offset\n bbox[3] += bbox[1] - ALIGNED_FLAG.offset\n ig_bboxes.append(bbox)\n continue\n label = self.category_to_class[ann['category_id']]\n bbox = ann['bbox'] + [label]\n bbox[2] += bbox[0] - ALIGNED_FLAG.offset\n bbox[3] += bbox[1] - ALIGNED_FLAG.offset\n gt_bboxes.append(bbox)\n\n if len(ig_bboxes) == 0:\n ig_bboxes = self._fake_zero_data(1, 4)\n if len(gt_bboxes) == 0:\n gt_bboxes = self._fake_zero_data(1, 5)\n\n ig_bboxes = torch.as_tensor(ig_bboxes, dtype=torch.float32) if self.use_ignore else None\n gt_bboxes = torch.as_tensor(gt_bboxes, dtype=torch.float32)\n gt_keyps = torch.stack(gt_keyps) if gt_keyps else None\n gt_masks = gt_masks if gt_masks else None # cannot be a tensor\n\n try:\n if self.cache is not None:\n img = self.get_cache_image(meta_img)\n img = cv2.imdecode(img, cv2.IMREAD_COLOR)\n if self.image_reader.color_mode != 'BGR':\n cvt_color = getattr(cv2, 'COLOR_BGR2{}'.format(self.image_reader.color_mode))\n img = cv2.cvtColor(img, cvt_color)\n else:\n img = self.image_reader(filename)\n except: # noqa \n img = self.image_reader(filename)\n\n input = EasyDict({\n 'image': img,\n 'gt_bboxes': gt_bboxes,\n 'gt_ignores': ig_bboxes,\n 'gt_keyps': gt_keyps,\n 'gt_masks': gt_masks,\n 'gt_semantic_seg': gt_semantic_seg,\n 'flipped': False,\n 'filename': filename,\n 'image_id': img_id,\n 'dataset_idx': idx\n })\n return input\n\n def __getitem__(self, idx):\n \"\"\"\n Get a single image data: from dataset\n\n Arguments:\n - idx (:obj:`int`): index of image, 0 <= idx < len(self)\n\n Returns:\n - input (:obj:`dict`)\n\n Output example::\n\n {\n # (FloatTensor): [1, 3, h, w], RGB format\n 'image': ..,\n # (list): [resized_h, resized_w, scale_factor, origin_h, origin_w]\n 'image_info': ..,\n # (FloatTensor): [N, 5] (x1, y1, x2, y2, label)\n 'gt_bboxes': ..,\n # (FloatTensor): [N, 4] (x1, y1, x2, y2)\n 'ig_bboxes': ..,\n # (FloatTensor): [N, num_keyps, 3] (x, y, flag)\n 'gt_keyps': <tensor>,\n # (list of list of ndarray): [N] [polygons]\n 'gt_masks': [],\n # (list of tuple): [(1, 2), (3, 4), ...], for visualization\n 'keyp_pairs': [],\n # (str): image name\n 'filename': ..\n }\n \"\"\"\n input = self.get_input(idx)\n image_h, image_w = 
get_image_size(input.image)\n input = self.transformer(input)\n\n if self.has_grid:\n gt_grids = self._generate_grids(input.gt_bboxes)\n input.gt_grids = torch.as_tensor(gt_grids, dtype=torch.float32)\n\n scale_factor = input.get('scale_factor', 1)\n new_image_h, new_image_w = get_image_size(input.image)\n pad_w, pad_h = input.get('dw', 0), input.get('dh', 0)\n image_info = [new_image_h, new_image_w, scale_factor, image_h, image_w, input.flipped, pad_w, pad_h]\n input.image_info = image_info\n return input\n\n def _fake_zero_data(self, *size):\n return torch.zeros(size)\n\n def dump(self, output):\n \"\"\"\n Write predicted results into files.\n\n .. note::\n\n Masks are binaried by threshold 0.5 and converted to rle string\n\n Arguments:\n - writer: output stream to write results\n - image_info (FloatTensor): [B, 5] (resized_h, resized_w, scale_factor, origin_h, origin_w)\n - bboxes (FloatTensor): [N, 7] (batch_idx, x1, y1, x2, y2, score, cls)\n - keypoints (FloatTensor): [N, K, 3], (x, y, score)\n - masks (list FloatTensor): [N, h, w]\n \"\"\"\n # filenames = output['filenames']\n # image_info = self.tensor2numpy(output['image_info'])\n image_info = output['image_info']\n bboxes = self.tensor2numpy(output['dt_bboxes'])\n keypoints = self.tensor2numpy(output.get('dt_keyps', None))\n masks = self.tensor2numpy(output.get('dt_masks', None))\n image_ids = output['image_id']\n if len(bboxes) == 0:\n return\n\n dump_results = []\n for b_ix in range(len(image_info)):\n info = image_info[b_ix]\n img_h, img_w = map(int, info[3: 5])\n img_id = image_ids[b_ix]\n\n scores = bboxes[:, 5]\n keep_ix = np.where(bboxes[:, 0] == b_ix)[0]\n keep_ix = sorted(keep_ix, key=lambda ix: scores[ix], reverse=True)\n\n scale_h, scale_w = _pair(info[2])\n img_bboxes = bboxes[keep_ix]\n # sub pad\n pad_w, pad_h = info[6], info[7]\n img_bboxes[:, [1, 3]] -= pad_w\n img_bboxes[:, [2, 4]] -= pad_h\n # clip\n if self.clip_box:\n np.clip(img_bboxes[:, [1, 3]], 0, info[1], out=img_bboxes[:, [1, 3]])\n np.clip(img_bboxes[:, [2, 4]], 0, info[0], out=img_bboxes[:, [2, 4]])\n img_bboxes[:, 1] /= scale_w\n img_bboxes[:, 2] /= scale_h\n img_bboxes[:, 3] /= scale_w\n img_bboxes[:, 4] /= scale_h\n\n x = img_bboxes[:, 1][:, None]\n y = img_bboxes[:, 2][:, None]\n w = (img_bboxes[:, 3] - img_bboxes[:, 1] + ALIGNED_FLAG.offset)[:, None]\n h = (img_bboxes[:, 4] - img_bboxes[:, 2] + ALIGNED_FLAG.offset)[:, None]\n res_bboxes = np.concatenate([x, y, w, h], axis=1)\n\n if keypoints is not None:\n img_keyps = keypoints[keep_ix]\n img_keyps[..., 0] /= scale_w\n img_keyps[..., 1] /= scale_h\n img_keyps = img_keyps.reshape(img_keyps.shape[0], -1)\n\n for idx in range(len(img_bboxes)):\n res = {'image_id': img_id}\n box_score, cls = img_bboxes[idx][5:7]\n res['bbox'] = res_bboxes[idx].tolist()\n res['score'] = float(box_score)\n res['category_id'] = self.class_to_category.get(int(cls), int(cls))\n\n if keypoints is not None:\n res['keypoints'] = img_keyps[idx].tolist()\n\n if masks is not None:\n # We have resized mask to origin image size in mask_head fo fast inference,\n mask = np.asfortranarray(masks[keep_ix[idx]], dtype=np.uint8)\n rle = maskUtils.encode(mask)\n if isinstance(rle['counts'], bytes):\n rle['counts'] = str(rle['counts'], encoding='utf-8')\n res['segmentation'] = rle\n\n dump_results.append(res)\n return dump_results\n"
] | [
[
"torch.save",
"torch.cuda.current_device"
],
[
"torch.Generator",
"torch.randperm",
"numpy.argmax",
"numpy.random.rand",
"numpy.array",
"numpy.random.RandomState"
],
[
"torch.zeros",
"numpy.clip",
"numpy.asfortranarray",
"numpy.concatenate",
"torch.nn.modules.utils._pair",
"torch.stack",
"numpy.zeros",
"numpy.where",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
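The `repeat_factor` sampler in the row above implements the LVIS repeat-factor scheme: a category-level factor r(c) = max(1, (t / f(c))^p), where f(c) is the fraction of images containing category c, and a per-image factor taken as the max over the image's categories, stochastically rounded in the sampler's default `random_round` mode. A standalone sketch of just that computation under toy category counts (the distributed subsampling and dataset plumbing are omitted):

```python
import math
import numpy as np

def category_repeat_factors(img_count_per_class, n_images, t=0.001, p=0.5):
    # r(c) = max(1, (t / f(c))**p), with f(c) = image frequency of class c.
    return {
        cls: max(1.0, math.pow(t / (count / n_images), p))
        for cls, count in img_count_per_class.items()
    }

def image_repeat_factor(image_classes, rc, rng):
    ris = [rc[c] for c in image_classes]
    if not ris:
        return 1  # mirrors ri_if_empty in the sampler above
    ri_max = max(ris)
    frac = ri_max - int(ri_max)
    # 'random_round': round up with probability equal to the fractional part.
    return math.ceil(ri_max) if rng.rand() < frac else int(ri_max)

rng = np.random.RandomState(0)
rc = category_repeat_factors({"common": 90000, "rare": 10}, n_images=100000)
print(rc["common"], rc["rare"])             # 1.0 for common, ~3.16 for rare
print(image_repeat_factor(["rare"], rc, rng))  # rare images repeated ~3x
```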
dynamicguy/imgaug | [
"f58c06323eb04416c76de1f18952ca5875caf883"
] | [
"imgaug/augmenters/weather.py"
] | [
"\"\"\"\nAugmenters that create wheather effects.\n\nDo not import directly from this file, as the categorization is not final.\nUse instead::\n\n from imgaug import augmenters as iaa\n\nand then e.g.::\n\n seq = iaa.Sequential([iaa.Snowflakes()])\n\nList of augmenters:\n\n * FastSnowyLandscape\n * Clouds\n * Fog\n * CloudLayer\n * Snowflakes\n * SnowflakesLayer\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport numpy as np\nimport cv2\n\nfrom . import meta, arithmetic, blur, contrast\nfrom .. import imgaug as ia\nfrom .. import parameters as iap\n\n\nclass FastSnowyLandscape(meta.Augmenter):\n \"\"\"\n Augmenter to convert non-snowy landscapes to snowy ones.\n\n This expects to get an image that roughly shows a landscape.\n\n This is based on the method proposed by\n https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f?gi=bca4a13e634c\n\n Parameters\n ----------\n lightness_threshold : number or tuple of number or list of number\\\n or imgaug.parameters.StochasticParameter, optional\n All pixels with lightness in HLS colorspace below this value will have their lightness increased by\n `lightness_multiplier`.\n\n * If an int, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the discrete range ``[a .. b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n lightness_multiplier : number or tuple of number or list of number\\\n or imgaug.parameters.StochasticParameter, optional\n Multiplier for pixel's lightness value in HLS colorspace. Affects all pixels selected via `lightness_threshold`.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.FastSnowyLandscape(lightness_threshold=140, lightness_multiplier=2.5)\n\n Search for all pixels in the image with a lightness value in HLS colorspace of less than 140 and increase their\n lightness by a factor of 2.5. This is the configuration proposed in the original article (see link above).\n\n >>> aug = iaa.FastSnowyLandscape(lightness_threshold=[128, 200], lightness_multiplier=(1.5, 3.5))\n\n Search for all pixels in the image with a lightness value in HLS colorspace of less than 128 or less than 200\n (one of these values is picked per image) and multiply their lightness by a factor of ``x`` with ``x`` being\n sampled from ``uniform(1.5, 3.5)`` (once per image).\n\n >>> aug = iaa.FastSnowyLandscape(lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0))\n\n Similar to above, but the lightness threshold is sampled from ``uniform(100, 255)`` (per image) and the multiplier\n from ``uniform(1.0, 4.0)`` (per image). 
This seems to produce good and varied results.\n\n \"\"\"\n\n def __init__(self, lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0), name=None, deterministic=False,\n random_state=None):\n super(FastSnowyLandscape, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n\n self.lightness_threshold = iap.handle_continuous_param(lightness_threshold, \"lightness_threshold\",\n value_range=(0, 255),\n tuple_to_uniform=True,\n list_to_choice=True)\n self.lightness_multiplier = iap.handle_continuous_param(lightness_multiplier, \"lightness_multiplier\",\n value_range=(0, None), tuple_to_uniform=True,\n list_to_choice=True)\n\n def _draw_samples(self, augmentables, random_state):\n nb_augmentables = len(augmentables)\n rss = ia.derive_random_states(random_state, 2)\n thresh_samples = self.lightness_threshold.draw_samples((nb_augmentables,), rss[1])\n lmul_samples = self.lightness_multiplier.draw_samples((nb_augmentables,), rss[0])\n return thresh_samples, lmul_samples\n\n def _augment_images(self, images, random_state, parents, hooks):\n input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)\n thresh_samples, lmul_samples = self._draw_samples(images, random_state)\n result = images\n\n for i, (image, input_dtype, thresh, lmul) in enumerate(zip(images, input_dtypes, thresh_samples, lmul_samples)):\n image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float64)\n lightness = image_hls[..., 1]\n\n lightness[lightness < thresh] *= lmul\n\n image_hls = meta.clip_augmented_image_(image_hls, 0, 255) # TODO make value range more flexible\n image_hls = meta.restore_augmented_image_dtype_(image_hls, input_dtype)\n image_rgb = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)\n\n result[i] = image_rgb\n\n return result\n\n def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n return heatmaps\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def get_parameters(self):\n return [self.lightness_threshold, self.lightness_multiplier]\n\n\n# TODO add vertical gradient alpha to have clouds only at skylevel/groundlevel\n# TODO add configurable parameters\ndef Clouds(name=None, deterministic=False, random_state=None):\n \"\"\"\n Augmenter to draw clouds in images.\n\n This is a wrapper around ``CloudLayer``. It executes 1 to 2 layers per image, leading to varying densities\n and frequency patterns of clouds.\n\n This augmenter seems to be fairly robust w.r.t. the image size. 
Tested with ``96x128``, ``192x256``\n and ``960x1280``.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Clouds()\n\n Creates an augmenter that adds clouds to images.\n\n \"\"\"\n if name is None:\n name = \"Unnamed%s\" % (ia.caller_name(),)\n\n return meta.SomeOf((1, 2), children=[\n CloudLayer(\n intensity_mean=(196, 255), intensity_freq_exponent=(-2.5, -2.0), intensity_coarse_scale=10,\n alpha_min=0, alpha_multiplier=(0.25, 0.75), alpha_size_px_max=(2, 8), alpha_freq_exponent=(-2.5, -2.0),\n sparsity=(0.8, 1.0), density_multiplier=(0.5, 1.0)\n ),\n CloudLayer(\n intensity_mean=(196, 255), intensity_freq_exponent=(-2.0, -1.0), intensity_coarse_scale=10,\n alpha_min=0, alpha_multiplier=(0.5, 1.0), alpha_size_px_max=(64, 128), alpha_freq_exponent=(-2.0, -1.0),\n sparsity=(1.0, 1.4), density_multiplier=(0.8, 1.5)\n )\n ], random_order=False, name=name, deterministic=deterministic, random_state=random_state)\n\n\n# TODO add vertical gradient alpha to have fog only at skylevel/groundlevel\n# TODO add configurable parameters\ndef Fog(name=None, deterministic=False, random_state=None):\n \"\"\"\n Augmenter to draw fog in images.\n\n This is a wrapper around ``CloudLayer``. It executes a single layer per image with a configuration leading\n to fairly dense clouds with low-frequency patterns.\n\n This augmenter seems to be fairly robust w.r.t. the image size. Tested with ``96x128``, ``192x256``\n and ``960x1280``.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Fog()\n\n Creates an augmenter that adds fog to images.\n\n \"\"\"\n if name is None:\n name = \"Unnamed%s\" % (ia.caller_name(),)\n\n return CloudLayer(\n intensity_mean=(220, 255), intensity_freq_exponent=(-2.0, -1.5), intensity_coarse_scale=2,\n alpha_min=(0.7, 0.9), alpha_multiplier=0.3, alpha_size_px_max=(2, 8), alpha_freq_exponent=(-4.0, -2.0),\n sparsity=0.9, density_multiplier=(0.4, 0.9),\n name=name, deterministic=deterministic, random_state=random_state\n )\n\n\n# TODO add perspective transform to each cloud layer to make them look more distant?\n# TODO alpha_mean and density overlap - remove one of them\nclass CloudLayer(meta.Augmenter):\n \"\"\"\n Augmenter to add a single layer of clouds to an image.\n\n Parameters\n ----------\n intensity_mean : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Mean intensity of the clouds (i.e. mean color). 
Recommended to be around ``(190, 255)``.\n\n            * If a number, then that value will be used for all images.\n            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n            * If a list, then a random value will be sampled from that list per image.\n            * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n    intensity_freq_exponent : number or tuple of number or list of number\\\n            or imgaug.parameters.StochasticParameter\n        Exponent of the frequency noise used to add fine intensity to the mean intensity.\n        Recommended to be somewhere around ``(-2.5, -1.5)``.\n        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.\n\n    intensity_coarse_scale : number or tuple of number or list of number\\\n            or imgaug.parameters.StochasticParameter\n        Standard deviation of the gaussian distribution used to add more localized intensity to the mean intensity.\n        Sampled in low resolution space, i.e. affects final intensity on a coarse level. Recommended to be\n        around ``(0, 10)``.\n\n            * If a number, then that value will be used for all images.\n            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n            * If a list, then a random value will be sampled from that list per image.\n            * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n    alpha_min : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n        Minimum alpha when blending cloud noise with the image. High values will lead to clouds being \"everywhere\".\n        Recommended to usually be around ``0.0`` for clouds and ``>0`` for fog.\n\n            * If a number, then that value will be used for all images.\n            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n            * If a list, then a random value will be sampled from that list per image.\n            * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n    alpha_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n        Multiplier for the sampled alpha values. High values will lead to denser clouds wherever they are visible.\n        Recommended to be around ``(0.3, 1.0)``. Note that this parameter currently overlaps with\n        `density_multiplier`, which is applied a bit later to the alpha mask.\n\n            * If a number, then that value will be used for all images.\n            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n            * If a list, then a random value will be sampled from that list per image.\n            * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n    alpha_size_px_max : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n        Controls the image size at which the alpha mask is sampled. Lower values will lead to coarser alpha masks\n        and hence larger clouds (and empty areas).\n        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.\n\n    alpha_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n        Exponent of the frequency noise used to sample the alpha mask. Similarly to `alpha_size_px_max`, lower values\n        will lead to coarser alpha patterns. Recommended to be somewhere around ``(-4.0, -1.5)``.\n        See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.\n\n    sparsity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n        Exponent applied late to the alpha mask. 
Lower values will lead to coarser cloud patterns, higher values\n        to finer patterns. Recommended to be somewhere around ``1.0``. Do not deviate far from that value, otherwise\n        the alpha mask might get weird patterns with sudden fall-offs to zero that look very unnatural.\n\n            * If a number, then that value will be used for all images.\n            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n            * If a list, then a random value will be sampled from that list per image.\n            * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n    density_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n        Late multiplier for the alpha mask, similar to `alpha_multiplier`. Set this higher to get \"denser\" clouds\n        wherever they are visible. Recommended to be around ``(0.5, 1.5)``.\n\n            * If a number, then that value will be used for all images.\n            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n            * If a list, then a random value will be sampled from that list per image.\n            * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n    name : None or str, optional\n        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n    deterministic : bool, optional\n        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n    random_state : None or int or numpy.random.RandomState, optional\n        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n    \"\"\"\n    def __init__(self, intensity_mean, intensity_freq_exponent, intensity_coarse_scale,\n                 alpha_min, alpha_multiplier, alpha_size_px_max, alpha_freq_exponent,\n                 sparsity, density_multiplier,\n                 name=None, deterministic=False, random_state=None):\n        super(CloudLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n        self.intensity_mean = iap.handle_continuous_param(intensity_mean, \"intensity_mean\")\n        self.intensity_freq_exponent = intensity_freq_exponent\n        self.intensity_coarse_scale = intensity_coarse_scale\n        self.alpha_min = iap.handle_continuous_param(alpha_min, \"alpha_min\")\n        self.alpha_multiplier = iap.handle_continuous_param(alpha_multiplier, \"alpha_multiplier\")\n        self.alpha_size_px_max = alpha_size_px_max\n        self.alpha_freq_exponent = alpha_freq_exponent\n        self.sparsity = iap.handle_continuous_param(sparsity, \"sparsity\")\n        self.density_multiplier = iap.handle_continuous_param(density_multiplier, \"density_multiplier\")\n\n    def _augment_images(self, images, random_state, parents, hooks):\n        rss = ia.derive_random_states(random_state, len(images))\n        result = images\n        for i, (image, rs) in enumerate(zip(images, rss)):\n            result[i] = self.draw_on_image(image, rs)\n        return result\n\n    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n        return heatmaps\n\n    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n        return keypoints_on_images\n\n    def get_parameters(self):\n        # only attributes that are actually set in __init__ may be returned here\n        return [self.intensity_mean, self.alpha_min, self.alpha_multiplier, self.alpha_size_px_max,\n                self.alpha_freq_exponent, self.intensity_freq_exponent, self.sparsity,\n                self.density_multiplier,\n                self.intensity_coarse_scale]\n\n    def draw_on_image(self, image, random_state):\n        alpha, intensity = self.generate_maps(image, random_state)\n        alpha = alpha[..., np.newaxis]\n        intensity = intensity[..., np.newaxis]\n        return np.clip(\n            (1 - alpha) * image.astype(np.float64) + alpha * intensity.astype(np.float64),\n            0,\n            255\n        
).astype(np.uint8)\n\n def generate_maps(self, image, random_state):\n intensity_mean_sample = self.intensity_mean.draw_sample(random_state)\n alpha_min_sample = self.alpha_min.draw_sample(random_state)\n alpha_multiplier_sample = self.alpha_multiplier.draw_sample(random_state)\n alpha_size_px_max = self.alpha_size_px_max\n intensity_freq_exponent = self.intensity_freq_exponent\n alpha_freq_exponent = self.alpha_freq_exponent\n sparsity_sample = self.sparsity.draw_sample(random_state)\n density_multiplier_sample = self.density_multiplier.draw_sample(random_state)\n\n height, width = image.shape[0:2]\n rss_alpha, rss_intensity = ia.derive_random_states(random_state, 2)\n\n intensity_coarse = self._generate_intensity_map_coarse(\n height, width, intensity_mean_sample,\n iap.Normal(0, scale=self.intensity_coarse_scale),\n rss_intensity\n )\n intensity_fine = self._generate_intensity_map_fine(height, width, intensity_mean_sample,\n intensity_freq_exponent, rss_intensity)\n intensity = np.clip(intensity_coarse + intensity_fine, 0, 255)\n\n alpha = self._generate_alpha_mask(height, width, alpha_min_sample, alpha_multiplier_sample,\n alpha_freq_exponent, alpha_size_px_max,\n sparsity_sample, density_multiplier_sample, rss_alpha)\n\n return alpha, intensity\n\n @classmethod\n def _generate_intensity_map_coarse(cls, height, width, intensity_mean, intensity_local_offset, random_state):\n height_intensity, width_intensity = (8, 8) # TODO this might be too simplistic for some image sizes\n intensity = intensity_mean\\\n + intensity_local_offset.draw_samples((height_intensity, width_intensity), random_state)\n intensity = ia.imresize_single_image(np.clip(intensity, 0, 255).astype(np.uint8), (height, width),\n interpolation=\"cubic\")\n\n return intensity\n\n @classmethod\n def _generate_intensity_map_fine(cls, height, width, intensity_mean, exponent, random_state):\n intensity_details_generator = iap.FrequencyNoise(\n exponent=exponent,\n size_px_max=max(height, width),\n upscale_method=\"cubic\"\n )\n intensity_details = intensity_details_generator.draw_samples((height, width), random_state)\n return intensity_mean * ((2*intensity_details - 1.0)/5.0)\n\n @classmethod\n def _generate_alpha_mask(cls, height, width, alpha_min, alpha_multiplier, exponent, alpha_size_px_max, sparsity,\n density_multiplier, random_state):\n alpha_generator = iap.FrequencyNoise(\n exponent=exponent,\n size_px_max=alpha_size_px_max,\n upscale_method=\"cubic\"\n )\n alpha_local = alpha_generator.draw_samples((height, width), random_state)\n alpha = alpha_min + (alpha_multiplier * alpha_local)\n alpha = (alpha ** sparsity) * density_multiplier\n alpha = np.clip(alpha, 0.0, 1.0)\n\n return alpha\n\n\ndef Snowflakes(density=(0.005, 0.075), density_uniformity=(0.3, 0.9), flake_size=(0.2, 0.7),\n flake_size_uniformity=(0.4, 0.8), angle=(-30, 30), speed=(0.007, 0.03),\n name=None, deterministic=False, random_state=None):\n \"\"\"\n Augmenter to add falling snowflakes to images.\n\n This is a wrapper around ``SnowflakesLayer``. It executes 1 to 3 layers per image.\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.\n Valid value range is ``(0.0, 1.0)``. 
Recommended to be around ``(0.01, 0.075)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size uniformity of the snowflakes. Higher values denote more similarly sized snowflakes.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size of the snowflakes. This parameter controls the resolution at which snowflakes are sampled.\n Higher values mean that the resolution is closer to the input image's resolution and hence each sampled\n snowflake will be smaller (because of the smaller pixel size).\n\n Valid value range is ``[0.0, 1.0)``. Recommended values:\n\n * On ``96x128`` a value of ``(0.1, 0.4)`` worked well.\n * On ``192x256`` a value of ``(0.2, 0.7)`` worked well.\n * On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are more similarly\n sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur that points straight\n upwards. Recommended to be around ``(-30, 30)``.\n See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Perceived falling speed of the snowflakes. This parameter controls the motion blur's kernel size.\n It follows roughly the form ``kernel_size = image_size * speed``. 
Hence,\n Values around ``1.0`` denote that the motion blur should \"stretch\" each snowflake over the whole image.\n\n Valid value range is ``(0.0, 1.0)``. Recommended values:\n\n * On ``96x128`` a value of ``(0.01, 0.05)`` worked well.\n * On ``192x256`` a value of ``(0.007, 0.03)`` worked well.\n * On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.\n\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))\n\n Adds snowflakes to small images (around ``96x128``).\n\n >>> aug = iaa.Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03))\n\n Adds snowflakes to medium-sized images (around ``192x256``).\n\n >>> aug = iaa.Snowflakes(flake_size=(0.7, 0.95), speed=(0.001, 0.03))\n\n Adds snowflakes to large images (around ``960x1280``).\n\n \"\"\"\n if name is None:\n name = \"Unnamed%s\" % (ia.caller_name(),)\n\n layer = SnowflakesLayer(\n density=density, density_uniformity=density_uniformity,\n flake_size=flake_size, flake_size_uniformity=flake_size_uniformity,\n angle=angle, speed=speed,\n blur_sigma_fraction=(0.0001, 0.001)\n )\n\n return meta.SomeOf(\n (1, 3), children=[layer.deepcopy() for _ in range(3)],\n random_order=False, name=name, deterministic=deterministic, random_state=random_state\n )\n\n\n# TODO snowflakes are all almost 100% white, add some grayish tones and maybe color to them\nclass SnowflakesLayer(meta.Augmenter):\n \"\"\"\n Augmenter to add a single layer of falling snowflakes to images.\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.01, 0.075)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size uniformity of the snowflakes. Higher values denote more similarly sized snowflakes.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size of the snowflakes. 
This parameter controls the resolution at which snowflakes are sampled.\n Higher values mean that the resolution is closer to the input image's resolution and hence each sampled\n snowflake will be smaller (because of the smaller pixel size).\n\n Valid value range is ``[0.0, 1.0)``. Recommended values:\n\n * On 96x128 a value of ``(0.1, 0.4)`` worked well.\n * On 192x256 a value of ``(0.2, 0.7)`` worked well.\n * On 960x1280 a value of ``(0.7, 0.95)`` worked well.\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are more similarly\n sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur that points straight\n upwards. Recommended to be around ``(-30, 30)``.\n See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Perceived falling speed of the snowflakes. This parameter controls the motion blur's kernel size.\n It follows roughly the form ``kernel_size = image_size * speed``. Hence,\n Values around ``1.0`` denote that the motion blur should \"stretch\" each snowflake over the whole image.\n\n Valid value range is ``(0.0, 1.0)``. Recommended values:\n\n * On 96x128 a value of ``(0.01, 0.05)`` worked well.\n * On 192x256 a value of ``(0.007, 0.03)`` worked well.\n * On 960x1280 a value of ``(0.001, 0.03)`` worked well.\n\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n blur_sigma_fraction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Standard deviation (as a fraction of the image size) of gaussian blur applied to the snowflakes.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.0001, 0.001)``. 
May still require tinkering\n        based on image size.\n\n            * If a number, then that value will be used for all images.\n            * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n            * If a list, then a random value will be sampled from that list per image.\n            * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n    blur_sigma_limits : tuple of float, optional\n        Controls the allowed min and max values of `blur_sigma_fraction` after(!) multiplication with the image size.\n        First value is the minimum, second value is the maximum. Values outside of that range will be clipped to be\n        within that range. This prevents extreme values for very small or large images.\n\n    name : None or str, optional\n        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n    deterministic : bool, optional\n        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n    random_state : None or int or numpy.random.RandomState, optional\n        See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n    \"\"\"\n    def __init__(self, density, density_uniformity, flake_size, flake_size_uniformity, angle, speed, blur_sigma_fraction,\n                 blur_sigma_limits=(0.5, 3.75), name=None, deterministic=False,\n                 random_state=None):\n        super(SnowflakesLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n        self.density = density\n        self.density_uniformity = iap.handle_continuous_param(density_uniformity, \"density_uniformity\",\n                                                              value_range=(0.0, 1.0))\n        self.flake_size = iap.handle_continuous_param(flake_size, \"flake_size\", value_range=(0.0+1e-4, 1.0))\n        self.flake_size_uniformity = iap.handle_continuous_param(flake_size_uniformity, \"flake_size_uniformity\",\n                                                                 value_range=(0.0, 1.0))\n        self.angle = iap.handle_continuous_param(angle, \"angle\")\n        self.speed = iap.handle_continuous_param(speed, \"speed\", value_range=(0.0, 1.0))\n        self.blur_sigma_fraction = iap.handle_continuous_param(blur_sigma_fraction, \"blur_sigma_fraction\",\n                                                               value_range=(0.0, 1.0))\n        self.blur_sigma_limits = blur_sigma_limits  # (min, max), same for all images\n        self.gate_noise_size = (8, 8)  # (height, width), same for all images\n\n    def _augment_images(self, images, random_state, parents, hooks):\n        rss = ia.derive_random_states(random_state, len(images))\n        result = images\n        for i, (image, rs) in enumerate(zip(images, rss)):\n            result[i] = self.draw_on_image(image, rs)\n        return result\n\n    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n        return heatmaps\n\n    def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n        return keypoints_on_images\n\n    def get_parameters(self):\n        return [self.density, self.density_uniformity, self.flake_size, self.flake_size_uniformity, self.angle,\n                self.speed, self.blur_sigma_fraction, self.blur_sigma_limits, self.gate_noise_size]\n\n    def draw_on_image(self, image, random_state):\n        flake_size_sample = self.flake_size.draw_sample(random_state)\n        flake_size_uniformity_sample = self.flake_size_uniformity.draw_sample(random_state)\n        angle_sample = self.angle.draw_sample(random_state)\n        speed_sample = self.speed.draw_sample(random_state)\n        blur_sigma_fraction_sample = self.blur_sigma_fraction.draw_sample(random_state)\n\n        height, width = image.shape[0:2]\n        downscale_factor = np.clip(1.0 - flake_size_sample, 0.001, 1.0)\n        height_down, width_down = int(height*downscale_factor), int(width*downscale_factor)\n        noise = self._generate_noise(\n            height_down,\n            width_down,\n            self.density,\n            ia.derive_random_state(random_state)\n        
)\n\n # gate the sampled noise via noise in range [0.0, 1.0]\n # this leads to less flakes in some areas of the image and more in other areas\n gate_noise = iap.Beta(1.0, 1.0 - self.density_uniformity)\n noise = self._gate(noise, gate_noise, self.gate_noise_size, ia.derive_random_state(random_state))\n noise = ia.imresize_single_image(noise, (height, width), interpolation=\"cubic\")\n\n # apply a bit of gaussian blur and then motion blur according to angle and speed\n sigma = max(height, width) * blur_sigma_fraction_sample\n sigma = np.clip(sigma, self.blur_sigma_limits[0], self.blur_sigma_limits[1])\n noise_small_blur = self._blur(noise, sigma, random_state)\n noise_small_blur = self._motion_blur(noise_small_blur, angle=angle_sample, speed=speed_sample,\n random_state=random_state)\n\n # use contrast adjustment of noise to make the flake size a bit less uniform\n # then readjust the noise values to make them more visible again\n gain = 1.0 + 2*(1 - flake_size_uniformity_sample)\n gain_adj = 1.0 + 5*(1 - flake_size_uniformity_sample)\n noise_small_blur = contrast.GammaContrast(gain).augment_image(noise_small_blur)\n noise_small_blur = noise_small_blur.astype(np.float32) * gain_adj\n noise_small_blur_rgb = np.tile(noise_small_blur[..., np.newaxis], (1, 1, 3))\n\n # blend:\n # sum for a bit of glowy, hardly visible flakes\n # max for the main flakes\n image_f32 = image.astype(np.float32)\n image_f32 = self._blend_by_sum(image_f32, (0.1 + 20*speed_sample) * noise_small_blur_rgb)\n image_f32 = self._blend_by_max(image_f32, (1.0 + 20*speed_sample) * noise_small_blur_rgb)\n return image_f32\n\n @classmethod\n def _generate_noise(cls, height, width, density, random_state):\n noise = arithmetic.Salt(p=density, random_state=random_state)\n return noise.augment_image(np.zeros((height, width), dtype=np.uint8))\n\n @classmethod\n def _gate(cls, noise, gate_noise, gate_size, random_state):\n # the beta distribution here has most of its weight around 1.0 and will only rarely sample values around 0.0\n # the average of the sampled values seems to be at around 0.6-0.75\n gate_noise = gate_noise.draw_samples(gate_size, random_state)\n gate_noise_up = ia.imresize_single_image(gate_noise, noise.shape[0:2], interpolation=\"cubic\")\n gate_noise_up = np.clip(gate_noise_up, 0.0, 1.0)\n return np.clip(noise.astype(np.float32) * gate_noise_up, 0, 255).astype(np.uint8)\n\n @classmethod\n def _blur(cls, noise, sigma, random_state):\n blurer = blur.GaussianBlur(sigma, random_state=random_state)\n return blurer.augment_image(noise)\n\n @classmethod\n def _motion_blur(cls, noise, angle, speed, random_state):\n size = max(noise.shape[0:2])\n k = int(speed * size)\n if k <= 1:\n return noise\n\n # we use max(k, 3) here because MotionBlur errors for anything less than 3\n blurer = blur.MotionBlur(k=max(k, 3), angle=angle, direction=1.0, random_state=random_state)\n return blurer.augment_image(noise)\n\n @classmethod\n def _blend_by_sum(cls, image_f32, noise_small_blur_rgb):\n image_f32 = image_f32 + noise_small_blur_rgb\n return np.clip(image_f32, 0, 255).astype(np.uint8)\n\n @classmethod\n def _blend_by_max(cls, image_f32, noise_small_blur_rgb):\n image_f32 = np.maximum(image_f32, noise_small_blur_rgb)\n return np.clip(image_f32, 0, 255).astype(np.uint8)\n"
] | [
[
"numpy.maximum",
"numpy.tile",
"numpy.zeros",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
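The CloudLayer code in the record above reduces to two steps: sample an alpha mask from frequency noise, post-process it with alpha_min, alpha_multiplier, sparsity and density_multiplier, then alpha-blend a bright intensity map over the image. The following self-contained NumPy sketch mirrors that math; toy_cloud_layer is a hypothetical helper written for this note, and Gaussian-blurred uniform noise stands in for imgaug's iap.FrequencyNoise (an assumption made purely to keep the example short).

import numpy as np
from scipy.ndimage import gaussian_filter, zoom

def toy_cloud_layer(image, alpha_min=0.0, alpha_multiplier=0.5,
                    sparsity=1.0, density_multiplier=1.0,
                    intensity_mean=220, rng=None):
    # hypothetical stand-in for CloudLayer.draw_on_image; not imgaug API
    rng = np.random.default_rng() if rng is None else rng
    h, w = image.shape[:2]
    # blurred low-resolution noise approximates iap.FrequencyNoise (assumption)
    noise = gaussian_filter(rng.uniform(0.0, 1.0, size=(h // 8, w // 8)), sigma=2)
    noise = zoom(noise, (h / noise.shape[0], w / noise.shape[1]), order=1)
    noise = (noise - noise.min()) / (noise.max() - noise.min() + 1e-8)
    # same late transforms as CloudLayer._generate_alpha_mask
    alpha = alpha_min + alpha_multiplier * noise
    alpha = np.clip((alpha ** sparsity) * density_multiplier, 0.0, 1.0)[..., np.newaxis]
    # same blend as CloudLayer.draw_on_image, with a flat intensity map
    intensity = np.full(image.shape, intensity_mean, dtype=np.float64)
    out = (1.0 - alpha) * image.astype(np.float64) + alpha * intensity
    return np.clip(out, 0, 255).astype(np.uint8)

image = np.zeros((96, 128, 3), dtype=np.uint8)
cloudy = toy_cloud_layer(image)  # gray cloud patches over a black image

Raising sparsity above 1.0 sharpens the mask's fall-off toward zero (fewer but denser clouds), which is why the docstring warns against straying far from 1.0.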
computational-imaging/DeepOpticsHDR | [
"1180749b028dd21f6b7140c0538fe332bd29bb46"
] | [
"src/optics_numpy.py"
] | [
"#Julie Chang and Chris Metzler 2020\nimport abc\n\n# import tensorflow as tf\nimport numpy as np\n# import matplotlib as mpl\n# mpl.use('TKAgg')\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom numpy.fft import ifftshift\nimport fractions\n# import layers.optics_no_transpose as optics\n#import optics_no_transpose as optics\nfrom skimage.transform import resize\nfrom skimage.measure import block_reduce\nfrom scipy.ndimage import gaussian_filter\n# from scipy.interpolate import RectBivariateSpline\nimport scipy.interpolate as interp\nfrom skimage.io import imsave\n\ndef phaseshifts_from_height_map(height_map, wave_lengths, refractive_idcs, dtype=np.complex64):\n '''Calculates the phase shifts created by a height map with certain\n refractive index for light with specific wave length.\n '''\n # refractive index difference\n delta_N = refractive_idcs.reshape([1,-1,1,1]) - 1.\n # wave number\n wave_nos = 2. * np.pi / wave_lengths\n wave_nos = wave_nos.reshape([1,-1,1,1])\n # phase delay indiced by height field\n phi = wave_nos * delta_N * height_map\n phase_shifts = np.exp(1j*phi)\n return phase_shifts\n\ndef get_vanilla_zernike_height_map(zernike_volume, zernike_coeffs, output_resolution=None):\n heightmap_zernike = np.sum(zernike_coeffs * zernike_volume, axis=0)\n if output_resolution is not None:\n heightmap_zernike = resize(heightmap_zernike, output_resolution)\n return heightmap_zernike\n\nclass PhasePlate():\n def __init__(self,\n wave_lengths,\n height_map,\n refractive_idcs,\n height_tolerance=None,\n lateral_tolerance=None,\n dtype=np.complex64):\n\n self.wave_lengths = wave_lengths\n self.height_map = height_map\n self.resolution = np.array(np.shape(height_map))\n self.refractive_idcs=refractive_idcs\n self.height_tolerance=height_tolerance\n self.lateral_tolerance=lateral_tolerance\n self.dtype = dtype\n\n def __call__(self, input_field):\n # Add manufacturing tolerances in the form of height map noise\n if self.height_tolerance is not None:\n self.height_map += np.random.uniform(low=-self.height_tolerance,\n high=self.height_tolerance,\n size=self.height_map.shape)\n print(\"Phase plate with manufacturing tolerance %0.2e\"%self.height_tolerance)\n\n self.phase_shifts = phaseshifts_from_height_map(self.height_map,\n self.wave_lengths,\n self.refractive_idcs,\n dtype=self.dtype)\n\n input_field = input_field.astype(self.dtype)\n return input_field * self.phase_shifts\n\ndef psf2otf(input_filter, output_size):\n \"\"\"Convert 4D tensorflow filter into its FFT.\n Input shape: [in_channels, out_channels, height, width]\n \"\"\"\n # pad out to output_size with zeros\n # circularly shift so center pixel is at 0,0\n _, _, fh, fw = np.shape(input_filter)\n \n if output_size[0] != fh:\n pad = (output_size[0] - fh)/2\n\n if (output_size[0] - fh) % 2 != 0:\n pad_top = pad_left = int(np.ceil(pad))\n pad_bottom = pad_right = int(np.floor(pad))\n else:\n pad_top = pad_left = int(pad) + 1\n pad_bottom = pad_right = int(pad) - 1\n\n padded = np.pad(input_filter, ((0,0), (0,0), (pad_top, pad_bottom),\n (pad_left, pad_right)), mode='constant')\n else:\n padded = input_filter\n\n padded = np.fft.ifftshift(padded, axes=(2,3))\n tmp = np.fft.fft2(padded)\n\n return tmp\n\ndef propagate_exact(input_field, kernels):\n\n _, _, M_orig, N_orig = np.shape(input_field)\n\n # zero padding.\n Mpad = M_orig//2\n Npad = N_orig//2\n\n M = M_orig + 2*Mpad\n N = N_orig + 2*Npad\n\n padded_input_field = np.pad(input_field,\n ((0,0), (0,0), (Mpad,Mpad), (Npad,Npad)),\n mode='constant')\n\n objFT = 
np.fft.fft2(padded_input_field)\n    out_field = np.fft.ifft2( objFT * kernels)\n\n    out_field = out_field[:,:,Mpad:-Mpad,Npad:-Npad]  # crop Mpad rows and Npad columns of padding\n\n    return out_field\n\ndef plano_convex_initializer(focal_length,\n                             wave_lengths,\n                             wave_resolution,\n                             discretization_step,\n                             refractive_idx):\n    convex_radius = (refractive_idx - 1.) * focal_length\n    N,M = wave_resolution\n    [x, y] = np.mgrid[-N//2:N//2,\n                      -M//2:M//2].astype(np.float64)\n\n    x = x * discretization_step\n    y = y * discretization_step\n    x = x.reshape([N,M])\n    y = y.reshape([N,M])\n\n    # This approximates the spherical surface with quadratic-phase surfaces.\n    height_map = -(x ** 2 + y ** 2) / 2. * (1. / convex_radius)\n    # height_map = np.mod(height_map, get_one_phase_shift_thickness(wave_lengths[0], refractive_idcs[0]))\n    # return tf.constant(np.sqrt(height_map), dtype=dtype)\n    \n    return height_map\n\ndef circular_aperture(input_field, r_cutoff=None):\n    try:\n        input_shape = np.shape(input_field)\n    except:\n        input_shape = input_field.shape\n\n    [x, y] = np.mgrid[-input_shape[2] // 2: input_shape[2] // 2,\n                      -input_shape[3] // 2: input_shape[3] // 2].astype(np.float64)\n\n    if r_cutoff is None:\n        r_cutoff = np.amax(x)\n\n    r = np.sqrt(x ** 2 + y ** 2)[None,None,:,:]\n    aperture = (r<r_cutoff).astype(np.float32)\n    return aperture * input_field\n\ndef get_psfs(optical_element,\n             depth_values,\n             wave_lengths,\n             optical_feature_size,\n             sensor_distance,\n             propagation_kernel,\n             psf_resolution=None,\n             sampling_factor=None,\n             use_circular_aperture=True,\n             r_cutoff=None,\n             amplitude_mask=None,\n             use_planar_incidence=False,\n             dtype=np.complex64,\n             sigma=None,\n             get_otfs=True,\n             otf_resolution=None):\n\n    wave_resolution = optical_element.resolution\n    physical_size = wave_resolution[0] * optical_feature_size\n    # what about magnification\n    \n    N, M = wave_resolution\n    [x, y] = np.mgrid[-N//2:N//2,\n                      -M//2:M//2].astype(np.float64)\n\n    x = x/N * physical_size\n    y = y/M * physical_size\n\n    squared_sum = x**2 + y**2\n    squared_sum = squared_sum[None,None,:,:]\n\n    wave_nos = 2. 
* np.pi / wave_lengths\n wave_nos = wave_nos.reshape([1,-1,1,1])\n\n input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])\n input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)\n input_fields = np.exp(1.j * wave_nos * input_fields)\n\n if use_circular_aperture:\n input_fields = circular_aperture(input_fields, r_cutoff)\n if amplitude_mask is not None:\n input_fields = input_fields * amplitude_mask\n\n psfs = []\n otfs = []\n # calculate PSF for each depth\n for depth_idx in range(len(depth_values)):\n # propagate through optical element\n input_field = input_fields[depth_idx:depth_idx+1,:,:,:]\n field = optical_element(input_field)\n\n # propagate field to sensor\n sensor_incident_field = propagate_exact(field, propagation_kernel)\n psf = np.square(np.abs(sensor_incident_field))\n psf_edit = []\n for wavelength in range(np.shape(psf)[1]):\n psf_image = np.squeeze(psf[0,wavelength,:,:])\n if psf_resolution is not None:\n psf_image = np.array(Image.fromarray(psf_image).resize((psf_resolution[0], psf_resolution[1]),\n resample=Image.BILINEAR))\n if sampling_factor is not None:\n psf_image = block_reduce(psf_image, block_size=(sampling_factor,sampling_factor), func=np.mean)\n if sigma is not None:\n psf_image = gaussian_filter(psf_image, sigma)\n psf_image /= np.sum(psf_image)\n psf_edit.append(np.expand_dims(np.expand_dims(psf_image, axis=0), axis=0))\n \n psf = np.concatenate(psf_edit, axis=1)\n psfs.append(psf)\n \n # calculate OTF as well\n if get_otfs:\n if otf_resolution is None:\n otf_resolution = np.shape(psf)[2:3]\n otf = psf2otf(psf, otf_resolution)\n otfs.append(otf)\n\n return psfs, otfs\n\n\ndef get_psfs_coherent(optical_element,\n depth_values,\n wave_lengths,\n optical_feature_size,\n sensor_distance,\n propagation_kernel,\n psf_resolution=None,\n use_circular_aperture=True,\n r_cutoff=None,\n use_planar_incidence=False,\n dtype=np.complex64,\n get_otfs=True,\n otf_resolution=None):\n\n wave_resolution = optical_element.resolution\n physical_size = wave_resolution[0] * optical_feature_size\n # what about magnification\n \n N, M = wave_resolution\n [x, y] = np.mgrid[-N//2:N//2,\n -M//2:M//2].astype(np.float64)\n\n x = x/N * physical_size\n y = y/M * physical_size\n\n squared_sum = x**2 + y**2\n squared_sum = squared_sum[None,None,:,:]\n\n wave_nos = 2. 
* np.pi / wave_lengths\n wave_nos = wave_nos.reshape([1,-1,1,1])\n\n input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])\n input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)\n input_fields = np.exp(1.j * wave_nos * input_fields)\n\n if use_circular_aperture:\n input_fields = circular_aperture(input_fields, r_cutoff)\n\n psfs = []\n otfs = []\n # calculate PSF for each depth\n for depth_idx in range(len(depth_values)):\n # propagate through optical element\n input_field = input_fields[depth_idx:depth_idx+1,:,:,:]\n field = optical_element(input_field)\n\n # propagate field to sensor\n sensor_incident_field = propagate_exact(field, propagation_kernel)\n psf = sensor_incident_field\n # psf_edit = []\n # for wavelength in range(np.shape(psf)[1]):\n # psf_image = np.squeeze(psf[0,wavelength,:,:])\n # if psf_resolution is not None:\n # psf_image = np.array(Image.fromarray(psf_image).resize((psf_resolution[0], psf_resolution[1])))\n # psf_image /= np.sum(np.abs(psf_image))\n # psf_edit.append(np.expand_dims(np.expand_dims(psf_image, axis=0), axis=0))\n \n # psf = np.concatenate(psf_edit, axis=1)\n psfs.append(psf)\n \n # calculate OTF as well\n if get_otfs:\n otf = np.fft.fft2(psf)\n otfs.append(otf)\n\n return psfs, otfs\n\ndef PhaseShiftThinLens_rgb(focal_length,wave_lengths,wave_resolution,optical_feature_size,refractive_idcs):\n #Output is 1 x wave_resolution x wave_resolution x 3\n height_map_thinlens_0 = plano_convex_initializer(focal_length,\n wave_lengths[0],\n wave_resolution,\n optical_feature_size,\n refractive_idcs[0])\n PhaseThinLens_0 = phaseshifts_from_height_map(height_map_thinlens_0, wave_lengths[0],\n refractive_idcs[0])\n height_map_thinlens_1 = plano_convex_initializer(focal_length,\n wave_lengths[1],\n wave_resolution,\n optical_feature_size,\n refractive_idcs[1])\n PhaseThinLens_1 = phaseshifts_from_height_map(height_map_thinlens_1, wave_lengths[1],\n refractive_idcs[1])\n height_map_thinlens_2 = plano_convex_initializer(focal_length,\n wave_lengths[2],\n wave_resolution,\n optical_feature_size,\n refractive_idcs[2])\n PhaseThinLens_2 = phaseshifts_from_height_map(height_map_thinlens_2, wave_lengths[2],\n refractive_idcs[2])\n PhaseThinLens = np.concatenate((PhaseThinLens_0, PhaseThinLens_1, PhaseThinLens_2), axis=1)\n PhaseThinLens = np.transpose(PhaseThinLens, [0, 2, 3, 1])\n return PhaseThinLens\n\ndef SaveHeightasTiff(height_map,filename,input_feature_size=4.29e-6,output_feature_size=1e-6,mask_size=5.6e-3,quantization_res=21.16e-9,Interp_Method='Nearest'):\n #height_map is given in meters and should be saved as a 32-bit integer where 0=0 nm and 1=21.16 nm (quantization_res)\n #Interpolate the height_map to a higher resolution, then resample at the output_feature_size\n #Nearest neighbor interpolation works by far the best\n assert (np.allclose(np.mod(mask_size, output_feature_size), 0.)), \"mask_size must be a common multiple of the output_feature_size\"\n height_map = height_map/1e-6#Perform interpolation in um\n x_input = np.arange(height_map.shape[0]) * input_feature_size\n y_input = np.arange(height_map.shape[1]) * input_feature_size\n if Interp_Method=='Nearest':\n f = interp.RegularGridInterpolator((x_input,y_input), height_map,method='nearest',bounds_error=False,fill_value=0.)\n elif Interp_Method=='Linear':\n f = interp.RegularGridInterpolator((x_input, y_input), height_map, method='linear', bounds_error=False, fill_value=0.)\n else:\n f = interp.RectBivariateSpline(x_input, y_input, height_map, 
bbox=[None, None, None, None], kx=3, ky=3, s=0)\n    n_pixel_out = int(mask_size / output_feature_size)\n    if Interp_Method=='Nearest' or Interp_Method=='Linear':\n        grid_x_out, grid_y_out = np.mgrid[0:n_pixel_out, 0:n_pixel_out]*output_feature_size\n        grid_x_out=grid_x_out.flatten()\n        grid_y_out=grid_y_out.flatten()\n        points_out = np.array((grid_x_out,grid_y_out)).T\n        resampled_height_map = f(points_out)\n        resampled_height_map=np.reshape(resampled_height_map,(n_pixel_out,n_pixel_out))\n    else:\n        x_output = np.arange(n_pixel_out) * output_feature_size\n        y_output = np.arange(n_pixel_out) * output_feature_size\n        resampled_height_map = f(x_output,y_output)\n    resampled_height_map = np.clip(resampled_height_map,height_map.min(),height_map.max())\n\n    # Quantize the height map down to a multiple of quantization_res (via np.floor). Save as a fp value in um and as an integer value, where 0 = 0 and 1 = quantization_res\n    quantized_resampled_height_map_fp = (np.floor((resampled_height_map)/(quantization_res/1e-6))*(quantization_res/1e-6)).astype(np.float32)\n    quantized_resampled_height_map_int = (np.floor((resampled_height_map) / (quantization_res / 1e-6))).astype(np.int32)  # integer multiples of quantization_res (21.16nm)\n\n    # import matplotlib.pyplot as plt\n    # plt.subplot(121)\n    # imgplot = plt.imshow((height_map))\n    # plt.colorbar(imgplot)\n    # plt.title('Height Map After Interpolation')\n    # plt.subplot(122)\n    # imgplot = plt.imshow((resampled_height_map))\n    # plt.colorbar(imgplot)\n    # plt.title('Height Map After Interpolation')\n    # plt.show()\n    #\n    # import matplotlib.pyplot as plt\n    # plt.subplot(121)\n    # height_map_slice = height_map[1000,:]\n    # imgplot = plt.hist(height_map_slice)\n    # plt.title('Height Map Slice After Interpolation')\n    # plt.subplot(122)\n    # resampled_height_map_slice = resampled_height_map[2500,:]\n    # imgplot = plt.hist(resampled_height_map_slice)\n    # plt.title('Height Map Slice After Interpolation')\n    # plt.show()\n\n    filename_fp=filename + \"_fp32_wrt_um.tiff\"\n    imsave(filename_fp, quantized_resampled_height_map_fp)\n    filename_int=filename + \"_integer.tiff\"\n    imsave(filename_int, quantized_resampled_height_map_int)\n    return [resampled_height_map,quantized_resampled_height_map_fp,quantized_resampled_height_map_int]\n"
] | [
[
"numpy.amax",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.squeeze",
"numpy.concatenate",
"numpy.exp",
"numpy.pad",
"numpy.reshape",
"numpy.arange",
"scipy.interpolate.RegularGridInterpolator",
"numpy.ceil",
"numpy.fft.ifftshift",
"numpy.fft.fft2",
"scipy.interpolate.RectBivariateSpline",
"numpy.floor",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.fft.ifft2",
"numpy.abs",
"scipy.ndimage.gaussian_filter",
"numpy.shape",
"numpy.mod",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
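A note on the psf2otf/propagate_exact pattern in the record above: both rely on the FFT convolution theorem, and psf2otf must circularly shift the zero-padded PSF so its center sits at index (0, 0) before the FFT. The sketch below uses a hypothetical single-channel helper psf_to_otf (np.roll replaces the pad-then-ifftshift dance of the 4D original, purely for brevity) and checks the idea against a delta PSF.

import numpy as np

def psf_to_otf(psf, out_shape):
    # hypothetical 2D variant of the repo's 4D psf2otf, for illustration only
    h, w = psf.shape
    padded = np.zeros(out_shape, dtype=np.float64)
    padded[:h, :w] = psf
    # circularly shift so the PSF center lands at index (0, 0)
    padded = np.roll(padded, (-(h // 2), -(w // 2)), axis=(0, 1))
    return np.fft.fft2(padded)

rng = np.random.default_rng(0)
img = rng.random((32, 32))
psf = np.zeros((5, 5))
psf[2, 2] = 1.0                      # delta PSF: blurring should be a no-op
otf = psf_to_otf(psf, img.shape)
blurred = np.real(np.fft.ifft2(np.fft.fft2(img) * otf))
assert np.allclose(blurred, img)     # frequency-domain product == circular convolution

Without the shift, multiplying by the OTF would still convolve, but with the PSF anchored at the top-left corner, so the output would come back translated by half the kernel size.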
qrsforever/workspace | [
"53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f",
"53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f",
"53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f",
"53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f",
"53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f"
] | [
"ML/learn/intro_convolution.py",
"ML/learn/tensorflow/tf_print.py",
"python/learn/numpy/cut.py",
"python/learn/matplot/base.py",
"python/learn/thinkbayes/thinkplot.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# @file intro_convolution.py\n# @brief\n# @author QRS\n# @blog qrsforever.github.io\n# @version 1.0\n# @date 2019-06-03 20:52:26\n\n################################ jupyter-vim #######################################\n# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim\n# %pylab --no-import-all # noqa\n#####################################################################################\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#####################################################################################\n# <codecell>\n#####################################################################################\n\na = np.array([200, 200])\nb = np.array([a, a])\n\nkernel_horizonal = np.array([np.array([2, 2]), np.array([-2, 2])])\n\nnp.multiply(b, kernel_horizonal)\n",
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# @file tf_print.py\n# @brief\n# @author QRS\n# @blog qrsforever.github.io\n# @version 1.0\n# @date 2019-06-02 15:40:55\n\n################################ jupyter-vim #######################################\n# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim\n# %pylab --no-import-all # noqa\n#####################################################################################\n\n# https://github.com/yaroslavvb/memory_util\n\nimport sys\nimport tensorflow as tf\nimport memory_util\n\nmemory_util.vlog(1)\n\nsess = tf.Session()\nwith sess.as_default():\n tensor = tf.range(10)\n print_op = tf.print(\"tensors:\", tensor, {'2': tensor * 2}, output_stream=sys.stderr)\n with tf.control_dependencies([print_op]):\n tripled_tensor = tensor * 3\n with memory_util.capture_stderr() as stderr:\n print(sess.run(tripled_tensor))\n print(stderr.getvalue())\n",
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport random\nimport numpy as np\n\nsizes = np.array([2, 3, 1])\n\nsizes[1:] # array([3, 1])\nsizes[:-1] # array([2, 3])\n\n\n###\nb = [np.random.randn(y, 1) for y in sizes[1:]]\nb\n\n###\nc = [np.random.randn(3, 1), np.random.randn(1, 1)]\nc\n\n###\ndata = np.array([[1,2,3],[4,5,6],[7,8,9]])\ndata\n\n### \nnp.random.shuffle(data)\ndata\n\n### that's not your desire\nrandom.shuffle(data)\ndata\n\n\n### that's ok for 1-d\na = [10,20,30,40,50,60]\nrandom.shuffle(a)\na\n\n### that's ok for 2-d\nd = [(1,2,3), (4,5,6), (7,8,9)]\nrandom.shuffle(d)\nd\n\n###\nbiases = [np.random.randn(y, 1) for y in sizes[1:]]\nweights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]\nprint(biases, np.shape(biases))\nprint(weights, np.shape(weights))\n\nfor b, w in zip(biases, weights):\n print('\\n')\n print(b)\n print(w)\n",
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nprint(plt.style.available)\nplt.style.use('ggplot')\n\nprint(matplotlib.matplotlib_fname())\n\n# 设置中文和负号正常显示\nplt.rcParams['font.sans-serif'] = 'monospace'\nplt.rcParams['axes.unicode_minus'] = False\n\n\n# ioff() # Turn interactive plotting off\n# ion() # Turn interactive plotting on\nplt.ion() # 自动显示出图片\n\n",
"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"This file contains code for use with \"Think Stats\",\nby Allen B. Downey, available from greenteapress.com\n\nCopyright 2014 Allen B. Downey\nLicense: GNU GPLv3 http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\nfrom __future__ import print_function\n\nimport math\nimport matplotlib\nimport matplotlib.pyplot as pyplot\nimport numpy as np\nimport pandas\n\nimport warnings\n\n# customize some matplotlib attributes\n#matplotlib.rc('figure', figsize=(4, 3))\n\n#matplotlib.rc('font', size=14.0)\n#matplotlib.rc('axes', labelsize=22.0, titlesize=22.0)\n#matplotlib.rc('legend', fontsize=20.0)\n\n#matplotlib.rc('xtick.major', size=6.0)\n#matplotlib.rc('xtick.minor', size=3.0)\n\n#matplotlib.rc('ytick.major', size=6.0)\n#matplotlib.rc('ytick.minor', size=3.0)\n\n\nclass _Brewer(object):\n \"\"\"Encapsulates a nice sequence of colors.\n\n Shades of blue that look good in color and can be distinguished\n in grayscale (up to a point).\n \n Borrowed from http://colorbrewer2.org/\n \"\"\"\n color_iter = None\n\n colors = ['#f7fbff', '#deebf7', '#c6dbef',\n '#9ecae1', '#6baed6', '#4292c6',\n '#2171b5','#08519c','#08306b'][::-1]\n\n # lists that indicate which colors to use depending on how many are used\n which_colors = [[],\n [1],\n [1, 3],\n [0, 2, 4],\n [0, 2, 4, 6],\n [0, 2, 3, 5, 6],\n [0, 2, 3, 4, 5, 6],\n [0, 1, 2, 3, 4, 5, 6],\n [0, 1, 2, 3, 4, 5, 6, 7],\n [0, 1, 2, 3, 4, 5, 6, 7, 8],\n ]\n\n current_figure = None\n\n @classmethod\n def Colors(cls):\n \"\"\"Returns the list of colors.\n \"\"\"\n return cls.colors\n\n @classmethod\n def ColorGenerator(cls, num):\n \"\"\"Returns an iterator of color strings.\n\n n: how many colors will be used\n \"\"\"\n for i in cls.which_colors[num]:\n yield cls.colors[i]\n raise StopIteration('Ran out of colors in _Brewer.')\n\n @classmethod\n def InitIter(cls, num):\n \"\"\"Initializes the color iterator with the given number of colors.\"\"\"\n cls.color_iter = cls.ColorGenerator(num)\n\n @classmethod\n def ClearIter(cls):\n \"\"\"Sets the color iterator to None.\"\"\"\n cls.color_iter = None\n\n @classmethod\n def GetIter(cls, num):\n \"\"\"Gets the color iterator.\"\"\"\n fig = pyplot.gcf()\n if fig != cls.current_figure:\n cls.InitIter(num)\n cls.current_figure = fig \n\n if cls.color_iter is None:\n cls.InitIter(num)\n\n return cls.color_iter\n\n\ndef _UnderrideColor(options):\n \"\"\"If color is not in the options, chooses a color.\n \"\"\"\n if 'color' in options:\n return options\n\n # get the current color iterator; if there is none, init one\n color_iter = _Brewer.GetIter(5)\n\n try:\n options['color'] = next(color_iter)\n except StopIteration:\n # if you run out of colors, initialize the color iterator\n # and try again\n warnings.warn('Ran out of colors. 
Starting over.')\n _Brewer.ClearIter()\n _UnderrideColor(options)\n\n return options\n\n\ndef PrePlot(num=None, rows=None, cols=None):\n \"\"\"Takes hints about what's coming.\n\n num: number of lines that will be plotted\n rows: number of rows of subplots\n cols: number of columns of subplots\n \"\"\"\n if num:\n _Brewer.InitIter(num)\n\n if rows is None and cols is None:\n return\n\n if rows is not None and cols is None:\n cols = 1\n\n if cols is not None and rows is None:\n rows = 1\n\n # resize the image, depending on the number of rows and cols\n size_map = {(1, 1): (8, 6),\n (1, 2): (12, 6),\n (1, 3): (12, 6),\n (2, 2): (10, 10),\n (2, 3): (16, 10),\n (3, 1): (8, 10),\n (4, 1): (8, 12),\n }\n\n if (rows, cols) in size_map:\n fig = pyplot.gcf()\n fig.set_size_inches(*size_map[rows, cols])\n\n # create the first subplot\n if rows > 1 or cols > 1:\n ax = pyplot.subplot(rows, cols, 1)\n global SUBPLOT_ROWS, SUBPLOT_COLS\n SUBPLOT_ROWS = rows\n SUBPLOT_COLS = cols\n else:\n ax = pyplot.gca()\n\n return ax\n\ndef SubPlot(plot_number, rows=None, cols=None, **options):\n \"\"\"Configures the number of subplots and changes the current plot.\n\n rows: int\n cols: int\n plot_number: int\n options: passed to subplot\n \"\"\"\n rows = rows or SUBPLOT_ROWS\n cols = cols or SUBPLOT_COLS\n return pyplot.subplot(rows, cols, plot_number, **options)\n\n\ndef _Underride(d, **options):\n \"\"\"Add key-value pairs to d only if key is not in d.\n\n If d is None, create a new dictionary.\n\n d: dictionary\n options: keyword args to add to d\n \"\"\"\n if d is None:\n d = {}\n\n for key, val in options.items():\n d.setdefault(key, val)\n\n return d\n\n\ndef Clf():\n \"\"\"Clears the figure and any hints that have been set.\"\"\"\n global LOC\n LOC = None\n _Brewer.ClearIter()\n pyplot.clf()\n fig = pyplot.gcf()\n fig.set_size_inches(8, 6)\n\n\ndef Figure(**options):\n \"\"\"Sets options for the current figure.\"\"\"\n _Underride(options, figsize=(6, 8))\n pyplot.figure(**options)\n\n\ndef Plot(obj, ys=None, style='', **options):\n \"\"\"Plots a line.\n\n Args:\n obj: sequence of x values, or Series, or anything with Render()\n ys: sequence of y values\n style: style string passed along to pyplot.plot\n options: keyword args passed to pyplot.plot\n \"\"\"\n options = _UnderrideColor(options)\n label = getattr(obj, 'label', '_nolegend_')\n options = _Underride(options, linewidth=3, alpha=0.7, label=label)\n\n xs = obj\n if ys is None:\n if hasattr(obj, 'Render'):\n xs, ys = obj.Render()\n if isinstance(obj, pandas.Series):\n ys = obj.values\n xs = obj.index\n\n if ys is None:\n pyplot.plot(xs, style, **options)\n else:\n pyplot.plot(xs, ys, style, **options)\n\n\ndef Vlines(xs, y1, y2, **options):\n \"\"\"Plots a set of vertical lines.\n\n Args:\n xs: sequence of x values\n y1: sequence of y values\n y2: sequence of y values\n options: keyword args passed to pyplot.vlines\n \"\"\"\n options = _UnderrideColor(options)\n options = _Underride(options, linewidth=1, alpha=0.5)\n pyplot.vlines(xs, y1, y2, **options)\n\n\ndef Hlines(ys, x1, x2, **options):\n \"\"\"Plots a set of horizontal lines.\n\n Args:\n ys: sequence of y values\n x1: sequence of x values\n x2: sequence of x values\n options: keyword args passed to pyplot.vlines\n \"\"\"\n options = _UnderrideColor(options)\n options = _Underride(options, linewidth=1, alpha=0.5)\n pyplot.hlines(ys, x1, x2, **options)\n\n\ndef FillBetween(xs, y1, y2=None, where=None, **options):\n \"\"\"Fills the space between two lines.\n\n Args:\n xs: sequence of x values\n y1: 
sequence of y values\n y2: sequence of y values\n where: sequence of boolean\n options: keyword args passed to pyplot.fill_between\n \"\"\"\n options = _UnderrideColor(options)\n options = _Underride(options, linewidth=0, alpha=0.5)\n pyplot.fill_between(xs, y1, y2, where, **options)\n\n\ndef Bar(xs, ys, **options):\n \"\"\"Plots a bar chart.\n\n Args:\n xs: sequence of x values\n ys: sequence of y values\n options: keyword args passed to pyplot.bar\n \"\"\"\n options = _UnderrideColor(options)\n options = _Underride(options, linewidth=0, alpha=0.6)\n pyplot.bar(xs, ys, **options)\n\n\ndef Scatter(xs, ys=None, **options):\n \"\"\"Makes a scatter plot.\n\n xs: x values\n ys: y values\n options: options passed to pyplot.scatter\n \"\"\"\n options = _Underride(options, color='blue', alpha=0.2, \n s=30, edgecolors='none')\n\n if ys is None and isinstance(xs, pandas.Series):\n ys = xs.values\n xs = xs.index\n\n pyplot.scatter(xs, ys, **options)\n\n\ndef HexBin(xs, ys, **options):\n \"\"\"Makes a hexbin plot.\n\n xs: x values\n ys: y values\n options: options passed to pyplot.hexbin\n \"\"\"\n options = _Underride(options, cmap=matplotlib.cm.Blues)\n pyplot.hexbin(xs, ys, **options)\n\n\ndef Pdf(pdf, **options):\n \"\"\"Plots a Pdf, Pmf, or Hist as a line.\n\n Args:\n pdf: Pdf, Pmf, or Hist object\n options: keyword args passed to pyplot.plot\n \"\"\"\n low, high = options.pop('low', None), options.pop('high', None)\n n = options.pop('n', 101)\n xs, ps = pdf.Render(low=low, high=high, n=n)\n options = _Underride(options, label=pdf.label)\n Plot(xs, ps, **options)\n\n\ndef Pdfs(pdfs, **options):\n \"\"\"Plots a sequence of PDFs.\n\n Options are passed along for all PDFs. If you want different\n options for each pdf, make multiple calls to Pdf.\n \n Args:\n pdfs: sequence of PDF objects\n options: keyword args passed to pyplot.plot\n \"\"\"\n for pdf in pdfs:\n Pdf(pdf, **options)\n\n\ndef Hist(hist, **options):\n \"\"\"Plots a Pmf or Hist with a bar plot.\n\n The default width of the bars is based on the minimum difference\n between values in the Hist. If that's too small, you can override\n it by providing a width keyword argument, in the same units\n as the values.\n\n Args:\n hist: Hist or Pmf object\n options: keyword args passed to pyplot.bar\n \"\"\"\n # find the minimum distance between adjacent values\n xs, ys = hist.Render()\n\n if 'width' not in options:\n try:\n options['width'] = 0.9 * np.diff(xs).min()\n except TypeError:\n warnings.warn(\"Hist: Can't compute bar width automatically. \"\n \"Check for non-numeric types in Hist, \"\n \"or try providing the width option.\"\n )\n\n options = _Underride(options, label=hist.label)\n options = _Underride(options, align='center')\n if options['align'] == 'left':\n options['align'] = 'edge'\n elif options['align'] == 'right':\n options['align'] = 'edge'\n options['width'] *= -1\n\n Bar(xs, ys, **options)\n\n\ndef Hists(hists, **options):\n \"\"\"Plots a sequence of Hists (or Pmfs) as bar plots.\n\n Options are passed along for all Hists. 
If you want different\n options for each hist, make multiple calls to Hist.\n\n Args:\n hists: sequence of Hist or Pmf objects\n options: keyword args passed to pyplot.bar\n \"\"\"\n for hist in hists:\n Hist(hist, **options)\n\n\ndef Pmf(pmf, **options):\n \"\"\"Plots a Pmf or Hist as a line.\n\n Args:\n pmf: Hist or Pmf object\n options: keyword args passed to pyplot.plot\n \"\"\"\n xs, ys = pmf.Render()\n low, high = min(xs), max(xs)\n\n width = options.pop('width', None)\n if width is None:\n try:\n width = np.diff(xs).min()\n except TypeError:\n warnings.warn(\"Pmf: Can't compute bar width automatically. \"\n \"Check for non-numeric types in Pmf, \"\n \"or try providing the width option.\")\n points = []\n\n lastx = np.nan\n lasty = 0\n for x, y in zip(xs, ys):\n if (x - lastx) > 1e-5:\n points.append((lastx, 0))\n points.append((x, 0))\n\n points.append((x, lasty))\n points.append((x, y))\n points.append((x+width, y))\n\n lastx = x + width\n lasty = y\n points.append((lastx, 0))\n pxs, pys = zip(*points)\n\n align = options.pop('align', 'center')\n if align == 'center':\n pxs = np.array(pxs) - width/2.0\n if align == 'right':\n pxs = np.array(pxs) - width\n\n options = _Underride(options, label=pmf.label)\n Plot(pxs, pys, **options)\n\n\ndef Pmfs(pmfs, **options):\n \"\"\"Plots a sequence of PMFs.\n\n Options are passed along for all PMFs. If you want different\n options for each pmf, make multiple calls to Pmf.\n \n Args:\n pmfs: sequence of PMF objects\n options: keyword args passed to pyplot.plot\n \"\"\"\n for pmf in pmfs:\n Pmf(pmf, **options)\n\n\ndef Diff(t):\n \"\"\"Compute the differences between adjacent elements in a sequence.\n\n Args:\n t: sequence of numbers\n\n Returns:\n sequence of differences (length one less than t)\n \"\"\"\n diffs = [t[i+1] - t[i] for i in range(len(t)-1)]\n return diffs\n\n\ndef Cdf(cdf, complement=False, transform=None, **options):\n \"\"\"Plots a CDF as a line.\n\n Args:\n cdf: Cdf object\n complement: boolean, whether to plot the complementary CDF\n transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'\n options: keyword args passed to pyplot.plot\n\n Returns:\n dictionary with the scale options that should be passed to\n Config, Show or Save.\n \"\"\"\n xs, ps = cdf.Render()\n xs = np.asarray(xs)\n ps = np.asarray(ps)\n\n scale = dict(xscale='linear', yscale='linear')\n\n for s in ['xscale', 'yscale']: \n if s in options:\n scale[s] = options.pop(s)\n\n if transform == 'exponential':\n complement = True\n scale['yscale'] = 'log'\n\n if transform == 'pareto':\n complement = True\n scale['yscale'] = 'log'\n scale['xscale'] = 'log'\n\n if complement:\n ps = [1.0-p for p in ps]\n\n if transform == 'weibull':\n xs = np.delete(xs, -1)\n ps = np.delete(ps, -1)\n ps = [-math.log(1.0-p) for p in ps]\n scale['xscale'] = 'log'\n scale['yscale'] = 'log'\n\n if transform == 'gumbel':\n xs = np.delete(xs, 0)\n ps = np.delete(ps, 0)\n ps = [-math.log(p) for p in ps]\n scale['yscale'] = 'log'\n\n options = _Underride(options, label=cdf.label)\n Plot(xs, ps, **options)\n return scale\n\n\ndef Cdfs(cdfs, complement=False, transform=None, **options):\n \"\"\"Plots a sequence of CDFs.\n \n cdfs: sequence of CDF objects\n complement: boolean, whether to plot the complementary CDF\n transform: string, one of 'exponential', 'pareto', 'weibull', 'gumbel'\n options: keyword args passed to pyplot.plot\n \"\"\"\n for cdf in cdfs:\n Cdf(cdf, complement, transform, **options)\n\n\ndef Contour(obj, pcolor=False, contour=True, imshow=False, **options):\n 
\"\"\"Makes a contour plot.\n \n d: map from (x, y) to z, or object that provides GetDict\n pcolor: boolean, whether to make a pseudocolor plot\n contour: boolean, whether to make a contour plot\n imshow: boolean, whether to use pyplot.imshow\n options: keyword args passed to pyplot.pcolor and/or pyplot.contour\n \"\"\"\n try:\n d = obj.GetDict()\n except AttributeError:\n d = obj\n\n _Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)\n\n xs, ys = zip(*d.keys())\n xs = sorted(set(xs))\n ys = sorted(set(ys))\n\n X, Y = np.meshgrid(xs, ys)\n func = lambda x, y: d.get((x, y), 0)\n func = np.vectorize(func)\n Z = func(X, Y)\n\n x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)\n axes = pyplot.gca()\n axes.xaxis.set_major_formatter(x_formatter)\n\n if pcolor:\n pyplot.pcolormesh(X, Y, Z, **options)\n if contour:\n cs = pyplot.contour(X, Y, Z, **options)\n pyplot.clabel(cs, inline=1, fontsize=10)\n if imshow:\n extent = xs[0], xs[-1], ys[0], ys[-1]\n pyplot.imshow(Z, extent=extent, **options)\n \n\ndef Pcolor(xs, ys, zs, pcolor=True, contour=False, **options):\n \"\"\"Makes a pseudocolor plot.\n \n xs:\n ys:\n zs:\n pcolor: boolean, whether to make a pseudocolor plot\n contour: boolean, whether to make a contour plot\n options: keyword args passed to pyplot.pcolor and/or pyplot.contour\n \"\"\"\n _Underride(options, linewidth=3, cmap=matplotlib.cm.Blues)\n\n X, Y = np.meshgrid(xs, ys)\n Z = zs\n\n x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)\n axes = pyplot.gca()\n axes.xaxis.set_major_formatter(x_formatter)\n\n if pcolor:\n pyplot.pcolormesh(X, Y, Z, **options)\n\n if contour:\n cs = pyplot.contour(X, Y, Z, **options)\n pyplot.clabel(cs, inline=1, fontsize=10)\n \n\ndef Text(x, y, s, **options):\n \"\"\"Puts text in a figure.\n\n x: number\n y: number\n s: string\n options: keyword args passed to pyplot.text\n \"\"\"\n options = _Underride(options,\n fontsize=16,\n verticalalignment='top',\n horizontalalignment='left')\n pyplot.text(x, y, s, **options)\n\n\nLEGEND = True\nLOC = None\n\ndef Config(**options):\n \"\"\"Configures the plot.\n\n Pulls options out of the option dictionary and passes them to\n the corresponding pyplot functions.\n \"\"\"\n names = ['title', 'xlabel', 'ylabel', 'xscale', 'yscale',\n 'xticks', 'yticks', 'axis', 'xlim', 'ylim']\n\n for name in names:\n if name in options:\n getattr(pyplot, name)(options[name])\n\n global LEGEND\n LEGEND = options.get('legend', LEGEND)\n\n if LEGEND:\n global LOC\n LOC = options.get('loc', LOC)\n pyplot.legend(loc=LOC)\n\n val = options.get('xticklabels', None)\n if val is not None:\n if val == 'invisible':\n ax = pyplot.gca()\n labels = ax.get_xticklabels()\n pyplot.setp(labels, visible=False)\n\n val = options.get('yticklabels', None)\n if val is not None:\n if val == 'invisible':\n ax = pyplot.gca()\n labels = ax.get_yticklabels()\n pyplot.setp(labels, visible=False)\n\n\ndef Show(**options):\n \"\"\"Shows the plot.\n\n For options, see Config.\n\n options: keyword args used to invoke various pyplot functions\n \"\"\"\n clf = options.pop('clf', True)\n Config(**options)\n pyplot.show()\n if clf:\n Clf()\n\n\ndef Plotly(**options):\n \"\"\"Shows the plot.\n\n For options, see Config.\n\n options: keyword args used to invoke various pyplot functions\n \"\"\"\n clf = options.pop('clf', True)\n Config(**options)\n import plotly.plotly as plotly\n url = plotly.plot_mpl(pyplot.gcf())\n if clf:\n Clf()\n return url\n\n\ndef Save(root=None, formats=None, **options):\n \"\"\"Saves the plot in the given 
formats and clears the figure.\n\n For options, see Config.\n\n Args:\n root: string filename root\n formats: list of string formats\n options: keyword args used to invoke various pyplot functions\n \"\"\"\n clf = options.pop('clf', True)\n Config(**options)\n\n if formats is None:\n formats = ['pdf', 'eps']\n\n try:\n formats.remove('plotly')\n Plotly(clf=False)\n except ValueError:\n pass\n\n if root:\n for fmt in formats:\n SaveFormat(root, fmt)\n if clf:\n Clf()\n\n\ndef SaveFormat(root, fmt='eps'):\n \"\"\"Writes the current figure to a file in the given format.\n\n Args:\n root: string filename root\n fmt: string format\n \"\"\"\n filename = '%s.%s' % (root, fmt)\n print('Writing', filename)\n pyplot.savefig(filename, format=fmt, dpi=300)\n\n\n# provide aliases for calling functions with lower-case names\npreplot = PrePlot\nsubplot = SubPlot\nclf = Clf\nfigure = Figure\nplot = Plot\nvlines = Vlines\nhlines = Hlines\nfill_between = FillBetween\ntext = Text\nscatter = Scatter\npmf = Pmf\npmfs = Pmfs\nhist = Hist\nhists = Hists\ndiff = Diff\ncdf = Cdf\ncdfs = Cdfs\ncontour = Contour\npcolor = Pcolor\nconfig = Config\nshow = Show\nsave = Save\n\n\ndef main():\n color_iter = _Brewer.ColorGenerator(7)\n for color in color_iter:\n print(color)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array",
"numpy.multiply"
],
[
"tensorflow.print",
"tensorflow.range",
"tensorflow.Session",
"tensorflow.control_dependencies"
],
[
"numpy.shape",
"numpy.array",
"numpy.random.randn",
"numpy.random.shuffle"
],
[
"matplotlib.matplotlib_fname",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.style.use"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"numpy.asarray",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.hlines",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot",
"numpy.diff",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.pyplot.text",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.hexbin",
"matplotlib.pyplot.savefig",
"numpy.delete",
"matplotlib.pyplot.fill_between",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.clf",
"numpy.vectorize",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.setp",
"matplotlib.pyplot.pcolormesh"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
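A note on the plotting module in the record above: nearly every wrapper routes its keyword arguments through `_Underride`, which layers defaults under the caller's options without overwriting them. A minimal standalone sketch of that pattern (names here are illustrative, not from the repo):

def underride(d, **defaults):
    # Fill d with defaults only for keys the caller did not already set.
    d = {} if d is None else d
    for key, val in defaults.items():
        d.setdefault(key, val)
    return d

opts = underride({'alpha': 0.9}, alpha=0.5, linewidth=3)
assert opts == {'alpha': 0.9, 'linewidth': 3}  # caller's alpha wins; the linewidth default fills in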
wmkai/quantization | [
"351d184527e9867e0394878cf91b64ffd5c6b109"
] | [
"micronet/compression/quantization/wbwtab/bn_fuse/bn_fuse.py"
] | [
"import copy\nimport sys\nsys.path.append(\"..\")\nsys.path.append(\"../../../..\")\nimport os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom models import nin_gc, nin\n\nimport quantize\n\n\n# ******************** 是否保存模型完整参数 ********************\n#torch.set_printoptions(precision=8, edgeitems=sys.maxsize, linewidth=200, sci_mode=False)\n\ndef bn_fuse(conv, bn):\n # 可以进行“针对特征(A)二值的BN融合”的BN层位置\n global bn_counter, bin_bn_fuse_num\n bn_counter = bn_counter + 1\n # ******************** bn参数 *********************\n mean = bn.running_mean\n std = torch.sqrt(bn.running_var + bn.eps)\n gamma = bn.weight\n beta = bn.bias\n # ******************* conv参数 ********************\n w = conv.weight\n w_fused = w.clone()\n if conv.bias is not None:\n b = conv.bias\n else:\n b = mean.new_zeros(mean.shape)\n b_fused = b.clone()\n # ******************* 针对特征(A)二值的bn融合 *******************\n if(bn_counter >= 1 and bn_counter <= bin_bn_fuse_num):\n mask_positive = gamma.data.gt(0)\n mask_negetive = gamma.data.lt(0)\n\n w_fused[mask_positive] = w[mask_positive]\n b_fused[mask_positive] = b[mask_positive] - mean[mask_positive] + \\\n beta[mask_positive] * (std[mask_positive] / gamma[mask_positive])\n\n w_fused[mask_negetive] = w[mask_negetive] * -1\n b_fused[mask_negetive] = mean[mask_negetive] - b[mask_negetive] - \\\n beta[mask_negetive] * (std[mask_negetive] / gamma[mask_negetive])\n # ******************* 普通bn融合 *******************\n else:\n w_fused = w * (gamma / std).reshape([conv.out_channels, 1, 1, 1])\n b_fused = beta + (b - mean) * (gamma / std)\n if(bn_counter >= 2 and bn_counter <= bin_bn_fuse_num):\n bn_fused_conv = quantize.QuantConv2d(conv.in_channels,\n conv.out_channels,\n conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True,\n padding_mode=conv.padding_mode,\n W=args.W,\n quant_inference=True)\n else:\n bn_fused_conv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True,\n padding_mode=conv.padding_mode)\n bn_fused_conv.weight.data = w_fused\n bn_fused_conv.bias.data = b_fused\n return bn_fused_conv\n\n\ndef bn_fuse_module(module):\n for name, child in module.named_children():\n if isinstance(child, nn.Conv2d):\n conv_name_temp = name\n conv_child_temp = child\n elif isinstance(child, nn.BatchNorm2d):\n bn_fused_conv = bn_fuse(conv_child_temp, child)\n module._modules[conv_name_temp] = bn_fused_conv\n module._modules[name] = nn.Identity()\n else:\n bn_fuse_module(child)\n\n\ndef model_bn_fuse(model, inplace=False):\n if not inplace:\n model = copy.deepcopy(model)\n bn_fuse_module(model)\n return model\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu_id', action='store', default='',\n help='gpu_id')\n parser.add_argument('--prune_quant', action='store_true',\n help='this is prune_quant model')\n parser.add_argument('--model_type', type=int, default=1,\n help='model type:0-nin,1-nin_gc')\n parser.add_argument('--W', type=int, default=2,\n help='Wb:2, Wt:3, Wfp:32')\n parser.add_argument('--A', type=int, default=2,\n help='Ab:2, Afp:32')\n\n args = parser.parse_args()\n print('==> Options:', args)\n\n if args.gpu_id:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n\n if args.prune_quant:\n print('******Prune Quant model******')\n if args.model_type == 0:\n checkpoint = torch.load('../models_save/nin.pth')\n quant_model_train 
= nin.Net(cfg=checkpoint['cfg'])\n else:\n checkpoint = torch.load('../models_save/nin_gc.pth')\n quant_model_train = nin_gc.Net(cfg=checkpoint['cfg'])\n else:\n if args.model_type == 0:\n checkpoint = torch.load('../models_save/nin.pth')\n quant_model_train = nin.Net()\n else:\n checkpoint = torch.load('../models_save/nin_gc.pth')\n quant_model_train = nin_gc.Net()\n quant_bn_fused_model_inference = copy.deepcopy(quant_model_train)\n quantize.prepare(quant_model_train, inplace=True, A=args.A, W=args.W)\n quantize.prepare(quant_bn_fused_model_inference, inplace=True,\n A=args.A, W=args.W, quant_inference=True)\n quant_model_train.load_state_dict(checkpoint['state_dict'])\n quant_bn_fused_model_inference.load_state_dict(checkpoint['state_dict'])\n\n # ********************** quant_model_train ************************\n torch.save(quant_model_train, 'models_save/quant_model_train.pth')\n torch.save(quant_model_train.state_dict(), 'models_save/quant_model_train_para.pth')\n model_array = np.array(quant_model_train)\n model_para_array = np.array(quant_model_train.state_dict())\n np.savetxt('models_save/quant_model_train.txt', [model_array], fmt='%s', delimiter=',')\n np.savetxt('models_save/quant_model_train_para.txt', [model_para_array], fmt='%s', delimiter=',')\n\n # ********************* quant_bn_fused_model_inference **********************\n bn_counter = 0\n bin_bn_fuse_num = 0\n # 统计可以进行“针对特征(A)二值的BN融合”的BN层位置\n for m in quant_bn_fused_model_inference.modules():\n if isinstance(m, quantize.ActivationQuantizer):\n bin_bn_fuse_num += 1\n model_bn_fuse(quant_bn_fused_model_inference, inplace=True) # bn融合\n print('***quant_model_train***\\n', quant_model_train)\n print('\\n***quant_bn_fused_model_inference***\\n', quant_bn_fused_model_inference)\n torch.save(quant_bn_fused_model_inference, 'models_save/quant_bn_fused_model_inference.pth')\n torch.save(quant_bn_fused_model_inference.state_dict(), 'models_save/quant_bn_fused_model_inference_para.pth')\n model_array = np.array(quant_bn_fused_model_inference)\n model_para_array = np.array(quant_bn_fused_model_inference.state_dict())\n np.savetxt('models_save/quant_bn_fused_model_inference.txt', [model_array], fmt='%s', delimiter=',')\n np.savetxt('models_save/quant_bn_fused_model_inference_para.txt', [model_para_array], fmt='%s', delimiter=',')\n print(\"************* bn_fuse 完成 **************\")\n print(\"************* bn_fused_model 已保存 **************\")\n"
] | [
[
"torch.load",
"torch.sqrt",
"torch.nn.Conv2d",
"torch.nn.Identity",
"numpy.savetxt",
"numpy.array",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
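The `bn_fuse` script above folds each BatchNorm layer into its preceding convolution; in the ordinary case it uses w' = w * (gamma / std) and b' = beta + (b - mean) * (gamma / std). A small self-contained check of that identity in plain PyTorch (independent of the repo's `quantize` module; shapes and seeds here are arbitrary):

import torch
import torch.nn as nn

torch.manual_seed(0)
conv = nn.Conv2d(3, 8, 3, padding=1, bias=True)
bn = nn.BatchNorm2d(8).eval()  # eval mode: BN uses its running statistics
bn.running_mean.uniform_(-1, 1)
bn.running_var.uniform_(0.5, 2.0)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-0.5, 0.5)

with torch.no_grad():
    std = torch.sqrt(bn.running_var + bn.eps)
    fused = nn.Conv2d(3, 8, 3, padding=1, bias=True)
    fused.weight.copy_(conv.weight * (bn.weight / std).reshape(-1, 1, 1, 1))
    fused.bias.copy_(bn.bias + (conv.bias - bn.running_mean) * bn.weight / std)

    x = torch.randn(2, 3, 16, 16)
    assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)  # fused conv matches conv+BN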
havi121/poliastro-AU | [
"98889b36892622b43cb284f64e6ecf72e3f01c6f"
] | [
"src/poliastro/plotting/tisserand.py"
] | [
"\"\"\" Generates Tisserand plots \"\"\"\nfrom enum import Enum\n\nimport numpy as np\nfrom astropy import units as u\nfrom matplotlib import pyplot as plt\n\nfrom poliastro.plotting._base import BODY_COLORS\nfrom poliastro.twobody.mean_elements import get_mean_elements\nfrom poliastro.util import norm\n\n\nclass TisserandKind(Enum):\n \"\"\"All possible Tisserand kinds\"\"\"\n\n APSIS = \"apsis\"\n ENERGY = \"energy\"\n PERIOD = \"period\"\n\n\nclass TisserandPlotter:\n \"\"\"Generates Tisserand figures\"\"\"\n\n def __init__(self, kind=TisserandKind.APSIS, axes=None):\n \"\"\"Object initializer\n\n Parameters\n ----------\n kind: TisserandKind\n Nature for the Tisserand\n axes: ~matplotlib.pyplot.axes\n Axes for the figure\n\n \"\"\"\n\n # Asign Tisserand kind\n self.kind = kind\n\n # Check if axis available\n if not axes:\n _, self.ax = plt.subplots(1, 1)\n else:\n self.ax = axes\n\n # Force axes scale regarding Tisserand kind\n self.ax.set_xscale(\"log\")\n if self.kind == TisserandKind.APSIS:\n self.ax.set_yscale(\"log\")\n\n def _solve_tisserand(\n self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100\n ):\n \"\"\"Solves all possible Tisserand lines with a meshgrid workflow\n\n Parameters\n ----------\n body: ~poliastro.bodies.Body\n Body to be plotted Tisserand\n vinf_array: ~astropy.units.Quantity\n Desired Vinf for the flyby\n num_contours: int\n Number of contour lines for flyby speed\n N: int\n Number of points for flyby angle\n\n Note\n ----\n The algorithm for generating Tisserand plots is the one depicted in\n \"Preliminary Trajectory Design of a Mission to Enceladus\" by David\n Falcato Fialho Palma, section 3.6\n\n \"\"\"\n\n # Generate mean orbital elements Earth\n body_rv = get_mean_elements(body).to_vectors()\n R_body, V_body = norm(body_rv.r), norm(body_rv.v)\n\n # Generate non-dimensional velocity and alpha span\n vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)\n alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)\n vinf_array /= V_body\n\n # Construct the mesh for any configuration\n V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)\n\n # Solving for non-dimensional a_sc and ecc_sc\n A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))\n ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)\n\n # Compute main Tisserand variables\n RR_P = A_SC * R_body * (1 - ECC_SC)\n RR_A = A_SC * R_body * (1 + ECC_SC)\n TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)\n EE = -body.parent.k / (2 * A_SC * R_body)\n\n # Build color lines to internal canvas\n return RR_P, RR_A, EE, TT\n\n def _build_lines(self, RR_P, RR_A, EE, TT, color):\n \"\"\"Collect lines and append them to internal data\n\n Parameters\n ----------\n data: list\n Array containing [RR_P, RR_A, EE, TT, color]\n\n Returns\n -------\n lines: list\n Plotting lines for the Tisserand\n \"\"\"\n\n # Plot desired kind lines\n if self.kind == TisserandKind.APSIS:\n # Generate apsis lines\n lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)\n elif self.kind == TisserandKind.ENERGY:\n # Generate energy lines\n lines = self.ax.plot(\n RR_P.to(u.AU), EE.to(u.au ** 2 / u.s ** 2), color=color\n )\n elif self.kind == TisserandKind.PERIOD:\n # Generate period lines\n lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)\n\n return lines\n\n def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):\n \"\"\"Plots body Tisserand line within flyby angle\n\n Parameters\n ----------\n body: ~poliastro.bodies.Body\n Body to be 
plotted Tisserand\n vinf: ~astropy.units.Quantity\n Vinf velocity line\n alpha_lim: tuple\n Minimum and maximum flyby angles\n color: str\n String representing for the color lines\n\n Returns\n -------\n self.ax: ~matplotlib.axes.Axes\n Apsis tisserand is the default plotting option\n\n \"\"\"\n\n # HACK: to reuse Tisserand solver, we transform input Vinf into a tuple\n vinf_span = (vinf, vinf)\n\n # Solve Tisserand parameters\n RR_P, RR_A, EE, TT = self._solve_tisserand(\n body, vinf_span, num_contours=2, alpha_lim=alpha_lim\n )\n\n # Check if color defined\n if not color:\n color = BODY_COLORS[body.name]\n\n # Build canvas lines from Tisserand parameters\n self._build_lines(RR_P, RR_A, EE, TT, color)\n\n return self.ax\n\n def plot(self, body, vinf_span, num_contours=10, color=None):\n \"\"\"Plots body Tisserand for given amount of solutions within Vinf span\n\n Parameters\n ----------\n body: ~poliastro.bodies.Body\n Body to be plotted Tisserand\n vinf_span: tuple\n Minimum and maximum Vinf velocities\n num_contours: int\n Number of points to iterate over previously defined velocities\n color: str\n String representing for the color lines\n\n Returns\n -------\n self.ax: ~matplotlib.axes.Axes\n Apsis tisserand is the default plotting option\n\n \"\"\"\n\n # Solve Tisserand parameters\n RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)\n\n # Check if color defined\n if not color:\n color = BODY_COLORS[body.name]\n\n # Build canvas lines from Tisserand parameters\n self._build_lines(RR_P, RR_A, EE, TT, color)\n\n return self.ax\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
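A hedged usage sketch for the Tisserand plotter above (assumes a poliastro version that exposes `TisserandPlotter` at this module path and ships mean-element data for the chosen bodies):

from astropy import units as u
from poliastro.bodies import Earth, Venus
from poliastro.plotting.tisserand import TisserandKind, TisserandPlotter

tp = TisserandPlotter(kind=TisserandKind.APSIS)
for body in (Venus, Earth):
    # Eight contour lines per body across the flyby-speed span
    tp.plot(body, vinf_span=(1 * u.km / u.s, 10 * u.km / u.s), num_contours=8)
ax = tp.plot_line(Earth, vinf=5 * u.km / u.s, color="k")  # highlight one line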
romanstrazanec/ChaosEquations | [
"cff505832b3ef8db2e3dc05e299a30f52b8e6473"
] | [
"python/readme_prog_change_sub.py"
] | [
"import matplotlib.pyplot as plt\n\nn = 5\nT = [-1, -.5, 0., .5, 1]\nx = [1] * len(T)\ny = [1] * len(T)\n\nplt.subplot(122)\nfor i in range(1, n+1):\n for j in range(len(T)):\n x[j], y[j] = (x[j] + y[j]*T[j], x[j] - y[j]*T[j])\n for j in range(len(T)-1):\n plt.arrow(x[j], y[j], x[j+1]-x[j], y[j+1]-y[j], head_width=.35, head_length=.35, alpha=.3, fc='k')\n plt.plot(x, y, alpha=.7, label=f\"{i} i\")\n\nplt.subplot(121)\nfor t in T:\n x, y = (1, 1)\n xs, ys = [x], [y]\n for i in range(1, n+1):\n x, y = (x + y*t, x - y*t)\n xs.append(x)\n ys.append(y)\n plt.plot(xs, ys, '.-', alpha=.5, label=f\"T = {t}\")\n\n\nplt.legend()\nplt.subplot(122)\nplt.legend()\nplt.savefig(\"../images/plot4sub.png\")\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.arrow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
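The script above iterates the linear map (x, y) -> (x + y*T, x - y*T), i.e. multiplication by the matrix [[1, T], [1, -T]]. For T = 1 the eigenvalues are +/- sqrt(2), so orbits grow by roughly sqrt(2) per step; a quick numerical check of that growth:

import numpy as np

T = 1.0
A = np.array([[1.0, T], [1.0, -T]])
print(np.abs(np.linalg.eigvals(A)))  # both eigenvalue magnitudes equal sqrt(2)

v = np.array([1.0, 1.0])  # same starting point as the script
for i in range(1, 6):
    v = A @ v
    print(i, np.linalg.norm(v))  # norm grows by about sqrt(2) per iteration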
amorehead/Equivariant-GNNs | [
"4e81136242a4c8905b0e5fc39be5f704a42cc5e1"
] | [
"project/utils/modules.py"
] | [
"from typing import Dict\n\nimport dgl\nimport dgl.function as fn # for graphs\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom dgl.nn.pytorch.glob import AvgPooling, MaxPooling\nfrom dgl.nn.pytorch.softmax import edge_softmax\nfrom einops import rearrange\nfrom packaging import version\nfrom torch import Tensor, einsum, broadcast_tensors, relu, sigmoid\nfrom torch.nn import GELU\nfrom torch.nn.functional import normalize\nfrom torch.nn.parameter import Parameter\n\nfrom project.utils.fibers import Fiber, fiber2head\nfrom project.utils.from_se3cnn.utils_steerable import _basis_transformation_Q_J, get_spherical_from_cartesian_torch, \\\n precompute_sh\nfrom project.utils.utils import fourier_encode_dist, batched_index_select\nfrom project.utils.utils_profiling import profile # load before other local modules\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code derived from SE(3)-Transformer (https://github.com/FabianFuchsML/se3-transformer-public/):\n# -------------------------------------------------------------------------------------------------------------------------------------\n\n@profile\ndef get_basis(Y, max_degree):\n \"\"\"Precompute the SE(3)-equivariant weight basis.\n This is called by get_basis_and_r().\n Args:\n Y: spherical harmonic dict, returned by utils_steerable.precompute_sh()\n max_degree: non-negative int for degree of highest feature type\n Returns:\n dict of equivariant bases, keys are in form '<d_in><d_out>'\n \"\"\"\n device = Y[0].device\n # No need to backprop through the basis construction\n with torch.no_grad():\n basis = {}\n for d_in in range(max_degree + 1):\n for d_out in range(max_degree + 1):\n K_Js = []\n for J in range(abs(d_in - d_out), d_in + d_out + 1):\n # Get spherical harmonic projection matrices\n Q_J = _basis_transformation_Q_J(J, d_in, d_out)\n Q_J = Q_J.float().T.to(device)\n\n # Create kernel from spherical harmonics\n K_J = torch.matmul(Y[J], Q_J)\n K_Js.append(K_J)\n\n # Reshape so can take linear combinations with a dot product\n size = (-1, 1, 2 * d_out + 1, 1, 2 * d_in + 1, 2 * min(d_in, d_out) + 1)\n basis[f'{d_in},{d_out}'] = torch.stack(K_Js, -1).view(*size)\n return basis\n\n\ndef get_basis_and_r(G, max_degree):\n \"\"\"Return equivariant weight basis (basis) and internodal distances (r).\n Call this function *once* at the start of each forward pass of the model.\n It computes the equivariant weight basis, W_J^lk(x), and internodal\n distances, needed to compute varphi_J^lk(x), of eqn 8 of\n https://arxiv.org/pdf/2006.10503.pdf. 
The return values of this function\n can be shared as input across all SE(3)-Transformer layers in a model.\n Args:\n G: DGL graph instance of type dgl.DGLGraph()\n max_degree: non-negative int for degree of highest feature-type\n Returns:\n dict of equivariant bases, keys are in form '<d_in><d_out>'\n vector of relative distances, ordered according to edge ordering of G\n \"\"\"\n # Relative positional encodings (vector)\n r_ij = get_spherical_from_cartesian_torch(G.edata['d'])\n # Spherical harmonic basis\n Y = precompute_sh(r_ij, 2 * max_degree)\n # Equivariant basis (dict['d_in><d_out>'])\n basis = get_basis(Y, max_degree)\n # Relative distances (scalar)\n r = torch.sqrt(torch.sum(G.edata['d'] ** 2, -1, keepdim=True))\n return basis, r\n\n\n### SE(3) equivariant operations on graphs in DGL\n\nclass GConvSE3(nn.Module):\n \"\"\"A tensor field network layer as a DGL module.\n\n GConvSE3 stands for a Graph Convolution SE(3)-equivariant layer. It is the\n equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph\n conv layer in a GCN.\n At each node, the activations are split into different \"feature types\",\n indexed by the SE(3) representation type: non-negative integers 0, 1, 2, ..\n \"\"\"\n\n def __init__(self, f_in, f_out, self_interaction: bool = False, edge_dim: int = 0):\n \"\"\"SE(3)-equivariant Graph Conv Layer\n Args:\n f_in: list of tuples [(multiplicities, type),...]\n f_out: list of tuples [(multiplicities, type),...]\n self_interaction: include self-interaction in convolution\n edge_dim: number of dimensions for edge embedding\n \"\"\"\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n self.edge_dim = edge_dim\n self.self_interaction = self_interaction\n\n # Neighbor -> center weights\n self.kernel_unary = nn.ModuleDict()\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)\n\n # Center -> center weights\n self.kernel_self = nn.ParameterDict()\n if self_interaction:\n for m_in, d_in in self.f_in.structure:\n if d_in in self.f_out.degrees:\n m_out = self.f_out.structure_dict[d_in]\n W = nn.Parameter(torch.randn(1, m_out, m_in) / np.sqrt(m_in))\n self.kernel_self[f'{d_in}'] = W\n\n def __repr__(self):\n return f'GConvSE3(structure={self.f_out}, self_interaction={self.self_interaction})'\n\n def udf_u_mul_e(self, d_out):\n \"\"\"Compute the convolution for a single output feature type.\n This function is set up as a User Defined Function in DGL.\n Args:\n d_out: output feature type\n Returns:\n edge -> node function handle\n \"\"\"\n\n def fnc(edges):\n # Neighbor -> center messages\n msg = 0\n for m_in, d_in in self.f_in.structure:\n src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)\n edge = edges.data[f'({d_in},{d_out})']\n msg = msg + torch.matmul(edge, src)\n msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)\n\n # Center -> center messages\n if self.self_interaction:\n if f'{d_out}' in self.kernel_self.keys():\n dst = edges.dst[f'{d_out}']\n W = self.kernel_self[f'{d_out}']\n msg = msg + torch.matmul(W, dst)\n\n return {'msg': msg.view(msg.shape[0], -1, 2 * d_out + 1)}\n\n return fnc\n\n @profile\n def forward(self, h, G=None, r=None, basis=None, **kwargs):\n \"\"\"Forward pass of the linear layer\n Args:\n G: minibatch of (homo)graphs\n h: dict of features\n r: inter-atomic distances\n basis: pre-computed Q * Y\n Returns:\n tensor with new features [B, n_points, n_features_out]\n \"\"\"\n with G.local_scope():\n # Add node 
features to local graph scope\n for k, v in h.items():\n G.ndata[k] = v\n\n # Add edge features\n if 'w' in G.edata.keys():\n w = G.edata['w']\n feat = torch.cat([w, r], -1)\n else:\n feat = torch.cat([r, ], -1)\n\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n etype = f'({di},{do})'\n G.edata[etype] = self.kernel_unary[etype](feat, basis)\n\n # Perform message-passing for each output feature type\n for d in self.f_out.degrees:\n G.update_all(self.udf_u_mul_e(d), fn.mean('msg', f'out{d}'))\n\n return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}\n\n\nclass RadialFunc(nn.Module):\n \"\"\"NN parameterized radial profile function.\"\"\"\n\n def __init__(self, num_freq, in_dim, out_dim, edge_dim: int = 0):\n \"\"\"NN parameterized radial profile function.\n Args:\n num_freq: number of output frequencies\n in_dim: multiplicity of input (num input channels)\n out_dim: multiplicity of output (num output channels)\n edge_dim: number of dimensions for edge embedding\n \"\"\"\n super().__init__()\n self.num_freq = num_freq\n self.in_dim = in_dim\n self.mid_dim = 32\n self.out_dim = out_dim\n self.edge_dim = edge_dim\n\n self.net = nn.Sequential(nn.Linear(self.edge_dim + 1, self.mid_dim),\n BN(self.mid_dim),\n nn.ReLU(),\n nn.Linear(self.mid_dim, self.mid_dim),\n BN(self.mid_dim),\n nn.ReLU(),\n nn.Linear(self.mid_dim, self.num_freq * in_dim * out_dim))\n\n nn.init.kaiming_uniform_(self.net[0].weight)\n nn.init.kaiming_uniform_(self.net[3].weight)\n nn.init.kaiming_uniform_(self.net[6].weight)\n\n def __repr__(self):\n return f\"RadialFunc(edge_dim={self.edge_dim}, in_dim={self.in_dim}, out_dim={self.out_dim})\"\n\n def forward(self, x):\n y = self.net(x)\n return y.view(-1, self.out_dim, 1, self.in_dim, 1, self.num_freq)\n\n\nclass PairwiseConv(nn.Module):\n \"\"\"SE(3)-equivariant convolution between two single-type features\"\"\"\n\n def __init__(self, degree_in: int, nc_in: int, degree_out: int,\n nc_out: int, edge_dim: int = 0):\n \"\"\"SE(3)-equivariant convolution between a pair of feature types.\n This layer performs a convolution from nc_in features of type degree_in\n to nc_out features of type degree_out.\n Args:\n degree_in: degree of input fiber\n nc_in: number of channels on input\n degree_out: degree of out order\n nc_out: number of channels on output\n edge_dim: number of dimensions for edge embedding\n \"\"\"\n super().__init__()\n # Log settings\n self.degree_in = degree_in\n self.degree_out = degree_out\n self.nc_in = nc_in\n self.nc_out = nc_out\n\n # Functions of the degree\n self.num_freq = 2 * min(degree_in, degree_out) + 1\n self.d_out = 2 * degree_out + 1\n self.edge_dim = edge_dim\n\n # Radial profile function\n self.rp = RadialFunc(self.num_freq, nc_in, nc_out, self.edge_dim)\n\n @profile\n def forward(self, feat, basis):\n # Get radial weights\n R = self.rp(feat)\n kernel = torch.sum(R * basis[f'{self.degree_in},{self.degree_out}'], -1)\n return kernel.view(kernel.shape[0], self.d_out * self.nc_out, -1)\n\n\nclass G1x1SE3(nn.Module):\n \"\"\"Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.\n\n This is equivalent to a self-interaction layer in TensorField Networks.\n \"\"\"\n\n def __init__(self, f_in, f_out, learnable=True):\n \"\"\"SE(3)-equivariant 1x1 convolution.\n Args:\n f_in: input Fiber() of feature multiplicities and types\n f_out: output Fiber() of feature multiplicities and types\n \"\"\"\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n\n # Linear mappings: 1 per output feature 
type\n self.transform = nn.ParameterDict()\n for m_out, d_out in self.f_out.structure:\n m_in = self.f_in.structure_dict[d_out]\n self.transform[str(d_out)] = nn.Parameter(torch.randn(m_out, m_in) / np.sqrt(m_in), requires_grad=learnable)\n\n def __repr__(self):\n return f\"G1x1SE3(structure={self.f_out})\"\n\n def forward(self, features, **kwargs):\n output = {}\n for k, v in features.items():\n if str(k) in self.transform.keys():\n output[k] = torch.matmul(self.transform[str(k)], v)\n return output\n\n\nclass GNormSE3(nn.Module):\n \"\"\"Graph Norm-based SE(3)-equivariant nonlinearity.\n\n Nonlinearities are important in SE(3) equivariant GCNs. They are also quite\n expensive to compute, so it is convenient for them to share resources with\n other layers, such as normalization. The general workflow is as follows:\n > for feature type in features:\n > norm, phase <- feature\n > output = fnc(norm) * phase\n\n where fnc: {R+}^m -> R^m is a learnable map from m norms to m scalars.\n \"\"\"\n\n def __init__(self, fiber, nonlin=nn.ReLU(inplace=True), num_layers: int = 0):\n \"\"\"Initializer.\n Args:\n fiber: Fiber() of feature multiplicities and types\n nonlin: nonlinearity to use everywhere\n num_layers: non-negative number of linear layers in fnc\n \"\"\"\n super().__init__()\n self.fiber = fiber\n self.nonlin = nonlin\n self.num_layers = num_layers\n\n # Regularization for computing phase: gradients explode otherwise\n self.eps = 1e-12\n\n # Norm mappings: 1 per feature type\n self.transform = nn.ModuleDict()\n for m, d in self.fiber.structure:\n self.transform[str(d)] = self._build_net(int(m))\n\n def __repr__(self):\n return f\"GNormSE3(num_layers={self.num_layers}, nonlin={self.nonlin})\"\n\n def _build_net(self, m: int):\n net = []\n for i in range(self.num_layers):\n net.append(BN(int(m)))\n net.append(self.nonlin)\n # TODO: implement cleaner init\n net.append(nn.Linear(m, m, bias=(i == self.num_layers - 1)))\n nn.init.kaiming_uniform_(net[-1].weight)\n if self.num_layers == 0:\n net.append(BN(int(m)))\n net.append(self.nonlin)\n return nn.Sequential(*net)\n\n @profile\n def forward(self, features, **kwargs):\n output = {}\n for k, v in features.items():\n # Compute the norms and normalized features\n # v shape: [...,m , 2*k+1]\n norm = v.norm(2, -1, keepdim=True).clamp_min(self.eps).expand_as(v)\n phase = v / norm\n\n # Transform on norms\n transformed = self.transform[str(k)](norm[..., 0]).unsqueeze(-1)\n\n # Nonlinearity on norm\n output[k] = (transformed * phase).view(*v.shape)\n\n return output\n\n\nclass BN(nn.Module):\n \"\"\"SE(3)-equvariant batch/layer normalization\"\"\"\n\n def __init__(self, m):\n \"\"\"SE(3)-equvariant batch/layer normalization\n Args:\n m: int for number of output channels\n \"\"\"\n super().__init__()\n self.bn = nn.LayerNorm(m)\n\n def forward(self, x):\n return self.bn(x)\n\n\nclass GConvSE3Partial(nn.Module):\n \"\"\"Graph SE(3)-equivariant node -> edge layer\"\"\"\n\n def __init__(self, f_in, f_out, edge_dim: int = 0):\n \"\"\"SE(3)-equivariant partial convolution.\n A partial convolution computes the inner product between a kernel and\n each input channel, without summing over the result from each input\n channel. 
This unfolded structure makes it amenable to be used for\n computing the value-embeddings of the attention mechanism.\n Args:\n f_in: list of tuples [(multiplicities, type),...]\n f_out: list of tuples [(multiplicities, type),...]\n \"\"\"\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n self.edge_dim = edge_dim\n\n # Node -> edge weights\n self.kernel_unary = nn.ModuleDict()\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)\n\n def __repr__(self):\n return f'GConvSE3Partial(structure={self.f_out})'\n\n def udf_u_mul_e(self, d_out):\n \"\"\"Compute the partial convolution for a single output feature type.\n This function is set up as a User Defined Function in DGL.\n Args:\n d_out: output feature type\n Returns:\n node -> edge function handle\n \"\"\"\n\n def fnc(edges):\n # Neighbor -> center messages\n msg = 0\n for m_in, d_in in self.f_in.structure:\n src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)\n edge = edges.data[f'({d_in},{d_out})']\n msg = msg + torch.matmul(edge, src)\n msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)\n\n return {f'out{d_out}': msg.view(msg.shape[0], -1, 2 * d_out + 1)}\n\n return fnc\n\n @profile\n def forward(self, h, G=None, r=None, basis=None, **kwargs):\n \"\"\"Forward pass of the linear layer\n Args:\n h: dict of node-features\n G: minibatch of (homo)graphs\n r: inter-atomic distances\n basis: pre-computed Q * Y\n Returns:\n tensor with new features [B, n_points, n_features_out]\n \"\"\"\n with G.local_scope():\n # Add node features to local graph scope\n for k, v in h.items():\n G.ndata[k] = v\n\n # Add edge features\n if 'w' in G.edata.keys():\n w = G.edata['w'] # shape: [#edges_in_batch, #bond_types]\n feat = torch.cat([w, r], -1)\n else:\n feat = torch.cat([r, ], -1)\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n etype = f'({di},{do})'\n G.edata[etype] = self.kernel_unary[etype](feat, basis)\n\n # Perform message-passing for each output feature type\n for d in self.f_out.degrees:\n G.apply_edges(self.udf_u_mul_e(d))\n\n return {f'{d}': G.edata[f'out{d}'] for d in self.f_out.degrees}\n\n\nclass GMABSE3(nn.Module):\n \"\"\"An SE(3)-equivariant multi-headed self-attention module for DGL graphs.\"\"\"\n\n def __init__(self, f_value: Fiber, f_key: Fiber, n_heads: int):\n \"\"\"SE(3)-equivariant MAB (multi-headed attention block) layer.\n Args:\n f_value: Fiber() object for value-embeddings\n f_key: Fiber() object for key-embeddings\n n_heads: number of heads\n \"\"\"\n super().__init__()\n self.f_value = f_value\n self.f_key = f_key\n self.n_heads = n_heads\n self.new_dgl = version.parse(dgl.__version__) > version.parse('0.4.4')\n\n def __repr__(self):\n return f'GMABSE3(n_heads={self.n_heads}, structure={self.f_value})'\n\n def udf_u_mul_e(self, d_out):\n \"\"\"Compute the weighted sum for a single output feature type.\n This function is set up as a User Defined Function in DGL.\n Args:\n d_out: output feature type\n Returns:\n edge -> node function handle\n \"\"\"\n\n def fnc(edges):\n # Neighbor -> center messages\n attn = edges.data['a']\n value = edges.data[f'v{d_out}']\n\n # Apply attention weights\n msg = attn.unsqueeze(-1).unsqueeze(-1) * value\n\n return {'m': msg}\n\n return fnc\n\n @profile\n def forward(self, v, k: Dict = None, q: Dict = None, G=None, **kwargs):\n \"\"\"Forward pass of the linear layer\n Args:\n G: minibatch of (homo)graphs\n v: dict of value 
edge-features\n k: dict of key edge-features\n q: dict of query node-features\n Returns:\n tensor with new features [B, n_points, n_features_out]\n \"\"\"\n with G.local_scope():\n # Add node features to local graph scope\n ## We use the stacked tensor representation for attention\n for m, d in self.f_value.structure:\n G.edata[f'v{d}'] = v[f'{d}'].view(-1, self.n_heads, m // self.n_heads, 2 * d + 1)\n G.edata['k'] = fiber2head(k, self.n_heads, self.f_key, squeeze=True)\n G.ndata['q'] = fiber2head(q, self.n_heads, self.f_key, squeeze=True)\n\n # Compute attention weights\n ## Inner product between (key) neighborhood and (query) center\n G.apply_edges(fn.e_dot_v('k', 'q', 'e'))\n\n ## Apply softmax\n e = G.edata.pop('e')\n if self.new_dgl:\n # in dgl 5.3, e has an extra dimension compared to dgl 4.3\n # the following, we get rid of this be reshaping\n n_edges = G.edata['k'].shape[0]\n e = e.view([n_edges, self.n_heads])\n e = e / np.sqrt(self.f_key.n_features)\n G.edata['a'] = edge_softmax(G, e)\n\n # Perform attention-weighted message-passing\n for d in self.f_value.degrees:\n G.update_all(self.udf_u_mul_e(d), fn.sum('m', f'out{d}'))\n\n output = {}\n for m, d in self.f_value.structure:\n output[f'{d}'] = G.ndata[f'out{d}'].view(-1, m, 2 * d + 1)\n\n return output\n\n\nclass GSE3Res(nn.Module):\n \"\"\"Graph attention block with SE(3)-equivariance and skip connection\"\"\"\n\n def __init__(self, f_in: Fiber, f_out: Fiber, edge_dim: int = 0, div: float = 4,\n n_heads: int = 1, learnable_skip=True):\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n self.div = div\n self.n_heads = n_heads\n\n # f_mid_out has same structure as 'f_out' but #channels divided by 'div'\n # this will be used for the values\n f_mid_out = {k: int(v // div) for k, v in self.f_out.structure_dict.items()}\n self.f_mid_out = Fiber(dictionary=f_mid_out)\n\n # f_mid_in has same structure as f_mid_out, but only degrees which are in f_in\n # this will be used for keys and queries\n # (queries are merely projected, hence degrees have to match input)\n f_mid_in = {d: m for d, m in f_mid_out.items() if d in self.f_in.degrees}\n self.f_mid_in = Fiber(dictionary=f_mid_in)\n\n self.edge_dim = edge_dim\n\n self.GMAB = nn.ModuleDict()\n\n # Projections\n self.GMAB['v'] = GConvSE3Partial(f_in, self.f_mid_out, edge_dim=edge_dim)\n self.GMAB['k'] = GConvSE3Partial(f_in, self.f_mid_in, edge_dim=edge_dim)\n self.GMAB['q'] = G1x1SE3(f_in, self.f_mid_in)\n\n # Attention\n self.GMAB['attn'] = GMABSE3(self.f_mid_out, self.f_mid_in, n_heads=n_heads)\n\n # Skip connections\n self.project = G1x1SE3(self.f_mid_out, f_out, learnable=learnable_skip)\n self.add = GSum(f_out, f_in)\n # the following checks whether the skip connection would change\n # the output fibre structure; the reason can be that the input has\n # more channels than the output (for at least one degree); this would\n # then cause a (hard to debug) error in the next layer\n assert self.add.f_out.structure_dict == f_out.structure_dict, \\\n 'skip connection would change output structure'\n\n @profile\n def forward(self, features, G, **kwargs):\n # Embeddings\n v = self.GMAB['v'](features, G=G, **kwargs)\n k = self.GMAB['k'](features, G=G, **kwargs)\n q = self.GMAB['q'](features, G=G)\n\n # Attention\n z = self.GMAB['attn'](v, k=k, q=q, G=G)\n\n # Skip + residual\n z = self.project(z)\n z = self.add(z, features)\n return z\n\n\n### Helper and wrapper functions\n\nclass GSum(nn.Module):\n \"\"\"SE(3)-equivariant graph residual sum function.\"\"\"\n\n def __init__(self, 
f_x: Fiber, f_y: Fiber):\n \"\"\"SE(3)-equivariant graph residual sum function.\n Args:\n f_x: Fiber() object for fiber of summands\n f_y: Fiber() object for fiber of summands\n \"\"\"\n super().__init__()\n self.f_x = f_x\n self.f_y = f_y\n self.f_out = Fiber.combine_max(f_x, f_y)\n\n def __repr__(self):\n return f\"GSum(structure={self.f_out})\"\n\n def forward(self, x, y):\n out = {}\n for k in self.f_out.degrees:\n k = str(k)\n if (k in x) and (k in y):\n if x[k].shape[1] > y[k].shape[1]:\n diff = x[k].shape[1] - y[k].shape[1]\n zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)\n y[k] = torch.cat([y[k], zeros], 1)\n elif x[k].shape[1] < y[k].shape[1]:\n diff = y[k].shape[1] - x[k].shape[1]\n zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)\n x[k] = torch.cat([x[k], zeros], 1)\n\n out[k] = x[k] + y[k]\n elif k in x:\n out[k] = x[k]\n elif k in y:\n out[k] = y[k]\n return out\n\n\nclass GAvgPooling(nn.Module):\n \"\"\"Graph Average Pooling module.\"\"\"\n\n def __init__(self, type='0'):\n super().__init__()\n self.pool = AvgPooling()\n self.type = type\n\n @profile\n def forward(self, features, G, **kwargs):\n if self.type == '0':\n h = features['0'][..., -1]\n pooled = self.pool(G, h)\n elif self.type == '1':\n pooled = []\n for i in range(3):\n h_i = features['1'][..., i]\n pooled.append(self.pool(G, h_i).unsqueeze(-1))\n pooled = torch.cat(pooled, axis=-1)\n pooled = {'1': pooled}\n else:\n print('GAvgPooling for type > 0 not implemented')\n exit()\n return pooled\n\n\nclass GMaxPooling(nn.Module):\n \"\"\"Graph Max Pooling module.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.pool = MaxPooling()\n\n @profile\n def forward(self, features, G, **kwargs):\n h = features['0'][..., -1]\n return self.pool(G, h)\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code derived from egnn-pytorch (https://github.com/lucidrains/egnn-pytorch/blob/main/egnn_pytorch/egnn_pytorch.py):\n# -------------------------------------------------------------------------------------------------------------------------------------\n\nclass EnInvGraphConv(nn.Module):\n \"\"\"A graph neural network layer as a DGL module.\n\n EnInvGraphConv stands for a Graph Convolution E(n)-invariant layer. 
It is the\n equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph\n conv layer in a GCN.\n \"\"\"\n\n def __init__(\n self,\n node_feat,\n edge_feat=0,\n coord_feat=16,\n fourier_feat=0,\n norm_rel_coords=False,\n norm_coord_weights=False,\n num_nearest_neighbors=0,\n dropout=0.0,\n init_eps=1e-3\n ):\n \"\"\"E(n)-invariant Graph Conv Layer\n\n Parameters\n ----------\n node_feat : int\n Node feature size.\n edge_feat : int\n Edge feature size.\n coord_feat : int\n Coordinates feature size.\n fourier_feat : int\n Fourier feature size.\n norm_rel_coords : boolean\n Fourier feature size.\n norm_coord_weights : boolean\n Fourier feature size.\n num_nearest_neighbors : int\n Fourier feature size.\n dropout : float\n Fourier feature size.\n init_eps : float\n Fourier feature size.\n \"\"\"\n super().__init__()\n self.fourier_feat = fourier_feat\n\n edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1\n dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()\n\n self.edge_mlp = nn.Sequential(\n nn.Linear(edge_input_dim, edge_input_dim * 2),\n dropout,\n GELU(),\n nn.Linear(edge_input_dim * 2, coord_feat),\n GELU()\n )\n\n self.node_mlp = nn.Sequential(\n nn.Linear(node_feat + coord_feat, node_feat * 2),\n dropout,\n GELU(),\n nn.Linear(node_feat * 2, node_feat),\n )\n\n self.norm_coord_weights = norm_coord_weights\n self.norm_rel_coords = norm_rel_coords\n\n if norm_rel_coords:\n self.rel_coords_scale = nn.Parameter(torch.ones(1))\n\n self.coords_mlp = nn.Sequential(\n nn.Linear(coord_feat, coord_feat * 4),\n dropout,\n GELU(),\n nn.Linear(coord_feat * 4, 1)\n )\n\n self.num_nearest_neighbors = num_nearest_neighbors\n\n self.init_eps = init_eps\n self.apply(self.init_)\n\n def init_(self, module):\n if type(module) in {nn.Linear}:\n # Seems to be needed to keep the network from exploding to NaN with greater depths\n nn.init.normal_(module.weight, std=self.init_eps)\n\n def forward(self, h, x, e=None, mask=None):\n \"\"\"Forward pass of the linear layer\n\n Parameters\n ----------\n h : Tensor\n The input node embedding.\n x : Tensor\n The input coordinates embedding.\n e : Tensor\n The input edge embedding.\n mask : Tensor\n The coordinate mask to apply.\n \"\"\"\n b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors\n use_nearest = num_nearest > 0\n nbhd_indices = None\n\n rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')\n rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)\n\n if use_nearest:\n nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices\n rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)\n rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)\n\n if fourier_features > 0:\n rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)\n rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')\n\n if use_nearest:\n feats_j = batched_index_select(h, nbhd_indices, dim=1)\n else:\n feats_j = rearrange(h, 'b j d -> b () j d')\n\n feats_i = rearrange(h, 'b i d -> b i () d')\n feats_i, feats_j = broadcast_tensors(feats_i, feats_j)\n\n edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)\n\n if e is not None:\n edge_input = torch.cat((edge_input, e), dim=-1)\n\n m_ij = self.edge_mlp(edge_input)\n\n m_i = m_ij.sum(dim=-2)\n\n node_mlp_input = torch.cat((h, m_i), dim=-1)\n node_out = self.node_mlp(node_mlp_input) + h\n\n # Free GPU memory\n rel_coords.detach()\n rel_dist.detach()\n feats_i.detach()\n 
feats_j.detach()\n edge_input.detach()\n m_i.detach()\n m_ij.detach()\n node_mlp_input.detach()\n if nbhd_indices is not None:\n nbhd_indices.detach()\n if mask is not None:\n mask.detach()\n\n return node_out\n\n def __repr__(self):\n return f'EnInvGraphConv(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'\n\n\nclass EnGraphConv(nn.Module):\n \"\"\"A graph neural network layer.\n\n EnGraphConv stands for a Graph Convolution E(n)-equivariant layer. It is the\n equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph\n conv layer in a GCN.\n \"\"\"\n\n def __init__(\n self,\n node_feat,\n edge_feat=0,\n coord_feat=16,\n fourier_feat=0,\n norm_rel_coords=False,\n norm_coord_weights=False,\n num_nearest_neighbors=0,\n dropout=0.0,\n init_eps=1e-3\n ):\n \"\"\"E(n)-equivariant Graph Conv Layer\n\n Parameters\n ----------\n node_feat : int\n Node feature size.\n edge_feat : int\n Edge feature size.\n coord_feat : int\n Coordinates feature size.\n fourier_feat : int\n Fourier feature size.\n norm_rel_coords : boolean\n Fourier feature size.\n norm_coord_weights : boolean\n Fourier feature size.\n num_nearest_neighbors : int\n Fourier feature size.\n dropout : float\n Fourier feature size.\n init_eps : float\n Fourier feature size.\n \"\"\"\n super().__init__()\n self.fourier_feat = fourier_feat\n\n edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1\n dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()\n\n self.edge_mlp = nn.Sequential(\n nn.Linear(edge_input_dim, edge_input_dim * 2),\n dropout,\n GELU(),\n nn.Linear(edge_input_dim * 2, coord_feat),\n GELU()\n )\n\n self.node_mlp = nn.Sequential(\n nn.Linear(node_feat + coord_feat, node_feat * 2),\n dropout,\n GELU(),\n nn.Linear(node_feat * 2, node_feat),\n )\n\n self.norm_coord_weights = norm_coord_weights\n self.norm_rel_coords = norm_rel_coords\n\n if norm_rel_coords:\n self.rel_coords_scale = nn.Parameter(torch.ones(1))\n\n self.coords_mlp = nn.Sequential(\n nn.Linear(coord_feat, coord_feat * 4),\n dropout,\n GELU(),\n nn.Linear(coord_feat * 4, 1)\n )\n\n self.num_nearest_neighbors = num_nearest_neighbors\n\n self.init_eps = init_eps\n self.apply(self.init_)\n\n def init_(self, module):\n if type(module) in {nn.Linear}:\n # Seems to be needed to keep the network from exploding to NaN with greater depths\n nn.init.normal_(module.weight, std=self.init_eps)\n\n def forward(self, h, x, e=None, mask=None):\n \"\"\"Forward pass of the linear layer\n\n Parameters\n ----------\n h : Tensor\n The input node embedding.\n x : Tensor\n The input coordinates embedding.\n e : Tensor\n The input edge embedding.\n mask : Tensor\n The coordinate mask to apply.\n \"\"\"\n nbhd_indices = None\n b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors\n use_nearest = num_nearest > 0\n\n rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')\n rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)\n\n if use_nearest:\n nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices\n rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)\n rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)\n\n if fourier_features > 0:\n rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)\n rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')\n\n if use_nearest:\n feats_j = batched_index_select(h, nbhd_indices, dim=1)\n else:\n feats_j = rearrange(h, 'b j d -> b () j d')\n\n feats_i = 
rearrange(h, 'b i d -> b i () d')\n feats_i, feats_j = broadcast_tensors(feats_i, feats_j)\n\n edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)\n\n if e is not None:\n edge_input = torch.cat((edge_input, e), dim=-1)\n\n m_ij = self.edge_mlp(edge_input)\n\n coord_weights = self.coords_mlp(m_ij)\n coord_weights = rearrange(coord_weights, 'b i j () -> b i j')\n\n if self.norm_coord_weights:\n coord_weights = coord_weights.tanh()\n\n if self.norm_rel_coords:\n rel_coords = normalize(rel_coords, dim=-1) * self.rel_coords_scale\n\n if mask is not None:\n mask_i = rearrange(mask, 'b i -> b i ()')\n\n if use_nearest:\n mask_j = batched_index_select(mask, nbhd_indices, dim=1)\n else:\n mask_j = rearrange(mask, 'b j -> b () j')\n\n mask = mask_i * mask_j\n coord_weights.masked_fill_(~mask, 0.)\n\n # Free GPU memory\n mask_i.detach()\n mask_j.detach()\n\n coords_out = einsum('b i j, b i j c -> b i c', coord_weights, rel_coords) + x\n\n m_i = m_ij.sum(dim=-2)\n\n node_mlp_input = torch.cat((h, m_i), dim=-1)\n node_out = self.node_mlp(node_mlp_input) + h\n\n # Free GPU memory\n rel_coords.detach()\n rel_dist.detach()\n feats_i.detach()\n feats_j.detach()\n edge_input.detach()\n m_i.detach()\n m_ij.detach()\n coord_weights.detach()\n node_mlp_input.detach()\n if nbhd_indices is not None:\n nbhd_indices.detach()\n if mask is not None:\n mask.detach()\n\n return node_out, coords_out\n\n def __repr__(self):\n return f'GConvEn(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code derived from DMLC (https://github.com/dmlc/dgl/blob/master/examples/pytorch/dagnn/main.py):\n# -------------------------------------------------------------------------------------------------------------------------------------\n\nclass DAGNNConv(nn.Module):\n def __init__(self,\n in_dim,\n k):\n super(DAGNNConv, self).__init__()\n\n self.s = Parameter(torch.FloatTensor(in_dim, 1))\n self.k = k\n\n self.reset_parameters()\n\n def reset_parameters(self):\n gain = nn.init.calculate_gain('sigmoid')\n nn.init.xavier_uniform_(self.s, gain=gain)\n\n def forward(self, graph, feats):\n with graph.local_scope():\n results = [feats]\n\n degs = graph.in_degrees().float()\n norm = torch.pow(degs, -0.5)\n norm = norm.to(feats.device).unsqueeze(1)\n\n for _ in range(self.k):\n feats = feats * norm\n graph.ndata['h'] = feats\n graph.update_all(fn.copy_u('h', 'm'),\n fn.sum('m', 'h'))\n feats = graph.ndata['h']\n feats = feats * norm\n results.append(feats)\n\n H = torch.stack(results, dim=1)\n S = sigmoid(torch.matmul(H, self.s))\n S = S.permute(0, 2, 1)\n H = torch.matmul(S, H).squeeze()\n\n return H\n\n\nclass MLPLayer(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n bias=True,\n activation=None,\n dropout=0):\n super(MLPLayer, self).__init__()\n\n self.linear = nn.Linear(in_dim, out_dim, bias=bias)\n self.activation = activation\n self.dropout = nn.Dropout(dropout)\n self.reset_parameters()\n\n def reset_parameters(self):\n gain = 1.\n if self.activation is relu:\n gain = nn.init.calculate_gain('relu')\n nn.init.xavier_uniform_(self.linear.weight, gain=gain)\n if self.linear.bias is not None:\n nn.init.zeros_(self.linear.bias)\n\n def forward(self, feats):\n feats = self.dropout(feats)\n feats = self.linear(feats)\n if self.activation:\n feats = self.activation(feats)\n\n return feats\n\n\nclass DAGNN(nn.Module):\n def __init__(self,\n k,\n in_dim,\n 
hid_dim,\n out_dim,\n bias=True,\n activation=relu,\n dropout=0, ):\n super(DAGNN, self).__init__()\n self.mlp = nn.ModuleList()\n self.mlp.append(MLPLayer(in_dim=in_dim, out_dim=hid_dim, bias=bias,\n activation=activation, dropout=dropout))\n self.mlp.append(MLPLayer(in_dim=hid_dim, out_dim=out_dim, bias=bias,\n activation=None, dropout=dropout))\n self.dagnn = DAGNNConv(in_dim=out_dim, k=k)\n\n def forward(self, graph, feats):\n for layer in self.mlp:\n feats = layer(feats)\n feats = self.dagnn(graph, feats)\n return feats\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code curated for DeepInteract (https://github.com/jianlin-cheng/DeepInteract):\n# -------------------------------------------------------------------------------------------------------------------------------------\nclass SAGEConv(nn.Module):\n \"\"\"GraphSAGE convolution module used by the GraphSAGE model.\n This variant of the SAGEConv layer is able to infer edges via a soft estimation on messages.\n\n Parameters\n ----------\n in_feat : int\n Input feature size.\n out_feat : int\n Output feature size.\n \"\"\"\n\n def __init__(self, in_feat, out_feat):\n super(SAGEConv, self).__init__()\n # A linear submodule for projecting the input and neighbor feature to the output.\n self.linear = nn.Linear(in_feat * 2, out_feat)\n\n def forward(self, g, h):\n \"\"\"Forward computation\n\n Parameters\n ----------\n g : Graph\n The input graph.\n h : Tensor\n The input node feature.\n \"\"\"\n with g.local_scope():\n g.ndata['h'] = h\n # update_all is a message passing API.\n g.update_all(message_func=fn.copy_u('h', 'm'), reduce_func=fn.mean('m', 'h_N'))\n h_N = g.ndata['h_N']\n h_total = torch.cat([h, h_N], dim=1)\n return self.linear(h_total)\n"
] | [
[
"numpy.sqrt",
"torch.cat",
"torch.zeros",
"torch.nn.ParameterDict",
"torch.sum",
"torch.no_grad",
"torch.FloatTensor",
"torch.pow",
"torch.nn.init.calculate_gain",
"torch.nn.Dropout",
"torch.ones",
"torch.einsum",
"torch.nn.ModuleDict",
"torch.randn",
"torch.nn.Sequential",
"torch.nn.ModuleList",
"torch.nn.Linear",
"torch.nn.init.normal_",
"torch.nn.init.zeros_",
"torch.stack",
"torch.nn.functional.normalize",
"torch.nn.GELU",
"torch.nn.LayerNorm",
"torch.nn.init.kaiming_uniform_",
"torch.matmul",
"torch.nn.Identity",
"torch.nn.init.xavier_uniform_",
"torch.broadcast_tensors",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
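The `EnGraphConv` layer in the row above returns updated node features and coordinates from `forward(h, x, e=None, mask=None)`. A minimal driving sketch, assuming the class is importable (the row gives no package path, so the module name below is hypothetical); batch size, feature widths, and neighbor count are illustrative, with tensor shapes taken from the `rearrange` patterns in the forward pass (`b n d` features, `b n 3` coordinates):

```python
import torch
from egnn_layers import EnGraphConv  # hypothetical module name; adjust to your checkout

batch, n_nodes, node_feat = 2, 16, 32
h = torch.randn(batch, n_nodes, node_feat)           # node embeddings
x = torch.randn(batch, n_nodes, 3)                   # 3-D coordinates
mask = torch.ones(batch, n_nodes, dtype=torch.bool)  # all nodes valid

layer = EnGraphConv(node_feat=node_feat, coord_feat=16,
                    fourier_feat=4, num_nearest_neighbors=8)
h_out, x_out = layer(h, x, mask=mask)  # (node_out, coords_out)
assert h_out.shape == h.shape and x_out.shape == x.shape
```

Both outputs keep their input shapes because the layer adds its MLP update to `h` and its weighted relative-coordinate sum to `x`.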
SauravMaheshkar/rtdl | [
"c3f8051210d1cd7fdffc5a63221e3c4e84415ed8"
] | [
"bin/node.py"
] | [
"# %%\nimport gc\nimport itertools\nimport math\nimport typing as ty\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim.swa_utils as swa_utils\nimport zero\nfrom torch import Tensor\nimport wandb\n\nimport lib\nimport lib.node as node\n\n\n# %%\nclass NODE(nn.Module):\n def __init__(\n self,\n *,\n d_in: int,\n num_layers: int,\n layer_dim: int,\n depth: int,\n tree_dim: int,\n choice_function: str,\n bin_function: str,\n d_out: int,\n categories: ty.Optional[ty.List[int]],\n d_embedding: int,\n ) -> None:\n super().__init__()\n\n if categories is not None:\n d_in += len(categories) * d_embedding\n category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)\n self.register_buffer('category_offsets', category_offsets)\n self.category_embeddings = nn.Embedding(sum(categories), d_embedding)\n nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))\n print(f'{self.category_embeddings.weight.shape=}')\n\n self.d_out = d_out\n self.block = node.DenseBlock(\n input_dim=d_in,\n num_layers=num_layers,\n layer_dim=layer_dim,\n depth=depth,\n tree_dim=tree_dim,\n bin_function=getattr(node, bin_function),\n choice_function=getattr(node, choice_function),\n flatten_output=False,\n )\n\n def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:\n if x_cat is not None:\n x_cat = self.category_embeddings(x_cat + self.category_offsets[None])\n x = torch.cat([x_num, x_cat.view(x_cat.size(0), -1)], dim=-1)\n else:\n x = x_num\n\n x = self.block(x)\n x = x[..., : self.d_out].mean(dim=-2)\n x = x.squeeze(-1)\n return x\n\n\n# %%\nargs, output = lib.load_config()\nassert 'weight_decay' not in args, 'NODE architecture performs badly with weight decay'\nif 'swa' in args:\n assert args['swa']['n_checkpoints'] > 1\n\n# %%\nzero.set_randomness(args['seed'])\ndataset_dir = lib.get_path(args['data']['path'])\nstats: ty.Dict[str, ty.Any] = {\n 'dataset': dataset_dir.name,\n 'algorithm': Path(__file__).stem,\n **lib.load_json(output / 'stats.json'),\n}\n\nD = lib.Dataset.from_dir(dataset_dir)\nX = D.build_X(\n normalization=args['data'].get('normalization'),\n num_nan_policy='mean',\n cat_nan_policy='new',\n cat_policy=args['data'].get('cat_policy', 'indices'),\n cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),\n seed=args['seed'],\n)\nif not isinstance(X, tuple):\n X = (X, None)\n\nzero.set_randomness(args['seed'])\nY, y_info = D.build_y(args['data'].get('y_policy'))\nlib.dump_pickle(y_info, output / 'y_info.pickle')\nX = tuple(None if x is None else lib.to_tensors(x) for x in X)\nY = lib.to_tensors(Y)\ndevice = lib.get_device()\nif device.type != 'cpu':\n X = tuple(None if x is None else {k: v.to(device) for k, v in x.items()} for x in X)\n Y_device = {k: v.to(device) for k, v in Y.items()}\nelse:\n Y_device = Y\nX_num, X_cat = X\nif not D.is_multiclass:\n Y_device = {k: v.float() for k, v in Y_device.items()}\n\ntrain_size = D.size(lib.TRAIN)\nbatch_size, epoch_size = (\n stats['batch_size'],\n stats['epoch_size'],\n) = lib.get_epoch_parameters(train_size, args['training'].get('batch_size', 'v3'))\neval_batch_size = args['training']['eval_batch_size']\nchunk_size = None\nstats['chunk_sizes'] = {}\nstats['eval_batch_sizes'] = {}\n\nloss_fn = (\n F.binary_cross_entropy_with_logits\n if D.is_binclass\n else F.cross_entropy\n if D.is_multiclass\n else F.mse_loss\n)\n\nargs['model'].setdefault('d_embedding', None)\nmodel = NODE(\n d_in=0 if X_num is None else 
X_num['train'].shape[1],\n d_out=D.info['n_classes'] if D.is_multiclass else 1,\n categories=lib.get_categories(X_cat),\n **args['model'],\n).to(device)\nif torch.cuda.device_count() > 1: # type: ignore[code]\n print('Using nn.DataParallel')\n model = nn.DataParallel(model)\nstats['n_parameters'] = lib.get_n_parameters(model)\noptimizer = lib.make_optimizer(\n args['training']['optimizer'],\n model.parameters(),\n args['training']['lr'],\n args['training']['weight_decay'],\n)\n\nstream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))\nprogress = zero.ProgressTracker(args['training']['patience'])\ntraining_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}\nstage = 0\nlr_n_decays = 0\ntimer = zero.Timer()\nswa_stage_first_epoch = None\n\n\ndef print_epoch_info():\n print(\n f'\\n>>> Epoch {stream.epoch} | Stage {stage} | {lib.format_seconds(timer())} | {output}'\n )\n details = {'lr': lib.get_lr(optimizer), 'chunk_size': chunk_size}\n details.update((x, stats[x]) for x in ['batch_size', 'epoch_size', 'n_parameters'])\n print(' | '.join(f'{k} = {v}' for k, v in details.items()))\n\n\ndef get_checkpoint_path(suffix):\n return output / f'checkpoint_{suffix}.pt'\n\n\ndef step(batch_idx):\n logits = model(\n X_num[lib.TRAIN][batch_idx],\n None if X_cat is None else X_cat[lib.TRAIN][batch_idx],\n )\n targets = Y_device[lib.TRAIN][batch_idx] # type: ignore[code]\n if not D.is_multiclass:\n targets = targets.to(logits.dtype)\n return logits, targets\n\n\ndef _predict(part):\n result = []\n for idx in lib.IndexLoader(\n D.size(part),\n args['training']['eval_batch_size'],\n False,\n device,\n ):\n result.append(\n model(\n None if X_num is None else X_num[part][idx],\n None if X_cat is None else X_cat[part][idx],\n )\n )\n return torch.cat(result).cpu()\n\n\[email protected]_grad()\ndef predict(m, part):\n global eval_batch_size\n m.eval()\n random_state = zero.get_random_state()\n while eval_batch_size:\n try:\n zero.set_random_state(random_state)\n return _predict(part)\n except RuntimeError as err:\n if not lib.is_oom_exception(err):\n raise\n zero.free_memory()\n gc.collect()\n eval_batch_size //= 2\n print('New eval batch size:', eval_batch_size)\n stats['eval_batch_sizes'][stream.epoch] = eval_batch_size\n raise RuntimeError('Not enough memory even for eval_batch_size=1')\n\n\[email protected]_grad()\ndef evaluate(m, parts):\n metrics = {}\n predictions = {}\n for part in parts:\n predictions[part] = predict(m, part).numpy()\n metrics[part] = lib.calculate_metrics(\n D.info['task_type'],\n Y[part].numpy(), # type: ignore[code]\n predictions[part], # type: ignore[code]\n 'logits',\n y_info,\n )\n\n for part, part_metrics in metrics.items():\n print(f'[{part:<5}]', lib.make_summary(part_metrics))\n\n return metrics, predictions\n\n\nSTATE_VARIABLES = [\n 'progress',\n 'stats',\n 'timer',\n 'training_log',\n 'stage',\n 'swa_stage_first_epoch',\n 'lr_n_decays',\n 'chunk_size',\n 'eval_batch_size',\n]\n\n\ndef save_checkpoint(suffix):\n model_artifact = wandb.Artifact('node-artifact', type='model')\n torch.save(\n {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'stream': stream.state_dict(),\n 'random_state': zero.get_random_state(),\n **{x: globals()[x] for x in STATE_VARIABLES},\n },\n get_checkpoint_path(suffix),\n )\n lib.dump_stats(stats, output, suffix == 'final')\n lib.backup_output(output)\n model_artifact.add_file(get_checkpoint_path(suffix))\n wandb.run.log_artifact(model_artifact)\n\nfor stage in list(range(args.get('swa', 
{}).get('n_checkpoints', 1)))[::-1]:\n if get_checkpoint_path(stage).exists():\n print(f'Loading checkpoint {get_checkpoint_path(stage).name}')\n c = torch.load(get_checkpoint_path(stage))\n model.load_state_dict(c['model'])\n optimizer.load_state_dict(c['optimizer'])\n stream.load_state_dict(c['stream'])\n globals().update({x: c[x] for x in STATE_VARIABLES})\n stats.setdefault('old_stats', []).append(deepcopy(stats))\n stats.setdefault('continuations', []).append(stream.epoch)\n zero.set_random_state(c['random_state'])\n break\n\n\n# %%\ntimer.run()\nwith torch.no_grad():\n # NODE-specific initialization\n if stream.epoch == 0:\n model.eval()\n size = 2048\n while True:\n try:\n zero.set_randomness(args['seed'])\n x = step(torch.randperm(train_size)[:size])\n del x\n except RuntimeError as err:\n if not lib.is_oom_exception(err):\n raise\n size //= 2\n else:\n break\n\nwandb.init(project=\"RTDL\", config=args)\nfor epoch in stream.epochs(args['training']['n_epochs']):\n print_epoch_info()\n\n epoch_losses = []\n for batch_idx in epoch:\n loss, new_chunk_size = lib.learn_with_auto_virtual_batch(\n model, optimizer, loss_fn, step, batch_idx, batch_size, chunk_size\n )\n wandb.log({\"Training Loss\": loss})\n epoch_losses.append(loss.detach())\n if new_chunk_size and new_chunk_size < (chunk_size or batch_size):\n chunk_size = new_chunk_size\n print('New chunk size:', chunk_size)\n stats['chunk_sizes'][stream.iteration] = chunk_size\n zero.free_memory()\n gc.collect()\n epoch_losses = torch.stack(epoch_losses).tolist()\n training_log[lib.TRAIN].extend(epoch_losses)\n print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')\n\n metrics, predictions = evaluate(model, [lib.VAL, lib.TEST])\n wandb.log({\"score\": metrics[lib.VAL]['score']})\n for k, v in metrics.items():\n training_log[k].append(v)\n wandb.log({k:v})\n\n progress.update(metrics[lib.VAL]['score'])\n if progress.success:\n print('New best epoch!')\n stats[f'best_epoch_{stage}'] = stream.epoch\n stats[f'metrics_{stage}'] = metrics\n save_checkpoint(stage)\n for k, v in predictions.items():\n np.save(output / f'p_{stage}_{k}.npy', v)\n wandb.log({f\"predictions_{k}\": v})\n\n elif progress.fail:\n\n if stage == 0 and lr_n_decays < args['training']['lr_n_decays']:\n print('Reducing lr...')\n stats[f'lr_decay_{lr_n_decays}'] = stream.epoch\n lib.set_lr(optimizer, lib.get_lr(optimizer) * args['training']['lr_decay'])\n lr_n_decays += 1\n progress.forget_bad_updates()\n\n else:\n print(f'Finishing stage {stage}...')\n stats[f'time_{stage}'] = lib.format_seconds(timer())\n if 'swa' not in args or stage + 1 == args['swa']['n_checkpoints']:\n break\n\n best_stage_checkpoint = torch.load(get_checkpoint_path(stage))\n model.load_state_dict(best_stage_checkpoint['model'])\n optimizer.load_state_dict(best_stage_checkpoint['optimizer'])\n\n progress = zero.ProgressTracker(args['swa']['patience'])\n lib.set_lr(optimizer, args['training']['lr'] * args['swa']['lr_factor'])\n swa_stage_first_epoch = stream.epoch + 1\n stage += 1\n\n if stream.epoch == swa_stage_first_epoch:\n lib.set_lr(optimizer, args['training']['lr'])\n\n\n# %%\ndef load_best_model(stage):\n model.load_state_dict(torch.load(get_checkpoint_path(stage))['model'])\n\n\nif 'swa' in args:\n print('\\nRunning SWA...')\n swa_model = swa_utils.AveragedModel(model)\n swa_progress = zero.ProgressTracker(None)\n best_swa_model = None\n\n for stage in range(args['swa']['n_checkpoints']):\n load_best_model(stage)\n swa_model.update_parameters(model)\n\n if stage > 0 
and args['swa']['update_bn_n_epochs']:\n zero.set_randomness(args['seed'])\n with torch.no_grad():\n swa_utils.update_bn(\n itertools.chain.from_iterable(\n zero.iter_batches(\n X[lib.TRAIN], chunk_size or batch_size, shuffle=True\n )\n for _ in range(args['swa']['update_bn_n_epochs'])\n ),\n swa_model,\n device,\n )\n swa_progress.update(\n evaluate(swa_model if stage > 0 else model, [lib.VAL])[0][lib.VAL]['score']\n )\n if swa_progress.success:\n print('New best SWA checkpoint!')\n stats['n_swa_checkpoints'] = stage + 1\n if stage > 0:\n best_swa_model = deepcopy(swa_model)\n if best_swa_model is None:\n load_best_model(0)\n else:\n lib.load_swa_state_dict(model, best_swa_model)\nelse:\n load_best_model(0)\n\nprint('\\nRunning the final evaluation...')\nstats['metrics'], predictions = evaluate(model, lib.PARTS)\nfor k, v in predictions.items():\n np.save(output / f'p_{k}.npy', v)\n wandb.run.summary[f\"final_prediction_{k}\"] = v\nstats['time_final'] = lib.format_seconds(timer())\nsave_checkpoint('final')\nprint(f'Done! Time elapsed: {stats[\"time_final\"]}')\nprint(\n '\\n!!! WARNING !!! The metrics for a single model are stored under the \"metrics_0\" key.\\n'\n)\n"
] | [
[
"torch.cat",
"torch.randperm",
"torch.cuda.device_count",
"numpy.save",
"torch.tensor",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.stack",
"torch.optim.swa_utils.AveragedModel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
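One reusable idea in `NODE.__init__` above is packing all categorical columns into a single `nn.Embedding` table by shifting each column's raw indices with cumulative offsets. A self-contained sketch of just that trick (the cardinalities and embedding size below are made-up examples, not values from the row):

```python
import torch
import torch.nn as nn

categories = [3, 5, 2]                                   # cardinality of each categorical column
offsets = torch.tensor([0] + categories[:-1]).cumsum(0)  # tensor([0, 3, 8])
emb = nn.Embedding(sum(categories), 4)                   # one shared table with 3 + 5 + 2 rows

x_cat = torch.tensor([[2, 0, 1],                         # raw per-column indices
                      [1, 4, 0]])
vectors = emb(x_cat + offsets[None])                     # columns land in disjoint row ranges
print(vectors.shape)                                     # torch.Size([2, 3, 4])
```

The offsets guarantee that index 1 in column 0 and index 1 in column 2 hit different embedding rows, so one table serves every column without collisions.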
ColdFrenzy/Adaptive_Learning | [
"02cdd519a7e224fe5f2a49b0c21baa3dac5ce0e1"
] | [
"models/custom_models.py"
] | [
"import tensorflow as tf\n\n\ndef dense_model(in_shape, hidden_layer_shapes, num_outputs, name):\n x = None\n inputs = tf.keras.layers.Input(shape=(in_shape,), name=\"observations\")\n for i,layer_shape in enumerate(hidden_layer_shapes):\n x = tf.keras.layers.Dense(\n layer_shape, name=\"dense_\" + str(i), activation=tf.nn.relu\n )(x if x is not None else inputs)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(\n x\n )\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name)\n\n\ndef res_net_model(in_shape, hidden_layer_shapes, num_outputs, name):\n \"\"\"\n hidden_layer_shapes : list\n list with the shape of every hidden layer\n Simple neural network block with n_layers dense layers and a residual connection \n \"\"\"\n x = None\n inputs = tf.keras.layers.Input(shape=(in_shape,), name=\"observations\")\n for i,layer_shape in enumerate(hidden_layer_shapes):\n x = tf.keras.layers.Dense(\n layer_shape, name=\"dense_\"+str(i), activation=tf.nn.relu\n )(x if x is not None else inputs)\n x = tf.keras.layers.Dense(in_shape, name=\"dense_\" + str(i) +\".2\", activation=tf.nn.relu)(\n x\n )\n x = tf.keras.layers.Add()([inputs, x])\n x = tf.keras.layers.ReLU()(x)\n x = tf.keras.layers.BatchNormalization()(x)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(\n x\n )\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name)\n\n\ndef conv_dense_model(in_shape, num_outputs, name):\n\n if len(in_shape) == 2:\n in_shape = in_shape + (1,)\n inputs = tf.keras.Input(shape=in_shape , name=\"observations\")\n\n x = tf.keras.layers.Conv2D(64, 4, name=\"conv_1\")(inputs)\n x = tf.keras.layers.Conv2D(64, 2, name=\"conv_2\")(x)\n x = tf.keras.layers.Conv2D(64, 2, name=\"conv_3\")(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(64, name=\"dense_1\",activation=tf.nn.relu)(x)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(x)\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name) \n\n\ndef conv_dense_model_connect3(in_shape,num_outputs,name):\n if len(in_shape) == 2:\n in_shape = in_shape + (1,)\n inputs = tf.keras.Input(shape=in_shape , name=\"observations\")\n\n x = tf.keras.layers.Conv2D(64, 3, name=\"conv_1\")(inputs)\n x = tf.keras.layers.Conv2D(64, 2, name=\"conv_2\")(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(64, name=\"dense_1\",activation=tf.nn.relu)(x)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(x)\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name) \n\ndef dense_q_model(in_shape, hidden_shape, num_outputs, name):\n inputs = tf.keras.layers.Input(shape=(in_shape,), name=\"observations\")\n hidden_layer = tf.keras.layers.Dense(\n hidden_shape, name=\"layer1\", activation=tf.nn.relu\n )(inputs)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(\n hidden_layer\n )\n return tf.keras.Model(inputs, out_layer, name=name)\n\n\nif __name__ == \"__main__\":\n # model = res_net_model(42, [256,128,64], 7, \"res_model\")\n # model = dense_model(42, [256,128,64], 7, \"dense_block\")\n # model.summary()\n model = conv_dense_model((7,6,1),7,\"conv_dense_model\")\n 
tf.keras.utils.plot_model(model, \"conv_dense_model.png\", True)\n"
] | [
[
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
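The `res_net_model` builder above interleaves wide dense layers with projections back to the input width plus a skip connection, ReLU, and batch norm. Because the escaped string loses indentation, the sketch below makes one defensible reading explicit (projection, `Add`, `ReLU`, `BatchNormalization` inside the loop, matching the per-`i` `"dense_i.2"` layer names); treat it as an interpretation, not the repository's definitive structure:

```python
import tensorflow as tf

def residual_dense_block(in_shape, hidden_layer_shapes, name="res_block"):
    inputs = tf.keras.layers.Input(shape=(in_shape,), name="observations")
    x = inputs
    for i, layer_shape in enumerate(hidden_layer_shapes):
        x = tf.keras.layers.Dense(layer_shape, activation=tf.nn.relu,
                                  name=f"dense_{i}")(x)
        x = tf.keras.layers.Dense(in_shape, activation=tf.nn.relu,
                                  name=f"dense_{i}.2")(x)
        x = tf.keras.layers.Add()([inputs, x])   # skip connection back to the block input
        x = tf.keras.layers.ReLU()(x)
        x = tf.keras.layers.BatchNormalization()(x)
    return tf.keras.Model(inputs, x, name=name)

block = residual_dense_block(42, [256, 128, 64])  # 42 = flattened 7x6 Connect Four board
block.summary()
```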
kcyu2014/nas-landmarkreg | [
"a00c3619bf4042e446e1919087f0b09fe9fa3a65",
"a00c3619bf4042e446e1919087f0b09fe9fa3a65",
"a00c3619bf4042e446e1919087f0b09fe9fa3a65"
] | [
"utils_nvidia.py",
"nasws/cnn/search_space/nasbench101/lib/cifar.py",
"nasws/cnn/search_space/monodepth/models/backbone/drn.py"
] | [
"import argparse\nimport os\nimport shutil\nimport time\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport numpy as np\n\ntry:\n from apex.parallel import DistributedDataParallel as DDP\n from apex.fp16_utils import *\n from apex import amp, optimizers\n from apex.multi_tensor_apply import multi_tensor_applier\nexcept ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to run this example.\")\n\n\ntry:\n from nvidia.dali.plugin.pytorch import DALIClassificationIterator\n from nvidia.dali.pipeline import Pipeline\n import nvidia.dali.ops as ops\n import nvidia.dali.types as types\nexcept ImportError:\n raise ImportError(\"Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.\")\n\nfrom nasws.cnn.utils import AverageMeter\nfrom utils import accuracy\n\n\n# item() is a recent addition, so this helps with backward compatibility.\ndef to_python_float(t):\n if hasattr(t, 'item'):\n return t.item()\n else:\n return t[0]\n\n\nclass HybridTrainPipe(Pipeline):\n def __init__(self, batch_size, num_threads, device_id, data_dir, crop,\n shard_id, num_shards, dali_cpu=False, args=None,\n file_list=None\n ):\n \n super(HybridTrainPipe, self).__init__(batch_size,\n num_threads,\n device_id,\n seed=12 + device_id)\n \n self.input = ops.FileReader(file_root=data_dir,\n shard_id=args.apex_local_rank,\n num_shards=args.world_size,\n random_shuffle=True,\n pad_last_batch=True,\n file_list=file_list)\n #let user decide which pipeline works him bets for RN version he runs\n dali_device = 'cpu' if dali_cpu else 'gpu'\n decoder_device = 'cpu' if dali_cpu else 'mixed'\n # This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet\n # without additional reallocations\n device_memory_padding = 211025920 if decoder_device == 'mixed' else 0\n host_memory_padding = 140544512 if decoder_device == 'mixed' else 0\n self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB,\n device_memory_padding=device_memory_padding,\n host_memory_padding=host_memory_padding,\n random_aspect_ratio=[0.8, 1.25],\n random_area=[0.1, 1.0],\n num_attempts=100)\n self.res = ops.Resize(device=dali_device,\n resize_x=crop,\n resize_y=crop,\n interp_type=types.INTERP_TRIANGULAR)\n self.cmnp = ops.CropMirrorNormalize(device=\"gpu\",\n output_dtype=types.FLOAT,\n output_layout=types.NCHW,\n crop=(crop, crop),\n image_type=types.RGB,\n mean=[0.485 * 255,0.456 * 255,0.406 * 255],\n std=[0.229 * 255,0.224 * 255,0.225 * 255])\n self.coin = ops.CoinFlip(probability=0.5)\n logging.info('DALI \"{0}\" variant'.format(dali_device))\n\n def define_graph(self):\n rng = self.coin()\n self.jpegs, self.labels = self.input(name=\"Reader\")\n images = self.decode(self.jpegs)\n images = self.res(images)\n output = self.cmnp(images.gpu(), mirror=rng)\n return [output, self.labels]\n\n\nclass HybridValPipe(Pipeline):\n def __init__(self, batch_size, num_threads, device_id, data_dir, crop,\n size, shard_id, num_shards, args=None):\n super(HybridValPipe, self).__init__(batch_size,\n num_threads,\n device_id,\n seed=12 + device_id)\n self.input = ops.FileReader(file_root=data_dir,\n 
shard_id=args.apex_local_rank,\n num_shards=args.world_size,\n random_shuffle=False,\n pad_last_batch=True)\n self.decode = ops.ImageDecoder(device=\"mixed\", output_type=types.RGB)\n self.res = ops.Resize(device=\"gpu\",\n resize_shorter=size,\n interp_type=types.INTERP_TRIANGULAR)\n self.cmnp = ops.CropMirrorNormalize(device=\"gpu\",\n output_dtype=types.FLOAT,\n output_layout=types.NCHW,\n crop=(crop, crop),\n image_type=types.RGB,\n mean=[0.485 * 255,0.456 * 255,0.406 * 255],\n std=[0.229 * 255,0.224 * 255,0.225 * 255])\n\n def define_graph(self):\n self.jpegs, self.labels = self.input(name=\"Reader\")\n images = self.decode(self.jpegs)\n images = self.res(images)\n output = self.cmnp(images)\n return [output, self.labels]\n\n\ndef fast_collate(batch, memory_format):\n\n imgs = [img[0] for img in batch]\n targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)\n w = imgs[0].size()[1]\n h = imgs[0].size()[2]\n # print(imgs[0].size())\n tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format)\n for i, img in enumerate(imgs):\n nump_array = np.asarray(img, dtype=np.uint8)\n if(nump_array.ndim < 3):\n nump_array = np.expand_dims(nump_array, axis=-1)\n # nump_array = np.rollaxis(nump_array, 2)\n # print(nump_array.shape)\n tensor[i] += torch.from_numpy(nump_array)\n return tensor, targets\n\n\nclass data_prefetcher():\n def __init__(self, loader):\n self.loader = iter(loader)\n self.stream = torch.cuda.Stream()\n self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)\n self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)\n # With Amp, it isn't necessary to manually convert data to half.\n # if args.fp16:\n # self.mean = self.mean.half()\n # self.std = self.std.half()\n self.preload()\n\n def preload(self):\n try:\n self.next_input, self.next_target = next(self.loader)\n except StopIteration:\n self.next_input = None\n self.next_target = None\n return\n # if record_stream() doesn't work, another option is to make sure device inputs are created\n # on the main stream.\n # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')\n # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')\n # Need to make sure the memory allocated for next_* is not still in use by the main stream\n # at the time we start copying to next_*:\n # self.stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(self.stream):\n self.next_input = self.next_input.cuda(non_blocking=True)\n self.next_target = self.next_target.cuda(non_blocking=True)\n # more code for the alternative if record_stream() doesn't work:\n # copy_ will record the use of the pinned source tensor in this side stream.\n # self.next_input_gpu.copy_(self.next_input, non_blocking=True)\n # self.next_target_gpu.copy_(self.next_target, non_blocking=True)\n # self.next_input = self.next_input_gpu\n # self.next_target = self.next_target_gpu\n\n # With Amp, it isn't necessary to manually convert data to half.\n # if args.fp16:\n # self.next_input = self.next_input.half()\n # else:\n self.next_input = self.next_input.float()\n self.next_input = self.next_input.sub_(self.mean).div_(self.std)\n\n def next(self):\n torch.cuda.current_stream().wait_stream(self.stream)\n input = self.next_input\n target = self.next_target\n if input is not None:\n input.record_stream(torch.cuda.current_stream())\n if target is not None:\n target.record_stream(torch.cuda.current_stream())\n 
self.preload()\n return input, target\n\n\ndef reduce_tensor(tensor, world_size):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\n rt /= world_size\n return rt\n\n\n\ndef adjust_learning_rate(optimizer, epoch, step, len_epoch, args):\n \"\"\"LR schedule that should yield 76% converged accuracy with batch size 256\"\"\"\n factor = epoch // 30\n\n if epoch >= 80:\n factor = factor + 1\n\n lr = args.learning_rate*(0.1**factor)\n\n \"\"\"Warmup\"\"\"\n if epoch < 5:\n lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)\n\n # if(args.apex_local_rank == 0):\n # print(\"epoch = {}, step = {}, lr = {}\".format(epoch, step, lr))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n \n# def adjust_learning_rate(optimizer, epoch, args):\n# # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large\n# if args.epochs - epoch > 5:\n# lr = args.learning_rate * (args.epochs - 5 - epoch) / (args.epochs - 5)\n# else:\n# lr = args.learning_rate * (args.epochs - epoch) / ((args.epochs - 5) * 5)\n# for param_group in optimizer.param_groups:\n# param_group['lr'] = lr\n# return lr \n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n end = time.time()\n prefetcher = data_prefetcher(train_loader)\n input, target = prefetcher.next()\n i = 0\n while input is not None:\n i += 1\n if args.apex_profiling >= 0 and i == args.apex_profiling:\n print(\"Profiling begun at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStart()\n\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"Body of iteration {}\".format(i))\n\n adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)\n\n # compute output\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"forward\")\n logits, logtis_aux = model(input)\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n loss = criterion(logits, target)\n if args.auxiliary:\n loss_aux = criterion(logtis_aux, target)\n loss += args.auxiliary_weight * loss_aux\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"backward\")\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n # for param in model.parameters():\n # print(param.data.double().sum().item(), param.grad.data.double().sum().item())\n\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"optimizer.step()\")\n optimizer.step()\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if i%args.report_freq == 0:\n # Every report_freq iterations, check the loss, accuracy, and speed.\n # For best performance, it doesn't make sense to print these metrics every\n # iteration, since they incur an allreduce and some host<->device syncs.\n\n # Measure accuracy\n prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))\n\n # Average loss and accuracy across processes for logging\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n # to_python_float incurs a host<->device sync\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n 
torch.cuda.synchronize()\n batch_time.update((time.time() - end)/args.report_freq)\n end = time.time()\n\n if args.apex_local_rank == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {3:.3f} ({4:.3f})\\t'\n 'Loss {loss.val:.10f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader),\n args.world_size*args.batch_size/batch_time.val,\n args.world_size*args.batch_size/batch_time.avg,\n batch_time=batch_time,\n loss=losses, top1=top1, top5=top5))\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"prefetcher.next()\")\n input, target = prefetcher.next()\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n # Pop range \"Body of iteration {}\".format(i)\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if args.apex_profiling >= 0 and i == args.apex_profiling + 10:\n print(\"Profiling ended at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStop()\n quit()\n return top1.avg, losses.avg\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n\n prefetcher = data_prefetcher(val_loader)\n input, target = prefetcher.next()\n i = 0\n while input is not None:\n i += 1\n\n # compute output\n with torch.no_grad():\n output, _ = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # TODO: Change timings to mirror train().\n if args.apex_local_rank == 0 and i % args.report_freq == 0:\n logging.info('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {2:.3f} ({3:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader),\n args.world_size * args.batch_size / batch_time.val,\n args.world_size * args.batch_size / batch_time.avg,\n batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n input, target = prefetcher.next()\n\n logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg, losses.avg\n\n\ndef dali_apex_train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n end = time.time()\n\n for i, data in enumerate(train_loader):\n input = data[0][\"data\"]\n target = data[0][\"label\"].squeeze().cuda().long()\n train_loader_len = int(math.ceil(train_loader._size / args.batch_size))\n\n if args.dali_profiling >= 0 and i == args.dali_profiling:\n print(\"Profiling begun at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStart()\n\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"Body of iteration 
{}\".format(i))\n\n # adjust_learning_rate(optimizer, epoch, i, train_loader_len, args)\n if args.debug:\n if i > 10:\n logging.info('Break in debug mode after 10 batchs...')\n break\n\n # compute output\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"forward\")\n logits, logtis_aux = model(input)\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n loss = criterion(logits, target)\n if args.auxiliary:\n loss_aux = criterion(logtis_aux, target)\n loss += args.auxiliary_weight * loss_aux\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"backward\")\n if args.apex_opt_level is not None:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"optimizer.step()\")\n optimizer.step()\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if i%args.report_freq == 0:\n # Every print_freq iterations, check the loss, accuracy, and speed.\n # For best performance, it doesn't make sense to print these metrics every\n # iteration, since they incur an allreduce and some host<->device syncs.\n\n # Measure accuracy\n prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))\n\n # Average loss and accuracy across processes for logging\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n # to_python_float incurs a host<->device sync\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n torch.cuda.synchronize()\n batch_time.update((time.time() - end)/args.report_freq)\n end = time.time()\n\n if args.apex_local_rank == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {3:.3f} ({4:.3f})\\t'\n 'Loss {loss.val:.10f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, train_loader_len,\n args.world_size*args.batch_size/batch_time.val,\n args.world_size*args.batch_size/batch_time.avg,\n batch_time=batch_time,\n loss=losses, top1=top1, top5=top5))\n\n # Pop range \"Body of iteration {}\".format(i)\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if args.dali_profiling >= 0 and i == args.dali_profiling + 2:\n print(\"Profiling ended at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStop()\n quit()\n\n return top1.avg, losses.avg\n\n\n\ndef dali_validate(val_loader, model, criterion, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n\n for i, data in enumerate(val_loader):\n input = data[0][\"data\"]\n target = data[0][\"label\"].squeeze().cuda().long()\n val_loader_len = int(val_loader._size / args.batch_size)\n\n if args.debug:\n if i > 10:\n break\n\n # compute output\n with torch.no_grad():\n output, _ = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n 
prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # TODO: Change timings to mirror train().\n if args.apex_local_rank == 0 and i % args.report_freq == 0:\n logging.info('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {2:.3f} ({3:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, val_loader_len,\n args.world_size * args.batch_size / batch_time.val,\n args.world_size * args.batch_size / batch_time.avg,\n batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg, losses.avg\n",
"# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"CIFAR-10 data pipeline with preprocessing.\n\nThe data is generated via generate_cifar10_tfrecords.py.\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\ntry:\n import tensorflow as tf\nexcept:\n pass\n\nWIDTH = 32\nHEIGHT = 32\nRGB_MEAN = [125.31, 122.95, 113.87]\nRGB_STD = [62.99, 62.09, 66.70]\n\n\nclass CIFARInput(object):\n \"\"\"Wrapper class for input_fn passed to TPUEstimator.\"\"\"\n\n def __init__(self, mode, config):\n \"\"\"Initializes a CIFARInput object.\n\n Args:\n mode: one of [train, valid, test, augment, sample]\n config: config dict built from config.py\n\n Raises:\n ValueError: invalid mode or data files\n \"\"\"\n self.mode = mode\n self.config = config\n if mode == 'train': # Training set (no validation & test)\n self.data_files = config['train_data_files']\n elif mode == 'train_eval': # For computing train error\n self.data_files = [config['train_data_files'][0]]\n elif mode == 'valid': # For computing validation error\n self.data_files = [config['valid_data_file']]\n elif mode == 'test': # For computing the test error\n self.data_files = [config['test_data_file']]\n elif mode == 'augment': # Training set (includes validation, no test)\n self.data_files = (config['train_data_files'] +\n [config['valid_data_file']])\n elif mode == 'sample': # Fixed batch of 100 samples from validation\n self.data_files = [config['sample_data_file']]\n else:\n raise ValueError('invalid mode')\n\n if not self.data_files:\n raise ValueError('no data files provided')\n\n @property\n def num_images(self):\n \"\"\"Number of images in the dataset (depends on the mode).\"\"\"\n if self.mode == 'train':\n return 40000\n elif self.mode == 'train_eval':\n return 10000\n elif self.mode == 'valid':\n return 10000\n elif self.mode == 'test':\n return 10000\n elif self.mode == 'augment':\n return 50000\n elif self.mode == 'sample':\n return 100\n\n def input_fn(self, params):\n \"\"\"Returns a CIFAR tf.data.Dataset object.\n\n Args:\n params: parameter dict pass by Estimator.\n\n Returns:\n tf.data.Dataset object\n \"\"\"\n batch_size = params['batch_size']\n is_training = (self.mode == 'train' or self.mode == 'augment')\n\n dataset = tf.data.TFRecordDataset(self.data_files)\n dataset = dataset.prefetch(buffer_size=batch_size)\n\n # Repeat dataset for training modes\n if is_training:\n # Shuffle buffer with whole dataset to ensure full randomness per epoch\n dataset = dataset.cache().apply(\n tf.contrib.data.shuffle_and_repeat(\n buffer_size=self.num_images))\n\n # This is a hack to allow computing metrics on a fixed batch on TPU. 
Because\n # TPU shards the batch acrosss cores, we replicate the fixed batch so that\n # each core contains the whole batch.\n if self.mode == 'sample':\n dataset = dataset.repeat()\n\n # Parse, preprocess, and batch images\n parser_fn = functools.partial(_parser, is_training)\n dataset = dataset.apply(\n tf.contrib.data.map_and_batch(\n parser_fn,\n batch_size=batch_size,\n num_parallel_batches=self.config['tpu_num_shards'],\n drop_remainder=True))\n\n # Assign static batch size dimension\n dataset = dataset.map(functools.partial(_set_batch_dimension, batch_size))\n\n # Prefetch to overlap in-feed with training\n dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n\n return dataset\n\n\ndef _preprocess(image):\n \"\"\"Perform standard CIFAR preprocessing.\n\n Pads the image then performs a random crop.\n Then, image is flipped horizontally randomly.\n\n Args:\n image: image Tensor with shape [height, width, 3]\n\n Returns:\n preprocessed image with the same dimensions.\n \"\"\"\n # Pad 4 pixels on all sides with 0\n image = tf.image.resize_image_with_crop_or_pad(\n image, HEIGHT + 8, WIDTH + 8)\n\n # Random crop\n image = tf.random_crop(image, [HEIGHT, WIDTH, 3], seed=0)\n\n # Random flip\n image = tf.image.random_flip_left_right(image, seed=0)\n\n return image\n\n\ndef _parser(use_preprocessing, serialized_example):\n \"\"\"Parses a single tf.Example into image and label tensors.\"\"\"\n features = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n })\n image = tf.decode_raw(features['image'], tf.uint8)\n image.set_shape([3 * HEIGHT * WIDTH])\n image = tf.reshape(image, [3, HEIGHT, WIDTH])\n # TODO(chrisying): handle NCHW format\n image = tf.transpose(image, [1, 2, 0])\n image = tf.cast(image, tf.float32)\n if use_preprocessing:\n image = _preprocess(image)\n image -= tf.constant(RGB_MEAN, shape=[1, 1, 3])\n image /= tf.constant(RGB_STD, shape=[1, 1, 3])\n label = tf.cast(features['label'], tf.int32)\n return image, label\n\n\ndef _set_batch_dimension(batch_size, images, labels):\n images.set_shape(images.get_shape().merge_with(\n tf.TensorShape([batch_size, None, None, None])))\n labels.set_shape(labels.get_shape().merge_with(\n tf.TensorShape([batch_size])))\n\n return images, labels\n",
"import torch.nn as nn\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom models.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\n\nwebroot = \"https://tigress-web.princeton.edu/~fy/drn/models/\"\n\nmodel_urls = {\n \"resnet50\": \"https://download.pytorch.org/models/resnet50-19c8e357.pth\",\n \"drn-c-26\": webroot + \"drn_c_26-ddedf421.pth\",\n \"drn-c-42\": webroot + \"drn_c_42-9d336e8c.pth\",\n \"drn-c-58\": webroot + \"drn_c_58-0a53a92c.pth\",\n \"drn-d-22\": webroot + \"drn_d_22-4bd2f8ea.pth\",\n \"drn-d-38\": webroot + \"drn_d_38-eebb45f0.pth\",\n \"drn-d-54\": webroot + \"drn_d_54-0e0534ff.pth\",\n \"drn-d-105\": webroot + \"drn_d_105-12b40979.pth\",\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=padding,\n bias=False,\n dilation=dilation,\n )\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(\n self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n dilation=(1, 1),\n residual=True,\n BatchNorm=None,\n ):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(\n inplanes, planes, stride, padding=dilation[0], dilation=dilation[0]\n )\n self.bn1 = BatchNorm(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes, padding=dilation[1], dilation=dilation[1])\n self.bn2 = BatchNorm(planes)\n self.downsample = downsample\n self.stride = stride\n self.residual = residual\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n if self.residual:\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(\n self,\n inplanes,\n planes,\n stride=1,\n downsample=None,\n dilation=(1, 1),\n residual=True,\n BatchNorm=None,\n ):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = BatchNorm(planes)\n self.conv2 = nn.Conv2d(\n planes,\n planes,\n kernel_size=3,\n stride=stride,\n padding=dilation[1],\n bias=False,\n dilation=dilation[1],\n )\n self.bn2 = BatchNorm(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = BatchNorm(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DRN(nn.Module):\n def __init__(\n self,\n block,\n layers,\n arch=\"D\",\n channels=(16, 32, 64, 128, 256, 512, 512, 512),\n BatchNorm=None,\n ):\n super(DRN, self).__init__()\n self.inplanes = channels[0]\n self.out_dim = channels[-1]\n self.arch = arch\n\n if arch == \"C\":\n self.conv1 = nn.Conv2d(\n 3, channels[0], kernel_size=7, stride=1, padding=3, bias=False\n )\n self.bn1 = BatchNorm(channels[0])\n self.relu = nn.ReLU(inplace=True)\n\n self.layer1 = self._make_layer(\n BasicBlock, channels[0], layers[0], stride=1, BatchNorm=BatchNorm\n )\n self.layer2 = self._make_layer(\n BasicBlock, channels[1], layers[1], stride=2, BatchNorm=BatchNorm\n )\n\n elif arch == \"D\":\n self.layer0 = 
nn.Sequential(\n nn.Conv2d(\n 3, channels[0], kernel_size=7, stride=1, padding=3, bias=False\n ),\n BatchNorm(channels[0]),\n nn.ReLU(inplace=True),\n )\n\n self.layer1 = self._make_conv_layers(\n channels[0], layers[0], stride=1, BatchNorm=BatchNorm\n )\n self.layer2 = self._make_conv_layers(\n channels[1], layers[1], stride=2, BatchNorm=BatchNorm\n )\n\n self.layer3 = self._make_layer(\n block, channels[2], layers[2], stride=2, BatchNorm=BatchNorm\n )\n self.layer4 = self._make_layer(\n block, channels[3], layers[3], stride=2, BatchNorm=BatchNorm\n )\n self.layer5 = self._make_layer(\n block,\n channels[4],\n layers[4],\n dilation=2,\n new_level=False,\n BatchNorm=BatchNorm,\n )\n self.layer6 = (\n None\n if layers[5] == 0\n else self._make_layer(\n block,\n channels[5],\n layers[5],\n dilation=4,\n new_level=False,\n BatchNorm=BatchNorm,\n )\n )\n\n if arch == \"C\":\n self.layer7 = (\n None\n if layers[6] == 0\n else self._make_layer(\n BasicBlock,\n channels[6],\n layers[6],\n dilation=2,\n new_level=False,\n residual=False,\n BatchNorm=BatchNorm,\n )\n )\n self.layer8 = (\n None\n if layers[7] == 0\n else self._make_layer(\n BasicBlock,\n channels[7],\n layers[7],\n dilation=1,\n new_level=False,\n residual=False,\n BatchNorm=BatchNorm,\n )\n )\n elif arch == \"D\":\n self.layer7 = (\n None\n if layers[6] == 0\n else self._make_conv_layers(\n channels[6], layers[6], dilation=2, BatchNorm=BatchNorm\n )\n )\n self.layer8 = (\n None\n if layers[7] == 0\n else self._make_conv_layers(\n channels[7], layers[7], dilation=1, BatchNorm=BatchNorm\n )\n )\n\n self._init_weight()\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(\n self,\n block,\n planes,\n blocks,\n stride=1,\n dilation=1,\n new_level=True,\n residual=True,\n BatchNorm=None,\n ):\n assert dilation == 1 or dilation % 2 == 0\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n BatchNorm(planes * block.expansion),\n )\n\n layers = list()\n layers.append(\n block(\n self.inplanes,\n planes,\n stride,\n downsample,\n dilation=(1, 1)\n if dilation == 1\n else (dilation // 2 if new_level else dilation, dilation),\n residual=residual,\n BatchNorm=BatchNorm,\n )\n )\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(\n block(\n self.inplanes,\n planes,\n residual=residual,\n dilation=(dilation, dilation),\n BatchNorm=BatchNorm,\n )\n )\n\n return nn.Sequential(*layers)\n\n def _make_conv_layers(self, channels, convs, stride=1, dilation=1, BatchNorm=None):\n modules = []\n for i in range(convs):\n modules.extend(\n [\n nn.Conv2d(\n self.inplanes,\n channels,\n kernel_size=3,\n stride=stride if i == 0 else 1,\n padding=dilation,\n bias=False,\n dilation=dilation,\n ),\n BatchNorm(channels),\n nn.ReLU(inplace=True),\n ]\n )\n self.inplanes = channels\n return nn.Sequential(*modules)\n\n def forward(self, x):\n if self.arch == \"C\":\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n elif self.arch == \"D\":\n x = self.layer0(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n\n x = 
self.layer3(x)\n low_level_feat = x\n\n x = self.layer4(x)\n x = self.layer5(x)\n\n if self.layer6 is not None:\n x = self.layer6(x)\n\n if self.layer7 is not None:\n x = self.layer7(x)\n\n if self.layer8 is not None:\n x = self.layer8(x)\n\n return x, low_level_feat\n\n\nclass DRN_A(nn.Module):\n def __init__(self, block, layers, BatchNorm=None):\n self.inplanes = 64\n super(DRN_A, self).__init__()\n self.out_dim = 512 * block.expansion\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = BatchNorm(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0], BatchNorm=BatchNorm)\n self.layer2 = self._make_layer(\n block, 128, layers[1], stride=2, BatchNorm=BatchNorm\n )\n self.layer3 = self._make_layer(\n block, 256, layers[2], stride=1, dilation=2, BatchNorm=BatchNorm\n )\n self.layer4 = self._make_layer(\n block, 512, layers[3], stride=1, dilation=4, BatchNorm=BatchNorm\n )\n\n self._init_weight()\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False,\n ),\n BatchNorm(planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(self.inplanes, planes, stride, downsample, BatchNorm=BatchNorm)\n )\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(\n block(\n self.inplanes,\n planes,\n dilation=(dilation, dilation),\n BatchNorm=BatchNorm,\n )\n )\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n return x\n\n\ndef drn_a_50(BatchNorm, pretrained=True):\n model = DRN_A(Bottleneck, [3, 4, 6, 3], BatchNorm=BatchNorm)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls[\"resnet50\"]))\n return model\n\n\ndef drn_c_26(BatchNorm, pretrained=True):\n model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch=\"C\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-c-26\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_c_42(BatchNorm, pretrained=True):\n model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch=\"C\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-c-42\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_c_58(BatchNorm, pretrained=True):\n model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch=\"C\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-c-58\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_d_22(BatchNorm, pretrained=True):\n model = 
DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch=\"D\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-d-22\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_d_24(BatchNorm, pretrained=True):\n model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch=\"D\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-d-24\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_d_38(BatchNorm, pretrained=True):\n model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch=\"D\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-d-38\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_d_40(BatchNorm, pretrained=True):\n model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch=\"D\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-d-40\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_d_54(BatchNorm, pretrained=True):\n model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch=\"D\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-d-54\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\ndef drn_d_105(BatchNorm, pretrained=True):\n model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch=\"D\", BatchNorm=BatchNorm)\n if pretrained:\n pretrained = model_zoo.load_url(model_urls[\"drn-d-105\"])\n del pretrained[\"fc.weight\"]\n del pretrained[\"fc.bias\"]\n model.load_state_dict(pretrained)\n return model\n\n\nif __name__ == \"__main__\":\n import torch\n\n model = drn_a_50(BatchNorm=nn.BatchNorm2d, pretrained=True)\n input = torch.rand(1, 3, 512, 512)\n output, low_level_feat = model(input)\n print(output.size())\n print(low_level_feat.size())\n"
] | [
[
"torch.cuda.synchronize",
"numpy.expand_dims",
"torch.cuda.nvtx.range_pop",
"numpy.asarray",
"torch.cuda.current_stream",
"torch.from_numpy",
"torch.tensor",
"torch.no_grad",
"torch.cuda.nvtx.range_push",
"torch.cuda.stream",
"torch.distributed.all_reduce",
"torch.cuda.Stream",
"torch.cuda.cudart"
],
[
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.TensorShape",
"tensorflow.transpose",
"tensorflow.image.random_flip_left_right",
"tensorflow.constant",
"tensorflow.FixedLenFeature",
"tensorflow.data.TFRecordDataset",
"tensorflow.decode_raw",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.random_crop",
"tensorflow.contrib.data.map_and_batch",
"tensorflow.contrib.data.shuffle_and_repeat"
],
[
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.rand",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pygongnlp/gramcorrector | [
"1b5b7f46f7185675b46341e40b2a866fd6d1d7ad",
"1b5b7f46f7185675b46341e40b2a866fd6d1d7ad"
] | [
"sec/test.py",
"sged/train.py"
] | [
"import argparse\nimport torch\nimport os\n\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM\nfrom utils import load_data, write_to_file\nfrom metric import compute_metrics\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_name_or_path\", default=\"model/chinese_bert\", type=str)\n parser.add_argument(\"--save_path\", default=\"./\", type=str)\n parser.add_argument(\"--test_file\", default=\"data/sighan/test.json\", type=str)\n args = parser.parse_args()\n\n assert os.path.exists(args.save_path)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n model = AutoModelForMaskedLM.from_pretrained(args.model_name_or_path)\n\n checkpoint = torch.load(os.path.join(args.save_path, \"model.tar\"), map_location=device)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n model = model.to(device)\n\n src, trg = load_data(file_path=args.test_file, mode=\"test\")\n\n results = []\n for s, t in zip(src, trg):\n inputs = tokenizer(t, return_tensors=\"pt\")\n inputs = inputs.to(device)\n outputs = model(**inputs)\n\n logits = outputs.logits[0][1:-1] #filter [CLS] & [SEP]\n predict = tokenizer.convert_ids_to_tokens(logits.argmax(-1).tolist())\n\n s_tok = tokenizer.tokenize(s)\n t_tok = tokenizer.tokenize(t)\n assert len(s_tok) == len(t_tok) == len(predict)\n results.append([s_tok, t_tok, predict])\n\n metrics = compute_metrics(results)\n print(f\"{', '.join([f'{key}={value:.4f}' for key, value in metrics.items()])}\")\n\n write_to_file(file_path=os.path.join(args.save_path, \"result_test.json\"), results=results)\n print(f\"write to {os.path.join(args.save_path, 'result_test.json')}\")\n\n\n\n\n\n\n\n\n\n",
"import argparse\nimport time\nimport os\nfrom tqdm import tqdm\n\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, BatchSampler, RandomSampler, DistributedSampler\n\nfrom transformers import AutoConfig, AutoTokenizer, AutoModelForSequenceClassification\nfrom utils import set_seed, compute_model_size, load_data, write_to_file, epoch_time, label2id, id2label\nfrom data import SGEDDataset\nfrom metric import compute_acc\nfrom collactor import DataCollactorForSGED\n\n\ndef train(model, data_loader, optimizer, id2label, device, step=500):\n model.train()\n\n epoch_loss = 0\n results = []\n for i, (src, labels, src_oral) in enumerate(data_loader):\n src[\"labels\"] = labels\n src = src.to(device)\n\n optimizer.zero_grad()\n outputs = model(**src)\n\n loss = outputs.loss\n loss.backward()\n epoch_loss += loss.item()\n\n optimizer.step()\n\n predictions = outputs.logits.argmax(-1).tolist()\n labels = labels.tolist()\n for s, label, predict in zip(src_oral, labels, predictions):\n label, predict = id2label[label], id2label[predict]\n results.append([s, label, predict])\n\n if (i + 1) % step == 0:\n acc = compute_acc(results)\n print(f\"Step {i + 1}, loss={epoch_loss / (i + 1):.4f}, acc={acc:.4f}\")\n\n return epoch_loss / len(data_loader), compute_acc(results)\n\n\ndef valid(model, data_loader, id2label, device, step=500):\n model.eval()\n\n epoch_loss = 0\n results = []\n with torch.no_grad():\n for i, (src, labels, src_oral) in enumerate(data_loader):\n src[\"labels\"] = labels\n src = src.to(device)\n outputs = model(**src)\n\n loss = outputs.loss\n epoch_loss += loss.item()\n\n predictions = outputs.logits.argmax(-1).tolist()\n labels = labels.tolist()\n for s, label, predict in zip(src_oral, labels, predictions):\n label, predict = id2label[label], id2label[predict]\n results.append([s, label, predict])\n\n if (i + 1) % step == 0:\n acc = compute_acc(results)\n print(f\"Step {i + 1}, loss={epoch_loss / (i + 1):.4f}, acc={acc:.4f}\")\n\n return epoch_loss / len(data_loader), compute_acc(results), results\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"Sentence-level Grammatical Error Detection\")\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-chinese\", type=str)\n parser.add_argument(\"--train_file\", default=\"data/sighan/train.json\", type=str)\n parser.add_argument(\"--valid_file\", default=\"data/sighan/dev.json\", type=str)\n parser.add_argument(\"--save_path\", default=\"sged/checkpoints/bert\", type=str)\n parser.add_argument(\"--train_batch_size\", default=2, type=int)\n parser.add_argument(\"--valid_batch_size\", default=2, type=int)\n parser.add_argument(\"--max_length\", default=512, type=int)\n parser.add_argument(\"--seed\", default=42, type=int)\n parser.add_argument(\"--epochs\", default=100, type=int)\n parser.add_argument(\"--patience\", default=3, type=int)\n parser.add_argument(\"--step\", default=500, type=int)\n parser.add_argument(\"--lr\", default=3e-5, type=float)\n parser.add_argument(\"--weight_decay\", default=0.0, type=float)\n args = parser.parse_args()\n print(f\"Params={args}\")\n\n set_seed(args.seed)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n if not os.path.exists(args.save_path):\n os.makedirs(args.save_path)\n\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, num_labels=len(label2id))\n model = model.to(device)\n 
compute_model_size(model)\n\n train_dataset = SGEDDataset(file_path=args.train_file, mode=\"train\")\n valid_dataset = SGEDDataset(file_path=args.valid_file, mode=\"valid\")\n\n collactor = DataCollactorForSGED(tokenizer=tokenizer, max_length=args.max_length, label2id=label2id)\n train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=args.train_batch_size, collate_fn=collactor)\n valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=args.valid_batch_size, collate_fn=collactor)\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = optim.AdamW(optimizer_grouped_parameters, lr=args.lr)\n\n patience = 0\n print(\"Start valid before training...\")\n valid_loss, valid_acc, _ = valid(model, valid_dataloader, id2label, device, args.step)\n store_metrics = {\n \"valid_acc\": valid_acc\n }\n print(f\"Before training, valid_loss={valid_loss:.4f}, valid_acc={valid_acc:.4f}\")\n\n all_start_time = time.time()\n for epoch in range(args.epochs):\n print(f\"Start train {epoch + 1}th epochs\")\n start_time = time.time()\n train_loss, train_acc = train(model, train_dataloader, optimizer, id2label, device, args.step)\n end_time = time.time()\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n print(f\"Epoch {epoch + 1}th: time={epoch_mins}m{epoch_secs}s, \"\n f\"train_loss={train_loss:.4f}, train_acc={train_acc:.4f}\")\n\n print(f\"Start valid {epoch + 1}th epochs\")\n start_time = time.time()\n valid_loss, valid_acc, results = valid(model, valid_dataloader, id2label, device, args.step)\n end_time = time.time()\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n print(f\"Epoch {epoch + 1}th: time={epoch_mins}m{epoch_secs}s, \"\n f\"valid_loss={valid_loss:.4f}, valid_acc={valid_acc:.4f}\")\n\n if valid_acc > store_metrics[\"valid_acc\"]:\n store_metrics[\"valid_acc\"] = valid_acc\n store_metrics[\"train_acc\"] = train_acc\n patience = 0\n\n torch.save({\n \"config\": args,\n \"epoch\": epoch + 1,\n \"model_state_dict\": model.state_dict(),\n \"valid_acc\": valid_acc,\n \"train_acc\": train_acc,\n \"train_loss\": train_loss,\n \"valid_loss\": valid_loss,\n \"label2id\": label2id\n }, os.path.join(args.save_path, \"model.tar\"))\n print(f\"save model to {args.save_path}\")\n\n write_to_file(os.path.join(args.save_path, \"result_valid.json\"), results)\n print(f\"write result to {os.path.join(args.save_path, 'result_valid.json')}\")\n else:\n patience += 1\n print(f\"patience up to {patience}\")\n\n if patience == args.patience:\n all_end_time = time.time()\n epoch_mins, epoch_secs = epoch_time(all_start_time, all_end_time)\n print(\"Training Over!\")\n print(f\"All time={epoch_mins}m{epoch_secs}s\")\n print(f\"Best train_acc={store_metrics['train_acc']:.4f}, valid_acc={store_metrics['valid_acc']:.4f}\")\n break\n\n if patience < args.patience:\n all_end_time = time.time()\n epoch_mins, epoch_secs = epoch_time(all_start_time, all_end_time)\n print(\"Training Over!\")\n print(f\"All time={epoch_mins}m{epoch_secs}s\")\n print(f\"Best train_acc={store_metrics['train_acc']:.4f}, valid_acc={store_metrics['valid_acc']:.4f}\")\n\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.cuda.is_available"
],
[
"torch.no_grad",
"torch.optim.AdamW",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NKPmedia/rising | [
"2a580e9c74c8fb690e27e8bacf09ab97184ab1ee",
"2a580e9c74c8fb690e27e8bacf09ab97184ab1ee",
"2a580e9c74c8fb690e27e8bacf09ab97184ab1ee"
] | [
"rising/transforms/spatial.py",
"tests/transforms/test_crop.py",
"tests/transforms/functional/test_intensity.py"
] | [
"# from __future__ import annotations\nimport torch\n\nfrom itertools import combinations\nfrom typing import Union, Sequence, Callable, Optional\nfrom torch.multiprocessing import Value\n\nfrom rising.random import AbstractParameter, DiscreteParameter\nfrom rising.transforms.abstract import AbstractTransform, BaseTransform\nfrom rising.transforms.functional.spatial import *\n\n\n__all__ = [\"Mirror\", \"Rot90\", \"ResizeNative\",\n \"Zoom\", \"ProgressiveResize\", \"SizeStepScheduler\"]\n\nscheduler_type = Callable[[int], Union[int, Sequence[int]]]\n\n\nclass Mirror(BaseTransform):\n \"\"\"Random mirror transform\"\"\"\n\n def __init__(self,\n dims: Union[int, DiscreteParameter,\n Sequence[Union[int, DiscreteParameter]]],\n keys: Sequence[str] = ('data',), grad: bool = False, **kwargs):\n \"\"\"\n Args:\n dims: axes which should be mirrored\n keys: keys which should be mirrored\n prob: probability for mirror. If float value is provided,\n it is used for all dims\n grad: enable gradient computation inside transformation\n **kwargs: keyword arguments passed to superclass\n\n Examples:\n >>> # Use mirror transform for augmentations\n >>> from rising.random import DiscreteCombinationsParameter\n >>> # We sample from all possible mirror combination for\n >>> # volumetric data\n >>> trafo = Mirror(DiscreteCombinationsParameter((0, 1, 2)))\n \"\"\"\n super().__init__(augment_fn=mirror, dims=dims, keys=keys, grad=grad,\n property_names=('dims',), **kwargs)\n\n\nclass Rot90(AbstractTransform):\n \"\"\"Rotate 90 degree around dims\"\"\"\n\n def __init__(self, dims: Union[Sequence[int], DiscreteParameter],\n keys: Sequence[str] = ('data',),\n num_rots: Sequence[int] = (0, 1, 2, 3),\n prob: float = 0.5, grad: bool = False, **kwargs):\n \"\"\"\n Args:\n dims: dims/axis ro rotate. 
If more than two dims are\n provided, 2 dimensions are randomly chosen at each call\n keys: keys which should be rotated\n num_rots: possible values for number of rotations\n prob: probability for rotation\n grad: enable gradient computation inside transformation\n kwargs: keyword arguments passed to superclass\n\n See Also:\n :func:`torch.Tensor.rot90`\n \"\"\"\n super().__init__(grad=grad, **kwargs)\n self.keys = keys\n self.prob = prob\n if not isinstance(dims, DiscreteParameter):\n if len(dims) > 2:\n dims = list(combinations(dims, 2))\n else:\n dims = (dims,)\n dims = DiscreteParameter(dims)\n self.register_sampler(\"dims\", dims)\n self.register_sampler(\"num_rots\", DiscreteParameter(num_rots))\n\n def forward(self, **data) -> dict:\n \"\"\"\n Apply transformation\n\n Args:\n data: dict with tensors\n\n Returns:\n dict: dict with augmented data\n \"\"\"\n if torch.rand(1) < self.prob:\n num_rots = self.num_rots\n rand_dims = self.dims\n\n for key in self.keys:\n data[key] = rot90(data[key], k=num_rots, dims=rand_dims)\n return data\n\n\nclass ResizeNative(BaseTransform):\n \"\"\"Resize data to given size\"\"\"\n\n def __init__(self, size: Union[int, Sequence[int]], mode: str = 'nearest',\n align_corners: Optional[bool] = None, preserve_range: bool = False,\n keys: Sequence = ('data',), grad: bool = False, **kwargs):\n \"\"\"\n Args:\n size: spatial output size (excluding batch size and\n number of channels)\n mode: one of ``nearest``, ``linear``, ``bilinear``, ``bicubic``,\n ``trilinear``, ``area`` (for more inforamtion see\n :func:`torch.nn.functional.interpolate`)\n align_corners: input and output tensors are aligned by the center \\\n points of their corners pixels, preserving the values at the\n corner pixels.\n preserve_range: output tensor has same range as input tensor\n keys: keys which should be augmented\n grad: enable gradient computation inside transformation\n **kwargs: keyword arguments passed to augment_fn\n \"\"\"\n super().__init__(augment_fn=resize_native, size=size, mode=mode,\n align_corners=align_corners, preserve_range=preserve_range,\n keys=keys, grad=grad, **kwargs)\n\n\nclass Zoom(BaseTransform):\n \"\"\"Apply augment_fn to keys. By default the scaling factor is sampled\n from a uniform distribution with the range specified by\n :attr:`random_args`\n \"\"\"\n\n def __init__(self, scale_factor: Union[Sequence, AbstractParameter] = (0.75, 1.25),\n mode: str = 'nearest', align_corners: bool = None,\n preserve_range: bool = False, keys: Sequence = ('data',),\n grad: bool = False, **kwargs):\n \"\"\"\n Args:\n scale_factor: positional arguments passed for random function.\n If Sequence[Sequence] is provided, a random value for each item\n in the outer Sequence is generated. 
This can be used to set\n different ranges for different axis.\n mode: one of `nearest`, `linear`, `bilinear`,\n `bicubic`, `trilinear`, `area` (for more\n inforamtion see :func:`torch.nn.functional.interpolate`)\n align_corners: input and output tensors are aligned by the center\n points of their corners pixels, preserving the values at the\n corner pixels.\n preserve_range: output tensor has same range as input tensor\n keys: keys which should be augmented\n grad: enable gradient computation inside transformation\n **kwargs: keyword arguments passed to augment_fn\n\n See Also:\n :func:`random.uniform`, :func:`torch.nn.functional.interpolate`\n \"\"\"\n super().__init__(augment_fn=resize_native, scale_factor=scale_factor,\n mode=mode, align_corners=align_corners,\n preserve_range=preserve_range, keys=keys, grad=grad,\n property_names=('scale_factor',), **kwargs)\n\n\nclass ProgressiveResize(ResizeNative):\n \"\"\"Resize data to sizes specified by scheduler\"\"\"\n\n def __init__(self, scheduler: scheduler_type, mode: str = 'nearest',\n align_corners: bool = None, preserve_range: bool = False,\n keys: Sequence = ('data',), grad: bool = False, **kwargs):\n \"\"\"\n Args:\n scheduler: scheduler which determined the current size.\n The scheduler is called with the current iteration of the\n transform\n mode: one of ``nearest``, ``linear``, ``bilinear``, ``bicubic``,\n ``trilinear``, ``area`` (for more inforamtion see\n :func:`torch.nn.functional.interpolate`)\n align_corners: input and output tensors are aligned by the center\n points of their corners pixels, preserving the values at the\n corner pixels.\n preserve_range: output tensor has same range as input tensor\n keys: keys which should be augmented\n grad: enable gradient computation inside transformation\n **kwargs: keyword arguments passed to augment_fn\n\n Warnings:\n When this transformations is used in combination with\n multiprocessing, the step counter is not perfectly synchronized\n between multiple processes.\n As a result the step count my jump between values\n in a range of the number of processes used.\n \"\"\"\n super().__init__(size=0, mode=mode, align_corners=align_corners,\n preserve_range=preserve_range,\n keys=keys, grad=grad, **kwargs)\n self.scheduler = scheduler\n self._step = Value('i', 0)\n\n def reset_step(self) -> ResizeNative:\n \"\"\"\n Reset step to 0\n\n Returns:\n ResizeNative: returns self to allow chaining\n \"\"\"\n with self._step.get_lock():\n self._step.value = 0\n return self\n\n def increment(self) -> ResizeNative:\n \"\"\"\n Increment step by 1\n\n Returns:\n ResizeNative: returns self to allow chaining\n \"\"\"\n with self._step.get_lock():\n self._step.value += 1\n return self\n\n @property\n def step(self) -> int:\n \"\"\"\n Current step\n\n Returns:\n int: number of steps\n \"\"\"\n return self._step.value\n\n def forward(self, **data) -> dict:\n \"\"\"\n Resize data\n\n Args:\n **data: input batch\n\n Returns:\n dict: augmented batch\n \"\"\"\n self.kwargs[\"size\"] = self.scheduler(self.step)\n self.increment()\n return super().forward(**data)\n\n\nclass SizeStepScheduler:\n \"\"\"Scheduler return size when milestone is reached\"\"\"\n\n def __init__(self, milestones: Sequence[int],\n sizes: Union[Sequence[int], Sequence[Sequence[int]]]):\n \"\"\"\n Args:\n milestones: contains number of iterations where size should be changed\n sizes: sizes corresponding to milestones\n \"\"\"\n if len(milestones) != len(sizes) - 1:\n raise TypeError(\"Sizes must include initial size and thus \"\n \"has one 
element more than miltstones.\")\n self.targets = sorted(zip((0, *milestones), sizes), key=lambda x: x[0], reverse=True)\n\n def __call__(self, step) -> Union[int, Sequence[int], Sequence[Sequence[int]]]:\n \"\"\"\n Return size with regard to milestones\n\n Args:\n step: current step\n\n Returns:\n Union[int, Sequence[int], Sequence[Sequence[int]]]: current size\n \"\"\"\n for t in self.targets:\n if step >= t[0]:\n return t[1]\n return self.targets[-1][1]\n",
"import unittest\nimport torch\nimport random\n\nfrom rising.random import DiscreteParameter\nfrom rising.transforms.crop import *\nfrom rising.transforms.functional.crop import random_crop, center_crop\n\n\nclass TestCrop(unittest.TestCase):\n def setUp(self) -> None:\n data = torch.zeros(1, 1, 10, 10)\n data[:, :, 4:7, 4:7] = 1\n self.batch = {\"data\": data, \"seg\": data.clone()}\n\n def test_center_crop_transform(self):\n for s in range(1, 10):\n trafo = CenterCrop(s, keys=(\"data\", \"seg\"))\n crop = trafo(**self.batch)\n\n expected = center_crop(self.batch[\"data\"], s)\n\n self.assertTrue(expected.allclose(crop[\"data\"]))\n self.assertTrue(expected.allclose(crop[\"seg\"]))\n self.assertTrue(all([_s == s for _s in crop[\"data\"].shape[2:]]))\n self.assertTrue(all([_s == s for _s in crop[\"seg\"].shape[2:]]))\n\n def test_random_crop_transform(self):\n for s in range(1, 10):\n torch.manual_seed(s)\n trafo = RandomCrop(s, keys=(\"data\", \"seg\"))\n crop = trafo(**self.batch)\n\n random.seed(0)\n _ = random.choices([0]) # internally sample size in transform\n _ = random.choices([0]) # internally sample dist in transform\n torch.manual_seed(s) # seed random_crop\n expected = random_crop(self.batch[\"data\"], size=s)\n\n self.assertTrue(expected.allclose(crop[\"data\"]))\n self.assertTrue(expected.allclose(crop[\"seg\"]))\n self.assertTrue(all([_s == s for _s in crop[\"data\"].shape[2:]]))\n self.assertTrue(all([_s == s for _s in crop[\"seg\"].shape[2:]]))\n\n def test_center_crop_random_size_transform(self):\n for _ in range(10):\n random.seed(0)\n trafo = CenterCrop(DiscreteParameter([3, 4, 5, 6, 7, 8]))\n crop = trafo(**self.batch)[\"data\"]\n\n random.seed(0)\n s = random.randrange(3, 8)\n expected = center_crop(self.batch[\"data\"], s)\n\n self.assertTrue((crop == expected).all())\n self.assertTrue(all([_s == s for _s in crop.shape[2:]]))\n\n def test_center_crop_random_size_2_transform(self):\n for _ in range(10):\n random.seed(0)\n trafo = CenterCrop([DiscreteParameter([3, 4, 5]),\n DiscreteParameter([6, 7, 8])])\n crop = trafo(**self.batch)[\"data\"]\n\n random.seed(0)\n s = (random.randrange(3, 5), random.randrange(6, 8))\n expected = center_crop(self.batch[\"data\"], s)\n\n self.assertTrue((crop == expected).all())\n self.assertSequenceEqual(crop.shape[2:], s)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import unittest\nimport torch\nfrom math import isclose\n\nfrom rising.transforms.functional.intensity import *\n\n\nclass TestIntensityFunctional(unittest.TestCase):\n def setUp(self) -> None:\n self.batch_2d = torch.rand(3, 3)[None, None]\n\n def test_norm_range(self):\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_range(inp, 2, 3, per_channel=False)\n\n self.assertEqual(outp.min().item(), 2)\n self.assertEqual(outp.max().item(), 3)\n\n def test_norm_range_per_channel(self):\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_range(inp, 2, 3, per_channel=True)\n\n for c in range(inp.shape[0]):\n self.assertEqual(outp[c].min().item(), 2)\n self.assertEqual(outp[c].max().item(), 3)\n\n def test_norm_min_max(self):\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_min_max(inp, per_channel=False)\n\n self.assertEqual(outp.min().item(), 0)\n self.assertEqual(outp.max().item(), 1)\n\n def test_norm_min_max_zeros(self):\n outp = norm_min_max(torch.zeros(1, 1, 32, 32), per_channel=False)\n\n self.assertTrue(isclose(outp.min().item(), 0, abs_tol=1e-06))\n\n def test_norm_min_max_per_channels(self):\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_min_max(inp, per_channel=True)\n\n for c in range(inp.shape[0]):\n self.assertEqual(outp[c].min().item(), 0)\n self.assertEqual(outp[c].max().item(), 1)\n\n def test_zero_mean_unit_std(self):\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_zero_mean_unit_std(inp, per_channel=False)\n\n self.assertTrue(isclose(outp.mean().item(), 0, abs_tol=1e-06))\n self.assertTrue(isclose(outp.std().item(), 1, rel_tol=1e-06))\n\n def test_zero_mean_unit_std_zeros(self):\n outp = norm_zero_mean_unit_std(torch.zeros(1, 1, 32, 32), per_channel=False)\n\n self.assertTrue(isclose(outp.mean().item(), 0, abs_tol=1e-06))\n self.assertTrue(isclose(outp.min().item(), 0, abs_tol=1e-06))\n\n def test_zero_mean_unit_std_per_channel(self):\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_zero_mean_unit_std(inp, per_channel=True)\n\n for c in range(inp.shape[0]):\n self.assertTrue(isclose(outp[c].mean().item(), 0, abs_tol=1e-06))\n self.assertTrue(isclose(outp[c].std().item(), 1, rel_tol=1e-06))\n\n def test_mean_std(self):\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_mean_std(inp, inp.mean().item(), inp.std().item(), per_channel=False)\n\n self.assertTrue(isclose(outp.mean().item(), 0, abs_tol=1e-06))\n self.assertTrue(isclose(outp.std().item(), 1, rel_tol=1e-06))\n\n def test_mean_std_per_channel(self):\n inp = (self.batch_2d[0] * 10) + 1\n channel_mean = [inp[c].mean().item() for c in range(inp.shape[0])]\n channel_std = [inp[c].std().item() for c in range(inp.shape[0])]\n outp = norm_mean_std(inp, channel_mean, channel_std, per_channel=True)\n\n for c in range(inp.shape[0]):\n self.assertTrue(isclose(outp[c].mean().item(), 0, abs_tol=1e-06))\n self.assertTrue(isclose(outp[c].std().item(), 1, rel_tol=1e-06))\n\n def test_mean_std_per_channel_scalar(self):\n # TEST: add error sensitive test to check correct behavior\n inp = (self.batch_2d[0] * 10) + 1\n outp = norm_mean_std(inp, inp.mean().item(), inp.std().item(), per_channel=True)\n\n def test_add_noise(self):\n outp = add_noise(self.batch_2d.clone(), 'normal', mean=75, std=1)\n diff = (outp - self.batch_2d).abs().mean()\n self.assertTrue(diff > 50)\n\n def test_gamma_correction(self):\n outp = gamma_correction(self.batch_2d, 2)\n self.assertTrue((self.batch_2d.pow(2) == outp).all())\n\n def test_add_value(self):\n outp = add_value(self.batch_2d, 2)\n self.assertTrue((torch.add(self.batch_2d, 2) == outp).all())\n\n def 
test_scale_by_value(self):\n outp = scale_by_value(self.batch_2d, 2)\n self.assertTrue((torch.mul(self.batch_2d, 2) == outp).all())\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"torch.multiprocessing.Value",
"torch.rand"
],
[
"torch.manual_seed",
"torch.zeros"
],
[
"torch.mul",
"torch.add",
"torch.rand",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sai6kiran/TwitterBotFarms | [
"cf6bfddda9fac1e27477186fd4f4b086ac711781"
] | [
"kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/PythonScripts/NaiveBayesClassifier.py"
] | [
"from contractionsDict import contractionsDict\nimport pandas as pd\nimport time\nimport numpy as np\nimport re\nfrom pattern.en import pluralize, singularize\nimport sys\nimport csv\nfrom LemmitizationandStemConverter import ObtainStemAndLemmatizationWord\n\ndef priorProb(scv):\n\tpct = 0\t#positive count total\n\tnct = 0\t#negative count total\n\tNct = 0\t#neutral count total\n\tntt = 0\t#no. training tweets\n\tfor index, row in scv.items():\n\t\t#print(row)\n\t\tif(row.lower() == 'positive'):\n\t\t\tpct+=1\n\t\tif(row.lower() == 'negative'):\n\t\t\tnct+=1\n\t\tif(row.lower() == 'neutral'):\n\t\t\tNct+=1\n\t\tntt+=1\n\tpc1 = pct/ntt\t#Postive Class 1\n\tnc2 = nct/ntt\t#Negative Class 2\n\tnc3 = Nct/ntt\t#Neutral Class 3\n\treturn((pc1, nc2, nc3))\n\ndef removeEmojis(txt):\n\temoji_pattern = re.compile(u\"[^\\U00000000-\\U0000d7ff\\U0000e000-\\U0000ffff]\", flags=re.UNICODE)\n\treturn(emoji_pattern.sub(u' ', txt))\n\ndef expandContractions(s, contractionsDict=contractionsDict):\n\tcontractionsRe = re.compile('(%s)' % '|'.join(contractionsDict.keys()))\n\tdef replace(match):\n\t\treturn contractionsDict[match.group(0)]\n\treturn contractionsRe.sub(replace, s)\n\ndef CleanUp(text):\n\t#Removes links from tweet:\n\ttext = re.sub('http://\\S+|https://\\S+', ' ', text)\n\n\t#Remove #, _, -, and @ from tweet:\n\ttext = text.replace(\"#\", \" \").replace(\"_\", \" \").replace(\"@\", \" \").replace(\"-\", \" \")\n\n\t#Replace ? with questionmark and ! with exclaimationmark:\n\ttext = text.replace(\"?\", \" questionmark\").replace(\"!\", \" exclaimationmark\")\n\n\t#Remove all other non alphanumeric special characters from tweet:\n\ttext = re.sub('\\W+ ',' ', text)\n\n\t#Removes whitespaces from tweet:\n\ttext = text.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n\ttext = re.sub(r' {2,}' , ' ', text)\n\n\t#Removes emojis from tweet:\n\ttext = removeEmojis(text)\n\n\treturn text\n\n\ndef likelihoodFunctionInformation(txt, ldf):\n\ttsv = 0\t#Total Sentiment Value\n\tnpw = 0\t\t#No. of positive words\n\tnnw = 0\t\t#No. negative words\n\tnNw = 0\t\t#No. 
of neutral words\n\n\tpsv = 0\t\t#Previous Word sentiment value\n\tnac = False\t#Negative conjuctive Adverb check\n\twrd = \" \"\t#Word to parse\n\tt3 = time.time()\n\tfor ewt in txt.split():\n\n\t\t#Check for all versions of word in Sentiment Dictionary:\n\t\t#print(ewt)\n\t\t#t1 = time.time()\n\t\tsll = ObtainStemAndLemmatizationWord(ewt) #Obtaining the noun version and root version of word using the function.\n\t\t#print(sll)\n\t\tif(sll[0]!=ewt):\n\t\t\tif(bool(sll[0] and sll[0].strip())==True):\t#Checing if the noun part of the word is in the Sentiment Dictionary.\n\t\t\t\tsnw = singularize(sll[0]) #Noun part of word in singular tense.\n\t\t\t\tpnw = pluralize(sll[0]) #Noun part of word in plural tense.\n\t\t\t\tsrw = singularize(sll[1]) #Root part of word in singular tense.\n\t\t\t\tprw = pluralize(sll[1]) #Root part of word in plural tense.\n\t\t\t\t#Check if singular part of noun of word is in the Sentiment Dictionary:\n\t\t\t\tif((snw in ldf[0].word.values) or (snw in ldf[1].word.values) or (snw in ldf[2].word.values) or (snw in ldf[3].word.values)):\n\t\t\t\t\twrd = snw\n\t\t\t\t#Check if plural part of noun of word is in the Sentiment Dictionary:\n\t\t\t\telif((pnw in ldf[0].word.values) or (pnw in ldf[1].word.values) or (pnw in ldf[2].word.values) or (pnw in ldf[3].word.values)):\n\t\t\t\t\twrd = pnw\n\t\t\t\t#Check if singular part of root of word is in the Sentiment Dictionary:\n\t\t\t\telif((srw in ldf[0].word.values) or (srw in ldf[1].word.values) or (srw in ldf[2].word.values) or (srw in ldf[3].word.values)):\n\t\t\t\t\twrd = srw\n\t\t\t\t#Check if plural part of root of word is in the Sentiment Dictionary:\n\t\t\t\telif((prw in ldf[0].word.values) or (prw in ldf[1].word.values) or (prw in ldf[2].word.values) or (prw in ldf[3].word.values)):\n\t\t\t\t\twrd = prw\n\t\t\t\telse:\n\t\t\t\t\twrd = ewt\n\t\t\telif(sll[1]!=ewt):\t#Checking if the root version of the word is in the Sentiment Dictionary.\n\t\t\t\tsrw = singularize(sll[1]) #Root part of word in singular tense.\n\t\t\t\tprw = pluralize(sll[1]) #Root part of word in plural tense.\n\t\t\t\t#Check if singular part of root of word is in the Sentiment Dictionary:\n\t\t\t\tif((srw in ldf[0].word.values) or (srw in ldf[1].word.values) or (srw in ldf[2].word.values) or (srw in ldf[3].word.values)):\n\t\t\t\t\twrd = srw\n\t\t\t\t#Check if plural part of root of word is in the Sentiment Dictionary:\n\t\t\t\telif((prw in ldf[0].word.values) or (prw in ldf[1].word.values) or (prw in ldf[2].word.values) or (prw in ldf[3].word.values)):\n\t\t\t\t\twrd = prw\n\t\t\t\telse:\n\t\t\t\t\twrd = ewt\n\t\t\telse:\n\t\t\t\twrd = ewt\n\t\telse:\n\t\t\twrd = ewt\n\n\t\twrd = ewt\n\n\t\t#Run the Likelihood Function Information on the word.\n\t\twsv = 0\t#Word Sentiment Value\n\t\tsfw = singularize(wrd)\t#Singular Form of Word\n\t\tpfw = pluralize(wrd)\t#Plural Form of Word\n\t\t#print(wrd, tsv)\t#Very Important Print Statement for Debugging\n\n\t\t#Checking if word matches a negative conjuctive adverb that forms different phrases in the tweet:\n\t\tif wrd.lower()=='not' or wrd.lower()=='but' or wrd.lower()=='however' or wrd.lower()=='instead' or wrd.lower()=='otherwise' or wrd.lower()=='contrarily':\n\t\t\tif(nac==False):\n\t\t\t\tnac=True\n\t\t\telse:\n\t\t\t\tnac=False\n\t\tif(nac==False):\n\t\t\t#Checking if words match special words\n\t\t\tif sfw.lower()=='maga':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='makeamericagreatagain':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='make america great 
again':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif \"email\" in sfw.lower():\n\t\t\t\tnnw += 5\n\t\t\t\ttsv -= 5\n\t\t\telif wrd.lower()=='questionmark':\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 10\n\t\t\t\tpsv = 0\n\t\t\telif wrd.lower()=='exclaimationmark':\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 10\n\t\t\t\tpsv = 0\n\n\t\t\t#Checking if word exists in the Sentiment Dictionary. Assign sentiment value and/or category if word exists. Otherwise categorize word as neutral.\n\t\t\telif sfw.lower() in ldf[0].word.values:\t#Check if singular version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(ewt, sfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif pfw.lower() in ldf[0].word.values:\t#Check if plural version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(ewt, pfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif sfw.lower() in ldf[1].word.values:\t#Check if singular version of word is in dataframe2\n\t\t\t\t#print(ewt, sfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif pfw.lower() in ldf[1].word.values:\t#Check if plural version of word is in dataframe2\n\t\t\t\t#print(ewt, pfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif sfw.lower() in ldf[2].word.values:\t#Check if singular version of word is in dataframe3\n\t\t\t\t#print(ewt, sfw, 3, tsv)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\telif pfw.lower() in ldf[2].word.values:\t#Check if plural version of word is in dataframe3\n\t\t\t\t#print(ewt, pfw, 3, tsv)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\telif sfw.lower() in ldf[3].word.values:\t#Check if singular version of word is in dataframe4\n\t\t\t\t#print(ewt, sfw, 4)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\telif pfw.lower() in ldf[3].word.values:\t#Check if plural version of word is in dataframe4\n\t\t\t\t#print(ewt, pfw, 4)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\telse:\t\t\t\t\t#The word must be a \"neutral\" word\n\t\t\t\t#print(wrd, sfw, pfw)\n\t\t\t\tnNw += 1\n\t\telse:\n\t\t\t#Checking if words match special words\n\t\t\tif sfw.lower()=='maga':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='makeamericagreatagain':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='make america great again':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif \"email\" in sfw.lower():\n\t\t\t\tnnw += 5\n\t\t\t\ttsv -= 5\n\t\t\telif wrd.lower()=='questionmark':\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 10\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tpsv = 0\n\t\t\t\tnac==False\n\t\t\telif wrd.lower()=='exclaimationmark':\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 
10\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tpsv = 0\n\t\t\t\tnac==False\n\n #Checking if word exists in the Sentiment Dictionary. Assign sentiment value and/or category if word exists. Otherwise categorize word as neutral.\n\t\t\telif sfw.lower() in ldf[0].word.values: #Check if singular version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(sfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac=False\n\t\t\telif pfw.lower() in ldf[0].word.values: #Check if plural version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(pfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac==False\n\t\t\telif pfw.lower() in ldf[0].word.values: #Check if plural version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(pfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw -= 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw -= 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac==False\n\t\t\telif sfw.lower() in ldf[1].word.values: #Check if singular version of word is in dataframe2\n\t\t\t\t#print(sfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac==False\n\t\t\telif pfw.lower() in ldf[1].word.values: #Check if plural version of word is in dataframe2\n\t\t\t\t#print(pfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac==False\n\t\t\telif sfw.lower() in ldf[2].word.values: #Check if singular version of word is in dataframe3\n\t\t\t\t#print(sfw, 3, tsv)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\t\tnac==False\n\t\t\telif pfw.lower() in ldf[2].word.values: #Check if plural version of word is in dataframe3\n\t\t\t\t#print(pfw, 3, tsv)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\t\tnac==False\n\t\t\telif sfw.lower() in ldf[3].word.values: #Check if singular version of word is in dataframe4\n\t\t\t\t#print(sfw, 4)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\t\tnac==False\n\t\t\telif pfw.lower() in ldf[3].word.values: #Check if plural version of word is in dataframe4\n\t\t\t\t#print(pfw, 4)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\t\tnac==False\n\t\t\telse: #The word must be a \"neutral\" word\n\t\t\t\t#print(wrd, sfw, pfw)\n\t\t\t\tnNw += 1\n\t\t#t2 = time.time()\n\t\t#print(\"Amount of time taken to parse word: \" + str(t2-t1) + \"sec\")\n\n\tt4 = time.time()\n\tprint(\"Amount of time taken to parse tweet: \" + str(t4-t3) + \"sec\")\n\treturn(npw, nnw, nNw, tsv)\n\ndef NaiveBayes(txt, ppl, tov):\n\t#tov = likelihoodFunctionInformation(ctt, [df1, df2, df3, df4])\t#Obtain tuple of values required to calculate the Likelihood funnction and posterior probability\n\tpPp = ppl[0]\t#Positive class Prior Probability\n\tpnp = ppl[1]\t#Negative class Prior Probability\n\tpNp = ppl[2]\t#Neutral class Prior Probability\n\tnpw = tov[0]\t#No. 
of positive words\n\tnnw = tov[1]\t#No. of negative words\n\tnNw = tov[2]\t#No. of neutral words\n\ttsv = tov[3]\t#Total Sentiment Value\n\ttnw = npw + nnw + nNw\t#Total no. of words\n\tcls = \" \"\t#Defining the class which the text belongs to.\n\n\t#print(npw, nnw, nNw, tsv)\n\tif(npw==0 and nnw==0):\n\t\tcls = \"neutral\"\t#Class is set to Neutral\n\telse:\n\t\tif(tsv==0):\n\t\t\tden = (pPp*(1-np.exp(-1*((npw*5)/(tnw))))) + (pnp*(1-np.exp(-1*((nnw*5)/(tnw))))) + (pNp*(1-np.exp(-1*((nNw)/(tnw)))))\t#Calculate the denominator for the posterior probabilities\n\n\t\t\t#Posterior Probability of sentiment of text is positive given the text:\n\t\t\tppp = (pPp*(1-np.exp(-1*((npw*5)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(npw*10))))\n\t\t\t#print(ppp)\n\n\t\t\t#Posterior Probability of sentiment of text is negative given the text:\n\t\t\tnpp = (pnp*(1-np.exp(-1*((nnw*5)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nnw*10))))\n\t\t\t#print(npp)\n\n\t\t\t#Posterior Probability of sentiment of text is neutral given the text:\n\t\t\tNpp = (pNp*(1-np.exp(-1*((nNw)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nNw*10))))\n\t\t\t#print(Npp)\n\n\t\t\t#Determine the sentimentality of text:\n\t\t\tif(max([ppp,npp,Npp])==ppp):\n\t\t\t\tcls = \"positive\"\n\t\t\tif(max([ppp,npp,Npp])==npp):\n\t\t\t\tcls = \"negative\"\n\t\t\tif(max([ppp,npp,Npp])==Npp):\n\t\t\t\tcls = \"neutral\"\n\t\telif(tsv>0):\n\t\t\tden = (pPp*(1-np.exp(-1*((npw*5*tsv)/(tnw))))) + (pnp*(1-np.exp(-1*((nnw*5)/(tnw))))) + (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45))))) #Calculate the denominator for the posterior probabilities.\n\n\t\t\t#Posterior Probability of sentiment of text is positive given the text:\n\t\t\tppp = (pPp*(1-np.exp(-1*((npw*5*tsv)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(npw*10))))\n\t\t\t#print(ppp)\n\n\t\t\t#Posterior Probability of sentiment of text is negative given the text:\n\t\t\tnpp = (pnp*(1-np.exp(-1*((nnw*5)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nnw*10))))\n\t\t\t#print(npp)\n\n\t\t\t#Posterior Probability of sentiment of text is neutral given the text:\n\t\t\tNpp = (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nNw*10))))\n\t\t\t#print(Npp)\n\n\t\t\t#Determine the sentimentality of text:\n\t\t\tif(max([ppp,npp,Npp])==ppp):\n\t\t\t\tcls = \"positive\"\n\t\t\tif(max([ppp,npp,Npp])==npp):\n\t\t\t\tcls = \"negative\"\n\t\t\tif(max([ppp,npp,Npp])==Npp):\n\t\t\t\tcls = \"neutral\"\n\t\telse:\n\t\t\tden = (pPp*(1-np.exp(-1*((npw*5)/(tnw))))) + (pnp*(1-np.exp(-1*((nnw*5*abs(tsv))/(tnw))))) + (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45))))) #Calculate the denominator for the posterior probabilities.\n\n\t\t\t#Posterior Probability of sentiment of text is positive given the text:\n\t\t\tppp = (pPp*(1-np.exp(-1*((npw*5*tsv)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(npw*10))))\n\t\t\t#print(ppp)\n\n\t\t\t#Posterior Probability of sentiment of text is negative given the text:\n\t\t\tnpp = (pnp*(1-np.exp(-1*((nnw*5*abs(tsv))/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nnw*10))))\n\t\t\t#print(npp)\n\n\t\t\t#Posterior Probability of sentiment of text is neutral given the text:\n\t\t\tNpp = (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nNw*10))))\n\t\t\t#print(Npp)\n\n\t\t\t#Determine the sentimentality of text:\n\t\t\tif(max([ppp,npp,Npp])==ppp):\n\t\t\t\tcls = \"positive\"\n\t\t\tif(max([ppp,npp,Npp])==npp):\n\t\t\t\tcls = \"negative\"\n\t\t\tif(max([ppp,npp,Npp])==Npp):\n\t\t\t\tcls = \"neutral\"\n\treturn cls\n\n#############Loading the 
Datasets:####################\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\n#Training Dataset:\ndft = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/trainingdataset.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1], names=[\"tweet_text\",\"sentiment\"])\n\n#Testing Dataset:\ndfT = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/testingdataset.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1], names=[\"tweet_text\",\"sentiment\"])\n\n#Sample Dataset:\ndfs = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/sampleDataset.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1,2], names=[\"tweetid\", \"userid\", \"tweet_text\"])\n\n#Main Dataset:\ndfn = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/CoreBotTweetsCombinedEN.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1,2], names=[\"tweetid\",\"userid\", \"tweet_text\"])\n\n#Sentiment Dataset 1:\ndf1 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/AFINN-111.txt\", sep=\"\\t\", header=None, usecols=[0,1], names=[\"word\",\"sentiment\"])\n\n#Sentiment Dataset 2:\ndf2 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/AFINN-96.txt\", sep=\"\\t\", header=None, usecols=[0,1], names=[\"word\",\"sentiment\"])\n\n#Sentiment Dataset 3 [Positive Words Only]:\ndf3 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/Positivewords.txt\", sep=\"\\n\", header=None, usecols=[0], names=[\"word\"])\n\n#Sentiment Dataset 4 [Negative Words Only]:\ndf4 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/Negativewords.txt\", sep=\"\\n\", header=None, usecols=[0], names=[\"word\"])\n\n#Dataset required to classify each tweet and its sentimentality to its corresponding bot:\ndfc = pd.DataFrame(columns=[\"tweetid\", \"userid\", \"tweet_candidate_class\", \"tweet_sentiment_class\"])\n\n\n#############Running the Naive Bayesian Classifer:####################\n\n#Obtain the list of Prior Probabilities obtained from Training Dataset:\ntts = dft[\"sentiment\"].count()\t#Total no. of Training Sentiment values.\ntTs = dfT[\"sentiment\"].count()\t#Total no. of Testing sentiment values.\n#Append all the Testing sentiment values with the Training sentiment values to obtain a complete list of sentiments used as priorProbabalities for classification of all political tweets sent by \"CoreBotTweetsCombinedEN.csv\".\nfor i in range(tts, tts+tTs):\n\tdft[\"sentiment\"][i] = dfT[\"sentiment\"][i-tts]\nppl = priorProb(dft.sentiment)\n\nloc = []\t#List of classes for each text row in the dataframe.\n#Dictionary that stores lists used to calculate demographic statistics below:\npbd = {} #Political Bot Dictionary. I.e. Dictionary of all twitter bots that tweeted, replied to, or retweeted political comments that affected the 2016 elections. The key represents the bot's userid. The value is a list of class types it belongs to. i.e. 
Value = [\"Trump\", \"positive\", \"ProTrump\"].\n\nfor index, row in dfn.iterrows():\n\t#print(CleanUp(expandContractions(row[\"tweet_text\"].replace(\"’\", \"'\"))))\n\tctt = CleanUp(expandContractions(row[\"tweet_text\"].replace(\"’\", \"'\")))\t#Cleaned Tweet\n\tcot = NaiveBayes(ctt, ppl, likelihoodFunctionInformation(ctt, [df1, df2, df3, df4]))\n\t#print(cot)\n\tloc.append(cot)\n\ntnr = 0\t#Total No. of right words.\nmcp = 0\t#MisClassification percentage.\ntap = 0\t#Total Accuracy percentage.\n\nnpt = 0\t#No. of positive Trump tweets.\nnnt = 0\t#No. of negative Trump tweets.\nnNt = 0\t#No. of neutral Trump tweets.\nnpc = 0\t#No. of positive Clinton tweets.\nnnc = 0\t#No. of negative Clinton tweets.\nnNc = 0\t#No. of neutral Clinton tweets.\nngt = 0\t#No. of general tweets. [i.e. Not Trump or Hillary].\ntht = False\t#Is the tweet a Trump or Hillary tweet?\ntcc = \" \"\t#Setting the tweet candidate class [i.e. Trump, Hillary, Neutral] for the classification below.\ntsc = \" \"\t#Setting the tweet sentiment class [i.e. Positive, Negative, Neutral] for the classification below.\ntoc = \" \"\t#Setting the tweet overall class. [i.e. ProTrump, AntiClinton, etc;] for the classification below.\n\n#t=\"RT @Trumpocrats: @TallahForTrump @tariqnasheed I'm beside myself by his hate for America and how we have done so much to free an entire rac...\"\n#print(t)\n#print(\"Actual Sentiment: \" + \"negative\")\n#print(\"Calculated Sentiment: \" + str(cot))\n\n\nfor i in range(0,len(loc)):\n\t#Recording no. of correct tweets:\n\t#print(dfn.iloc[i].tweet_text)\n\t#print(\"Actual Sentiment: \" + dft.iloc[i].sentiment)\n\t#print(\"Calculated Sentiment: \" + loc[i])\n\t'''\n\tif(loc[i].lower()==dft.iloc[i].sentiment.lower()):\n\t\ttnr += 1\t#Use to calculate accuracy of classifier; Not for running entire algorithm\n\t'''\n\t#Classification of Tweets to Trump, Hillary or Neutral:\n\tif(\"trump\" in dfn.iloc[i].tweet_text.lower() or \"donald\" in dfn.iloc[i].tweet_text.lower()):\n\t\ttht = True\n\t\tif((\"email\" in dfn.iloc[i].tweet_text.lower()) or (\"makeamericagreatagain\" in dfn.iloc[i].tweet_text.lower()) or (\"make america great again\" in dfn.iloc[i].tweet_text.lower()) or (\"maga\" in dfn.iloc[i].tweet_text.lower()) or (\"russia\" in dfn.iloc[i].tweet_text.lower())):\n\t\t\tnpt += 1\n\t\t\ttcc = \"Trump\"\n\t\t\ttsc = \"Positive\"\n\t\t\ttoc = \"ProTrump\"\n\t\telse:\n\t\t\tif(loc[i]==\"positive\"):\n\t\t\t\tnpt += 1\n\t\t\t\ttcc = \"Trump\"\n\t\t\t\ttsc = \"Positive\"\n\t\t\t\ttoc = \"ProTrump\"\n\t\t\tif(loc[i]==\"negative\"):\n\t\t\t\tnnt += 1\n\t\t\t\ttcc = \"Trump\"\n\t\t\t\ttsc = \"Negative\"\n\t\t\t\ttoc = \"AntiTrump\"\n\t\t\tif(loc[i]==\"neutral\"):\n\t\t\t\tnNt += 1\n\t\t\t\ttcc = \"Trump\"\n\t\t\t\ttsc = \"Neutral\"\n\t\t\t\ttoc = \"Neutral\"\n\n\tif(\"clinton\" in dfn.iloc[i].tweet_text.lower() or \"hillary\" in dfn.iloc[i].tweet_text.lower()):\n\t\ttht = True\n\t\tif((\"email\" in dfn.iloc[i].tweet_text.lower()) or (\"makeamericagreatagain\" in dfn.iloc[i].tweet_text.lower()) or (\"make america great again\" in dfn.iloc[i].tweet_text.lower()) or (\"maga\" in dfn.iloc[i].tweet_text.lower()) or (\"russia\" in dfn.iloc[i].tweet_text.lower())):\n\t\t\tnnc += 1\n\t\t\ttcc = \"Clinton\"\n\t\t\ttsc = \"Negative\"\n\t\t\ttoc = \"AntiClinton\"\n\t\telse:\n\t\t\tif(loc[i]==\"positive\"):\n\t\t\t\tnpc += 1\n\t\t\t\ttcc = \"Clinton\"\n\t\t\t\ttsc = \"Positive\"\n\t\t\t\ttoc = \"ProClinton\"\n\t\t\tif(loc[i]==\"negative\"):\n\t\t\t\ttcc = \"Clinton\"\n\t\t\t\ttsc = \"Negative\"\n\t\t\t\ttoc = 
\"AntiClinton\"\n\t\t\t\tnnc += 1\n\t\t\tif(loc[i]==\"neutral\"):\n\t\t\t\ttcc = \"Clinton\"\n\t\t\t\ttsc = \"Neutral\"\n\t\t\t\ttoc = \"Neutral\"\n\t\t\t\tnNc += 1\n\tif(tht==False):\n\t\tngt += 1\n\t\ttcc = \"Neutral\"\n\t\ttsc = \"Neutral\"\n\t\ttoc = \"Neutral\"\n\ttht = False\n\n\n\t#############Information required to classify each tweet and its sentimentality to its corresponding bot:#########################\n\tfsn=\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/CoreBotsSentiment/Bot-\"+dfn.iloc[i].userid+\"-EN.csv\"\n\n\t#Assign Values to our political Bot Dictionary defined above:\n\ttmp = [tcc, tsc, toc]\t#Temporary List\n\n\tif(dfn.iloc[i].userid in pbd.keys()):\n\t\tif(tmp not in pbd[dfn.iloc[i].userid]):\n\t\t\ttvl = dfn.iloc[i].userid\t#temporary value\n\t\t\tpbd[tvl]=pbd[tvl]+[tmp]\n\telse:\n\t\tpbd[dfn.iloc[i].userid] = [tmp]\n\t\n\t#Assign values to temporary dataset that will stream these values into the designated csv file.\n\tdfc.loc[i] = [dfn.iloc[i].tweetid, dfn.iloc[i].userid, tcc, tsc]\n\tdfc[[\"tweetid\", \"userid\",\"tweet_candidate_class\", \"tweet_sentiment_class\"]].to_csv(fsn, mode='a', sep=',', header=False, index=False)\n\n\t#Clear this temporary dataset for it to be useable in the next iteration.\n\tdfc = dfc.iloc[i:]\n\t\n\n#Printing our classification results:\nprint(\"******************Trump Sentimentality amongst bots:*******************\")\nprint(\"Total no. of positive Trump tweets = \" + str(npt))\nprint(\"Total no. of negative Trump tweets = \" + str(nnt))\nprint(\"Total no. of neutral Trump tweets = \" + str(nNt))\nprint(\"Total no. of Trump tweets = \"+ str(npt+nnt+nNt))\n\nprint(\"******************Clinton Sentimentality amongst bots:*****************\")\nprint(\"Total no. of positive Clinton tweets = \" + str(npc))\nprint(\"Total no. of negative Clinton tweets = \" + str(nnc))\nprint(\"Total no. of neutral Clinton tweets = \" + str(nNc))\nprint(\"Total no. of Clinton tweets = \"+ str(npc+nnc+nNc))\n\nprint(\"******************General Sentimentality amongst bots:*****************\")\nprint(\"Total no. of general [not candidate related] tweets = \" + str(ngt))\n\nprint(\"*****************General demographics of the bots:*********************\")\nnmc = 0\t#Total No. of bots that represent multiple classes. I.e. Have multiple sentiments or are targetting multiple candidates.\nnpn = 0\t#Total No. of bots that are both positive and negative in sentimentality.\nntc = 0\t#Total No. of bots that target both Trump and Clinton.\nnPtAc = 0\t#Total No. of bots that are Pro Trump and Anti Clinton.\nnPtAt = 0\t#Total No. of bots that are Pro Trump and Anti Trump.\nnAtPc = 0\t#Total No. of bots that are Anti Trump and Pro Clinton.\nnPcAc = 0\t#Total No. of bots that are Pro Clinton and Anti Clinton.\nnPtPc = 0\t#Total No. of bots that are Pro Trump and Pro Clinton.\nnAtAc = 0\t#Total No. 
of bots that are Anti Trump and Anti Clinton.\nfor key, val in pbd.items():\n\tif(len(val)>1):\n\t\tnmc += 1\n\tif(any(\"Positive\" in all for all in val) and any(\"Negative\" in all for all in val)):\n\t\tnpn += 1\n\tif(any(\"Trump\" in all for all in val) and any(\"Clinton\" in all for all in val)):\n\t\tntc += 1\n\tif(any(\"ProTrump\" in all for all in val) and any(\"AntiClinton\" in all for all in val)):\n\t\tnPtAc += 1\n\tif(any(\"ProTrump\" in all for all in val) and any(\"AntiTrump\" in all for all in val)):\n\t\tnPtAt += 1\n\tif(any(\"AntiTrump\" in all for all in val) and any(\"ProClinton\" in all for all in val)):\n\t\tnAtPc += 1\n\tif(any(\"ProClinton\" in all for all in val) and any(\"AntiClinton\" in all for all in val)):\n\t\tnPcAc += 1\n\tif(any(\"ProTrump\" in all for all in val) and any(\"ProClinton\" in all for all in val)):\n\t\tnPtPc += 1\n\tif(any(\"AntiTrump\" in all for all in val) and any(\"AntiClinton\" in all for all in val)):\n\t\tnAtAc += 1\n\n#print(pbd)\nprint(\"Total no. of bots that have multiple classes = \" +str(nmc))\nprint(\"Total no. of bots that are both positive and negative in sentimentality = \" +str(npn))\nprint(\"Total no. of bots that target both Trump and Hillary = \" +str(ntc))\nprint(\"Total no. of bots that are both ProTrump and AntiClinton = \" +str(nPtAc))\nprint(\"Total no. of bots that are both ProTrump and AntiTrump = \" +str(nPtAt))\nprint(\"Total no. of bots that are both AntiTrump and ProClinton = \" +str(nAtPc))\nprint(\"Total no. of bots that are both ProClinton and AntiClinton = \" +str(nPcAc))\nprint(\"Total no. of bots that are both ProTrump and ProClinton = \" +str(nPtPc))\nprint(\"Total no. of bots that are both AntiTrump and AntiClinton = \" +str(nAtAc))\n\n'''\n#Accuracy and Misclassification Rate of Classifier:\nprint(\"Accuracy Percentage of Classifier: \" + str((tnr/len(loc))*100) + \"%\")\nprint(\"Misclassification Percentage of Classifier: \" + str((1-(tnr/len(loc)))*100) + \"%\")\n'''\n"
] | [
[
"numpy.exp",
"pandas.set_option",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
JonathanLehner/cassini_2021_nature_discoverer | [
"41e1e7ec01400d16bd34baf0763adce0383f3841"
] | [
"sentinel2_processing/georasteR_converter.py"
] | [
"\"\"\"\nnow using\nhttps://towardsdatascience.com/reading-and-visualizing-geotiff-images-with-python-8dcca7a74510\nhttps://github.com/GeoUtils/georaster/blob/master/georaster/georaster.py\nhttps://rasterio.readthedocs.io/en/latest/topics/color.html\n\"\"\"\n\nimport os\nimport pprint as pp\nimport time\nfrom datetime import datetime\nfrom os import listdir\nfrom os.path import join, isfile\n\nimport georaster\nfrom osgeo import gdal\nimport matplotlib.pyplot as plt\nimport wordninja\nfrom cleantext import clean\nfrom natsort import natsorted\nfrom tqdm import tqdm\n\ntif_dir_path = str(input(\"Enter path to folder with geotiff files -->\"))\n# -----------------------------------------------------------------\noutput_folder_name = \"georasteR_conversion\"\noutput_path_full = os.path.join(tif_dir_path, output_folder_name)\nif not os.path.isdir(output_path_full):\n os.mkdir(output_path_full)\n # make a place to store outputs if one does not exist\nprint(\"outputs will be in: \\n\", output_path_full)\n\n\n# -----------------------------------------------------------------\n\n\ndef cleantxt_wrap(ugly_text):\n # a wrapper for clean text with options different than default\n\n # https://pypi.org/project/clean-text/\n cleaned_text = clean(ugly_text,\n fix_unicode=True, # fix various unicode errors\n to_ascii=True, # transliterate to closest ASCII representation\n lower=True, # lowercase text\n no_line_breaks=True, # fully strip line breaks as opposed to only normalizing them\n no_urls=True, # replace all URLs with a special token\n no_emails=True, # replace all email addresses with a special token\n no_phone_numbers=True, # replace all phone numbers with a special token\n no_numbers=False, # replace all numbers with a special token\n no_digits=False, # replace all digits with a special token\n no_currency_symbols=True, # replace all currency symbols with a special token\n no_punct=True, # remove punctuations\n replace_with_punct=\"\", # instead of removing punctuations you may replace them\n replace_with_url=\"<URL>\",\n replace_with_email=\"<EMAIL>\",\n replace_with_phone_number=\"<PHONE>\",\n replace_with_number=\"<NUM>\",\n replace_with_digit=\"0\",\n replace_with_currency_symbol=\"<CUR>\",\n lang=\"en\" # set to 'de' for German special handling\n )\n\n return cleaned_text\n\n\ndef beautify_filename(filename, num_words=20, start_reverse=False,\n word_separator=\"_\"):\n # takes a filename stored as text, removes extension, separates into X words ...\n # and returns a nice filename with the words separateed by\n # useful for when you are reading files, doing things to them, and making new files\n\n filename = str(filename)\n index_file_Ext = filename.rfind('.')\n current_name = str(filename)[:index_file_Ext] # get rid of extension\n clean_name = cleantxt_wrap(current_name) # wrapper with custom defs\n file_words = wordninja.split(clean_name)\n # splits concatenated text into a list of words based on common word freq\n if len(file_words) <= num_words:\n num_words = len(file_words)\n\n if start_reverse:\n t_file_words = file_words[-num_words:]\n else:\n t_file_words = file_words[:num_words]\n\n pretty_name = word_separator.join(t_file_words) # see function argument\n\n # NOTE IT DOES NOT RETURN THE EXTENSION\n return pretty_name[: (len(pretty_name) - 1)] # there is a space always at the end, so -1\n\n\n# ----------------------------------------------------------------------------\n\ndef convert_tiff_to_png_georasters(input_path, output_path, verbose=False):\n # Use SingleBandRaster() if image has 
\ndef convert_tiff_to_png_georasters(input_path, output_path, verbose=False):\n    # Use SingleBandRaster() if the image has only one band\n    img = georaster.MultiBandRaster(input_path)\n    # img.r gives the raster in [height, width, band] format\n    # band no. starts from 0\n    plt.imshow(img.r[:, :, 2], interpolation='spline36')\n    plt.title(os.path.basename(input_path))\n    plt.savefig(output_path, bbox_inches='tight', dpi=200)\n    plt.close()  # release the figure so memory does not grow over many files\n\n    if verbose:\n        # no. of bands and resolution via gdal\n        gd_img = gdal.Open(input_path, gdal.GA_ReadOnly)\n        print(\"\\n data on rasters from gdal:\")\n        print(\"bands: {}, size: {} x {}\".format(\n            gd_img.RasterCount, gd_img.RasterXSize, gd_img.RasterYSize))\n        # per-band statistics (min, max, mean, std) of the first band\n        print(gd_img.GetRasterBand(1).GetStatistics(True, True))\n\n\n# ----------------------------------------------------------------------------\n\n\n# load files\nfiles_to_munch = natsorted([f for f in listdir(tif_dir_path) if isfile(os.path.join(tif_dir_path, f))])\ntotal_files_1 = len(files_to_munch)\nremoved_count_1 = 0\napproved_files = []\n# keep only .tif files (do not remove items from a list while iterating over it)\nfor prefile in files_to_munch:\n    if prefile.endswith(\".tif\"):\n        approved_files.append(prefile)\n    else:\n        removed_count_1 += 1\n\nprint(\"out of {0:3d} file(s) originally in the folder, \".format(total_files_1),\n      \"{0:3d} non-tif_image files were removed\".format(removed_count_1))\nprint('\\n {0:3d} tif_image file(s) in folder will be transcribed.'.format(len(approved_files)))\npp.pprint(approved_files)\n\n# ----------------------------------------------------------------------------\n\n\n# loop\nst = time.time()\nfor index_pos, tif_file in enumerate(tqdm(approved_files, total=len(approved_files),\n                                          desc=\"Converting tif_images\")):\n    out_name = beautify_filename(tif_file) + \"_converted_nr_{}.png\".format(index_pos)\n    this_input_path = join(tif_dir_path, tif_file)\n    this_output_path = join(output_path_full, out_name)\n    convert_tiff_to_png_georasters(this_input_path, this_output_path)\n\nrt = round((time.time() - st) / 60, 2)\nprint(\"\\n\\nfinished converting all tif_images - \", datetime.now())\nprint(\"Converted {} tif_images in {} minutes\".format(len(approved_files), rt))\nprint(\"they are located in: \\n\", output_path_full)\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
imcwx/models | [
"523ff5d0d50c3181329e62509270d4d778734000"
] | [
"research/object_detection/core/preprocessor.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Preprocess images and bounding boxes for detection.\n\nWe perform two sets of operations in preprocessing stage:\n(a) operations that are applied to both training and testing data,\n(b) operations that are applied only to training data for the purpose of\n data augmentation.\n\nA preprocessing function receives a set of inputs,\ne.g. an image and bounding boxes,\nperforms an operation on them, and returns them.\nSome examples are: randomly cropping the image, randomly mirroring the image,\n randomly changing the brightness, contrast, hue and\n randomly jittering the bounding boxes.\n\nThe preprocess function receives a tensor_dict which is a dictionary that maps\ndifferent field names to their tensors. For example,\ntensor_dict[fields.InputDataFields.image] holds the image tensor.\nThe image is a rank 4 tensor: [1, height, width, channels] with\ndtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where\nin each row there is a box with [ymin xmin ymax xmax].\nBoxes are in normalized coordinates meaning\ntheir coordinate values range in [0, 1]\n\nImportant Note: In tensor_dict, images is a rank 4 tensor, but preprocessing\nfunctions receive a rank 3 tensor for processing the image. Thus, inside the\npreprocess function we squeeze the image to become a rank 3 tensor and then\nwe pass it to the functions. 
At the end of the preprocess we expand the image\nback to rank 4.\n\"\"\"\n\nimport sys\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.python.ops import control_flow_ops\n\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import standard_fields as fields\n\n\ndef _apply_with_random_selector(x, func, num_cases):\n \"\"\"Computes func(x, sel), with sel sampled from [0...num_cases-1].\n\n Args:\n x: input Tensor.\n func: Python function to apply.\n num_cases: Python int32, number of cases to sample sel from.\n\n Returns:\n The result of func(x, sel), where func receives the value of the\n selector as a python integer, but sel is sampled dynamically.\n \"\"\"\n rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n # Pass the real x only to one of the func calls.\n return control_flow_ops.merge([func(\n control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case)\n for case in range(num_cases)])[0]\n\n\ndef _apply_with_random_selector_tuples(x, func, num_cases):\n \"\"\"Computes func(x, sel), with sel sampled from [0...num_cases-1].\n\n Args:\n x: A tuple of input tensors.\n func: Python function to apply.\n num_cases: Python int32, number of cases to sample sel from.\n\n Returns:\n The result of func(x, sel), where func receives the value of the\n selector as a python integer, but sel is sampled dynamically.\n \"\"\"\n num_inputs = len(x)\n rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n # Pass the real x only to one of the func calls.\n\n tuples = [list() for t in x]\n for case in range(num_cases):\n new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]\n output = func(tuple(new_x), case)\n for j in range(num_inputs):\n tuples[j].append(output[j])\n\n for i in range(num_inputs):\n tuples[i] = control_flow_ops.merge(tuples[i])[0]\n return tuple(tuples)\n\n\ndef _random_integer(minval, maxval, seed):\n \"\"\"Returns a random 0-D tensor between minval and maxval.\n\n Args:\n minval: minimum value of the random tensor.\n maxval: maximum value of the random tensor.\n seed: random seed.\n\n Returns:\n A random 0-D tensor between minval and maxval.\n \"\"\"\n return tf.random_uniform(\n [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)\n\n\ndef normalize_image(image, original_minval, original_maxval, target_minval,\n target_maxval):\n \"\"\"Normalizes pixel values in the image.\n\n Moves the pixel values from the current [original_minval, original_maxval]\n range to a the [target_minval, target_maxval] range.\n\n Args:\n image: rank 3 float32 tensor containing 1\n image -> [height, width, channels].\n original_minval: current image minimum value.\n original_maxval: current image maximum value.\n target_minval: target image minimum value.\n target_maxval: target image maximum value.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('NormalizeImage', values=[image]):\n original_minval = float(original_minval)\n original_maxval = float(original_maxval)\n target_minval = float(target_minval)\n target_maxval = float(target_maxval)\n image = tf.to_float(image)\n image = tf.subtract(image, original_minval)\n image = tf.multiply(image, (target_maxval - target_minval) /\n (original_maxval - original_minval))\n image = tf.add(image, target_minval)\n return image\n\n\ndef retain_boxes_above_threshold(boxes,\n labels,\n label_scores,\n masks=None,\n 
keypoints=None,\n threshold=0.0):\n \"\"\"Retains boxes whose label score is above a given threshold.\n\n If the label score for a box is missing (represented by NaN), the box is\n retained. The boxes that don't pass the threshold will not appear in the\n returned tensor.\n\n Args:\n boxes: float32 tensor of shape [num_instance, 4] representing boxes\n location in normalized coordinates.\n labels: rank 1 int32 tensor of shape [num_instance] containing the object\n classes.\n label_scores: float32 tensor of shape [num_instance] representing the\n score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks are of\n the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized\n coordinates.\n threshold: scalar python float.\n\n Returns:\n retained_boxes: [num_retained_instance, 4]\n retianed_labels: [num_retained_instance]\n retained_label_scores: [num_retained_instance]\n\n If masks, or keypoints are not None, the function also returns:\n\n retained_masks: [num_retained_instance, height, width]\n retained_keypoints: [num_retained_instance, num_keypoints, 2]\n \"\"\"\n with tf.name_scope('RetainBoxesAboveThreshold',\n values=[boxes, labels, label_scores]):\n indices = tf.where(\n tf.logical_or(label_scores > threshold, tf.is_nan(label_scores)))\n indices = tf.squeeze(indices, axis=1)\n retained_boxes = tf.gather(boxes, indices)\n retained_labels = tf.gather(labels, indices)\n retained_label_scores = tf.gather(label_scores, indices)\n result = [retained_boxes, retained_labels, retained_label_scores]\n\n if masks is not None:\n retained_masks = tf.gather(masks, indices)\n result.append(retained_masks)\n\n if keypoints is not None:\n retained_keypoints = tf.gather(keypoints, indices)\n result.append(retained_keypoints)\n\n return result\n\n\ndef _flip_boxes_left_right(boxes):\n \"\"\"Left-right flip the boxes.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Flipped boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_xmin = tf.subtract(1.0, xmax)\n flipped_xmax = tf.subtract(1.0, xmin)\n flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)\n return flipped_boxes\n\n\ndef _flip_boxes_up_down(boxes):\n \"\"\"Up-down flip the boxes.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Flipped boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_ymin = tf.subtract(1.0, ymax)\n flipped_ymax = tf.subtract(1.0, ymin)\n flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)\n return flipped_boxes\n\n\ndef _rot90_boxes(boxes):\n \"\"\"Rotate boxes counter-clockwise by 90 degrees.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Rotated boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n 
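# A 90-degree CCW rotation maps a normalized point (y, x) to (1 - x, y).\n  # Worked example (editor's illustration): [ymin, xmin, ymax, xmax] =\n  # [0.2, 0.5, 0.4, 0.9] becomes [0.1, 0.2, 0.5, 0.4] under the lines below.\n  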
rotated_ymin = tf.subtract(1.0, xmax)\n rotated_ymax = tf.subtract(1.0, xmin)\n rotated_xmin = ymin\n rotated_xmax = ymax\n rotated_boxes = tf.concat(\n [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1)\n return rotated_boxes\n\n\ndef _flip_masks_left_right(masks):\n \"\"\"Left-right flip masks.\n\n Args:\n masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n\n Returns:\n flipped masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n return masks[:, :, ::-1]\n\n\ndef _flip_masks_up_down(masks):\n \"\"\"Up-down flip masks.\n\n Args:\n masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n\n Returns:\n flipped masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n return masks[:, ::-1, :]\n\n\ndef _rot90_masks(masks):\n \"\"\"Rotate masks counter-clockwise by 90 degrees.\n\n Args:\n masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n\n Returns:\n rotated masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n masks = tf.transpose(masks, [0, 2, 1])\n return masks[:, ::-1, :]\n\n\ndef random_horizontal_flip(image,\n boxes=None,\n masks=None,\n keypoints=None,\n keypoint_flip_permutation=None,\n seed=None):\n \"\"\"Randomly flips the image and detections horizontally.\n\n The probability of flipping the image is 50%.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 tensor with shape [N, 4]\n containing the bounding boxes.\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, keypoints, and keypoint_flip_permutation are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: if keypoints are provided but keypoint_flip_permutation is not.\n \"\"\"\n\n def _flip_image(image):\n # flip image\n image_flipped = tf.image.flip_left_right(image)\n return image_flipped\n\n if keypoints is not None and keypoint_flip_permutation is None:\n raise ValueError(\n 'keypoints are provided but keypoints_flip_permutation is not provided')\n\n with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):\n result = []\n # random variable defining whether to do flip or not\n do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),\n lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),\n lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None and keypoint_flip_permutation is not None:\n permutation = keypoint_flip_permutation\n keypoints = tf.cond(\n do_a_flip_random,\n lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation),\n lambda: keypoints)\n result.append(keypoints)\n\n return tuple(result)\n\n\ndef random_vertical_flip(image,\n boxes=None,\n masks=None,\n keypoints=None,\n keypoint_flip_permutation=None,\n seed=None):\n \"\"\"Randomly flips the image and detections vertically.\n\n The probability of flipping the image is 50%.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 tensor with shape [N, 4]\n containing the bounding boxes.\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, keypoints, and keypoint_flip_permutation are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: if keypoints are provided but keypoint_flip_permutation is not.\n \"\"\"\n\n def _flip_image(image):\n # flip image\n image_flipped = tf.image.flip_up_down(image)\n return image_flipped\n\n if keypoints is not None and keypoint_flip_permutation is None:\n raise ValueError(\n 'keypoints are provided but keypoints_flip_permutation is not provided')\n\n with tf.name_scope('RandomVerticalFlip', values=[image, boxes]):\n result = []\n # random variable defining whether to do flip or not\n do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes),\n lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks),\n lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None and keypoint_flip_permutation is not None:\n permutation = keypoint_flip_permutation\n keypoints = tf.cond(\n do_a_flip_random,\n lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation),\n lambda: keypoints)\n result.append(keypoints)\n\n return tuple(result)\n\n\ndef random_rotation90(image,\n boxes=None,\n masks=None,\n keypoints=None,\n seed=None):\n \"\"\"Randomly rotates the image and detections 90 degrees counter-clockwise.\n\n The probability of rotating the image is 50%. This can be combined with\n random_horizontal_flip and random_vertical_flip to produce an output with a\n uniform distribution of the eight possible 90 degree rotation / reflection\n combinations.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 tensor with shape [N, 4]\n containing the bounding boxes.\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, and keypoints, are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n\n def _rot90_image(image):\n # flip image\n image_rotated = tf.image.rot90(image)\n return image_rotated\n\n with tf.name_scope('RandomRotation90', values=[image, boxes]):\n result = []\n\n # random variable defining whether to rotate by 90 degrees or not\n do_a_rot90_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image),\n lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes),\n lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks),\n lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None:\n keypoints = tf.cond(\n do_a_rot90_random,\n lambda: keypoint_ops.rot90(keypoints),\n lambda: keypoints)\n result.append(keypoints)\n\n return tuple(result)\n\n\ndef random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None):\n \"\"\"Scales each value in the pixels of the image.\n\n This function scales each pixel independent of the other ones.\n For each value in image tensor, draws a random number between\n minval and maxval and multiples the values with them.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n minval: lower ratio of scaling pixel values.\n maxval: upper ratio of scaling pixel values.\n seed: random seed.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomPixelValueScale', values=[image]):\n image = tf.convert_to_tensor(image, name='image')\n # Remember original dtype to so we can convert back if needed\n orig_dtype = image.dtype\n image = tf.image.convert_image_dtype(image, tf.float32)\n color_coef = tf.random_uniform(\n tf.shape(image),\n minval=minval,\n maxval=maxval,\n dtype=tf.float32,\n seed=seed)\n image = tf.multiply(image, color_coef)\n return tf.image.convert_image_dtype(image, orig_dtype, saturate=True)\n\n return image\n\n\ndef random_image_scale(image,\n masks=None,\n min_scale_ratio=0.5,\n max_scale_ratio=2.0,\n seed=None):\n \"\"\"Scales the image size.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels].\n masks: (optional) rank 3 float32 tensor containing masks with\n size [height, width, num_masks]. 
The value is set to None if there are no\n masks.\n min_scale_ratio: minimum scaling ratio.\n max_scale_ratio: maximum scaling ratio.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n masks: If masks is not none, resized masks which are the same rank as input\n masks will be returned.\n \"\"\"\n with tf.name_scope('RandomImageScale', values=[image]):\n result = []\n image_shape = tf.shape(image)\n image_height = image_shape[0]\n image_width = image_shape[1]\n size_coef = tf.random_uniform([],\n minval=min_scale_ratio,\n maxval=max_scale_ratio,\n dtype=tf.float32, seed=seed)\n image_newysize = tf.to_int32(\n tf.multiply(tf.to_float(image_height), size_coef))\n image_newxsize = tf.to_int32(\n tf.multiply(tf.to_float(image_width), size_coef))\n image = tf.image.resize_images(\n image, [image_newysize, image_newxsize], align_corners=True)\n result.append(image)\n if masks:\n masks = tf.image.resize_nearest_neighbor(\n masks, [image_newysize, image_newxsize], align_corners=True)\n result.append(masks)\n return tuple(result)\n\n\ndef random_rgb_to_gray(image, probability=0.1, seed=None):\n \"\"\"Changes the image from RGB to Grayscale with the given probability.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n probability: the probability of returning a grayscale image.\n The probability should be a number between [0, 1].\n seed: random seed.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n def _image_to_gray(image):\n image_gray1 = tf.image.rgb_to_grayscale(image)\n image_gray3 = tf.image.grayscale_to_rgb(image_gray1)\n return image_gray3\n\n with tf.name_scope('RandomRGBtoGray', values=[image]):\n # random variable defining whether to do flip or not\n do_gray_random = tf.random_uniform([], seed=seed)\n\n image = tf.cond(\n tf.greater(do_gray_random, probability), lambda: image,\n lambda: _image_to_gray(image))\n\n return image\n\n\ndef random_adjust_brightness(image, max_delta=0.2):\n \"\"\"Randomly adjusts brightness.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n max_delta: how much to change the brightness. A value between [0, 1).\n\n Returns:\n image: image which is the same shape as input image.\n boxes: boxes which is the same shape as input boxes.\n \"\"\"\n with tf.name_scope('RandomAdjustBrightness', values=[image]):\n image = tf.image.random_brightness(image, max_delta)\n return image\n\n\ndef random_adjust_contrast(image, min_delta=0.8, max_delta=1.25):\n \"\"\"Randomly adjusts contrast.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n min_delta: see max_delta.\n max_delta: how much to change the contrast. Contrast will change with a\n value between min_delta and max_delta. 
This value will be\n multiplied to the current contrast of the image.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomAdjustContrast', values=[image]):\n image = tf.image.random_contrast(image, min_delta, max_delta)\n return image\n\n\ndef random_adjust_hue(image, max_delta=0.02):\n \"\"\"Randomly adjusts hue.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n max_delta: change hue randomly with a value between 0 and max_delta.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomAdjustHue', values=[image]):\n image = tf.image.random_hue(image, max_delta)\n return image\n\n\ndef random_adjust_saturation(image, min_delta=0.8, max_delta=1.25):\n \"\"\"Randomly adjusts saturation.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n min_delta: see max_delta.\n max_delta: how much to change the saturation. Saturation will change with a\n value between min_delta and max_delta. This value will be\n multiplied to the current saturation of the image.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomAdjustSaturation', values=[image]):\n image = tf.image.random_saturation(image, min_delta, max_delta)\n return image\n\n\ndef random_distort_color(image, color_ordering=0):\n \"\"\"Randomly distorts color.\n\n Randomly distorts color using a combination of brightness, hue, contrast\n and saturation changes.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n color_ordering: Python int, a type of distortion (valid values: 0, 1).\n\n Returns:\n image: image which is the same shape as input image.\n\n Raises:\n ValueError: if color_ordering is not in {0, 1}.\n \"\"\"\n with tf.name_scope('RandomDistortColor', values=[image]):\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif color_ordering == 1:\n image = tf.image.random_brightness(image, max_delta=32. 
/ 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n else:\n raise ValueError('color_ordering must be in {0, 1}')\n\n return image\n\n\ndef random_jitter_boxes(boxes, ratio=0.05, seed=None):\n \"\"\"Randomly jitter boxes in image.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n ratio: The ratio of the box width and height that the corners can jitter.\n For example if the width is 100 pixels and ratio is 0.05,\n the corners can jitter up to 5 pixels in the x direction.\n seed: random seed.\n\n Returns:\n boxes: boxes which is the same shape as input boxes.\n \"\"\"\n def random_jitter_box(box, ratio, seed):\n \"\"\"Randomly jitter box.\n\n Args:\n box: bounding box [1, 1, 4].\n ratio: max ratio between jittered box and original box,\n a number between [0, 0.5].\n seed: random seed.\n\n Returns:\n jittered_box: jittered box.\n \"\"\"\n rand_numbers = tf.random_uniform(\n [1, 1, 4], minval=-ratio, maxval=ratio, dtype=tf.float32, seed=seed)\n box_width = tf.subtract(box[0, 0, 3], box[0, 0, 1])\n box_height = tf.subtract(box[0, 0, 2], box[0, 0, 0])\n hw_coefs = tf.stack([box_height, box_width, box_height, box_width])\n hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers)\n jittered_box = tf.add(box, hw_rand_coefs)\n jittered_box = tf.clip_by_value(jittered_box, 0.0, 1.0)\n return jittered_box\n\n with tf.name_scope('RandomJitterBoxes', values=[boxes]):\n # boxes are [N, 4]. Lets first make them [N, 1, 1, 4]\n boxes_shape = tf.shape(boxes)\n boxes = tf.expand_dims(boxes, 1)\n boxes = tf.expand_dims(boxes, 2)\n\n distorted_boxes = tf.map_fn(\n lambda x: random_jitter_box(x, ratio, seed), boxes, dtype=tf.float32)\n\n distorted_boxes = tf.reshape(distorted_boxes, boxes_shape)\n\n return distorted_boxes\n\n\ndef _strict_random_crop_image(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=1.0,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.1, 1.0),\n overlap_thresh=0.3):\n \"\"\"Performs random crop.\n\n Note: boxes will be clipped to the crop. Keypoint coordinates that are\n outside the crop will be set to NaN, which is consistent with the original\n keypoint encoding for non-existing keypoints. This function always crops\n the image and is supposed to be used by `random_crop_image` function which\n sometimes returns image unchanged.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes with shape\n [num_instances, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: rank 1 float32 tensor with shape [num_instances].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n with tf.name_scope('RandomCropImage', values=[image, boxes]):\n image_shape = tf.shape(image)\n\n # boxes are [N, 4]. Lets first make them [N, 1, 4].\n boxes_expanded = tf.expand_dims(\n tf.clip_by_value(\n boxes, clip_value_min=0.0, clip_value_max=1.0), 1)\n\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n image_shape,\n bounding_boxes=boxes_expanded,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=100,\n use_image_if_no_bounding_boxes=True)\n\n im_box_begin, im_box_size, im_box = sample_distorted_bounding_box\n\n new_image = tf.slice(image, im_box_begin, im_box_size)\n new_image.set_shape([None, None, image.get_shape()[2]])\n\n # [1, 4]\n im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0])\n # [4]\n im_box_rank1 = tf.squeeze(im_box)\n\n boxlist = box_list.BoxList(boxes)\n boxlist.add_field('labels', labels)\n\n if label_scores is not None:\n boxlist.add_field('label_scores', label_scores)\n\n im_boxlist = box_list.BoxList(im_box_rank2)\n\n # remove boxes that are outside cropped image\n boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(\n boxlist, im_box_rank1)\n\n # remove boxes that are outside image\n # overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(\n # boxlist, im_boxlist, overlap_thresh)\n\n # remove boxes that are outside image AND GET BLACKBOXLIST\n overlapping_boxlist, keep_ids, black_boxlist = box_list_ops.prune_non_overlapping_boxes_custom(\n boxlist, im_boxlist, overlap_thresh)\n\n # change the coordinate of the remaining boxes\n new_labels = overlapping_boxlist.get_field('labels')\n new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,\n im_box_rank1)\n ####################################################################\n\n # Change coordinate of boxes to be blacked\n black_boxlist = box_list_ops.change_coordinate_frame(black_boxlist,\n \t\t im_box_rank1)\n blackbox = black_boxlist.get()\n new_image = tf.expand_dims(new_image, 0)\n blackbox = tf.expand_dims(blackbox, 0)\n new_image = tf.image.draw_bounding_boxes(new_image, blackbox, fill=True)\n new_image = tf.squeeze(new_image)\n blackbox = tf.squeeze(blackbox)\n\n\n #####################################################################\n new_boxes = new_boxlist.get()\n new_boxes = tf.clip_by_value(\n new_boxes, clip_value_min=0.0, clip_value_max=1.0)\n\n result = [new_image, new_boxes, new_labels]\n\n if label_scores is not None:\n new_label_scores = overlapping_boxlist.get_field('label_scores')\n result.append(new_label_scores)\n\n if masks 
is not None:\n masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)\n masks_of_boxes_completely_inside_window = tf.gather(\n masks_of_boxes_inside_window, keep_ids)\n masks_box_begin = [0, im_box_begin[0], im_box_begin[1]]\n masks_box_size = [-1, im_box_size[0], im_box_size[1]]\n new_masks = tf.slice(\n masks_of_boxes_completely_inside_window,\n masks_box_begin, masks_box_size)\n result.append(new_masks)\n\n if keypoints is not None:\n keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids)\n keypoints_of_boxes_completely_inside_window = tf.gather(\n keypoints_of_boxes_inside_window, keep_ids)\n new_keypoints = keypoint_ops.change_coordinate_frame(\n keypoints_of_boxes_completely_inside_window, im_box_rank1)\n new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,\n [0.0, 0.0, 1.0, 1.0])\n result.append(new_keypoints)\n\n return tuple(result)\n\n\ndef random_crop_image(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=0.5,\n aspect_ratio_range=(0.60, 0.90),\n area_range=(0.3, 1.0),\n overlap_thresh=0.3,\n random_coef=0.0,\n seed=None):\n # min_object_covered=1.0,\n # aspect_ratio_range=(0.75, 1.33),\n # area_range=(0.1, 1.0),\n\n # for trng\n # min_object_covered=0.5,\n # aspect_ratio_range=(0.60, 0.90),\n # area_range=(0.5, 1.0)\n \"\"\"Randomly crops the image.\n\n Given the input image and its bounding boxes, this op randomly\n crops a subimage. Given a user-provided set of input constraints,\n the crop window is resampled until it satisfies these constraints.\n If within 100 trials it is unable to find a valid crop, the original\n image is returned. See the Args section for a description of the input\n constraints. Both input boxes and returned Boxes are in normalized\n form (e.g., lie in the unit square [0, 1]).\n This function will return the original image with probability random_coef.\n\n Note: boxes will be clipped to the crop. Keypoint coordinates that are\n outside the crop will be set to NaN, which is consistent with the original\n keypoint encoding for non-existing keypoints.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes with shape\n [num_instances, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances].\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. 
If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n seed: random seed.\n\n Returns:\n image: Image shape will be [new_height, new_width, channels].\n boxes: boxes which is the same rank as input boxes. Boxes are in normalized\n form.\n labels: new labels.\n\n If label_scores, masks, or keypoints are not None, the function also\n returns:\n label_scores: new scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n\n def strict_random_crop_image_fn():\n return _strict_random_crop_image(\n image,\n boxes,\n labels,\n label_scores=label_scores,\n masks=masks,\n keypoints=keypoints,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n overlap_thresh=overlap_thresh)\n\n # avoids tf.cond to make faster RCNN training on borg. See b/140057645.\n if random_coef < sys.float_info.min:\n result = strict_random_crop_image_fn()\n else:\n do_a_crop_random = tf.random_uniform([], seed=seed)\n do_a_crop_random = tf.greater(do_a_crop_random, random_coef)\n\n outputs = [image, boxes, labels]\n\n if label_scores is not None:\n outputs.append(label_scores)\n if masks is not None:\n outputs.append(masks)\n if keypoints is not None:\n outputs.append(keypoints)\n\n result = tf.cond(do_a_crop_random, strict_random_crop_image_fn,\n lambda: tuple(outputs))\n return result\n\n\ndef random_pad_image(image,\n boxes,\n min_image_size=None,\n max_image_size=None,\n pad_color=None,\n seed=None):\n \"\"\"Randomly pads the image.\n\n This function randomly pads the image with zeros. The final size of the\n padded image will be between min_image_size and max_image_size.\n if min_image_size is smaller than the input image size, min_image_size will\n be set to the input image size. The same for max_image_size. The input image\n will be located at a uniformly random location inside the padded image.\n The relative location of the boxes to the original image will remain the same.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n min_image_size: a tensor of size [min_height, min_width], type tf.int32.\n If passed as None, will be set to image size\n [height, width].\n max_image_size: a tensor of size [max_height, max_width], type tf.int32.\n If passed as None, will be set to twice the\n image [height * 2, width * 2].\n pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.\n if set as None, it will be set to average color of the input\n image.\n\n seed: random seed.\n\n Returns:\n image: Image shape will be [new_height, new_width, channels].\n boxes: boxes which is the same rank as input boxes. 
Boxes are in normalized\n form.\n \"\"\"\n # if pad_color is None:\n # pad_color = tf.reduce_mean(image, axis=[0, 1])\n\n image_shape = tf.shape(image)\n image_height = image_shape[0]\n image_width = image_shape[1]\n\n if max_image_size is None:\n max_image_size = tf.stack([image_height * 2, image_width * 2])\n max_image_size = tf.maximum(max_image_size,\n tf.stack([image_height, image_width]))\n\n if min_image_size is None:\n min_image_size = tf.stack([image_height, image_width])\n min_image_size = tf.maximum(min_image_size,\n tf.stack([image_height, image_width]))\n\n target_height = tf.cond(\n max_image_size[0] > min_image_size[0],\n lambda: _random_integer(min_image_size[0], max_image_size[0], seed),\n lambda: max_image_size[0])\n\n target_width = tf.cond(\n max_image_size[1] > min_image_size[1],\n lambda: _random_integer(min_image_size[1], max_image_size[1], seed),\n lambda: max_image_size[1])\n\n offset_height = tf.cond(\n target_height > image_height,\n lambda: _random_integer(0, target_height - image_height, seed),\n lambda: tf.constant(0, dtype=tf.int32))\n\n offset_width = tf.cond(\n target_width > image_width,\n lambda: _random_integer(0, target_width - image_width, seed),\n lambda: tf.constant(0, dtype=tf.int32))\n\n new_image = tf.image.pad_to_bounding_box(\n image,\n offset_height=offset_height,\n offset_width=offset_width,\n target_height=target_height,\n target_width=target_width)\n\n # Setting color of the padded pixels\n # image_ones = tf.ones_like(image)\n # image_ones_padded = tf.image.pad_to_bounding_box(\n # image_ones,\n # offset_height=offset_height,\n # offset_width=offset_width,\n # target_height=target_height,\n # target_width=target_width)\n # image_color_padded = (1.0 - image_ones_padded) * pad_color\n # new_image += image_color_padded\n\n # setting boxes\n new_window = tf.to_float(\n tf.stack([\n -offset_height, -offset_width, target_height - offset_height,\n target_width - offset_width\n ]))\n new_window /= tf.to_float(\n tf.stack([image_height, image_width, image_height, image_width]))\n boxlist = box_list.BoxList(boxes)\n new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)\n new_boxes = new_boxlist.get()\n\n return new_image, new_boxes\n\n\ndef random_crop_pad_image(image,\n boxes,\n labels,\n label_scores=None,\n min_object_covered=0.5,\n aspect_ratio_range=(0.75/1.1, 0.75*1.1),\n area_range=(0.2, 1.0),\n overlap_thresh=0.7,\n random_coef=0.0,\n min_padded_size_ratio=(1.0, 1.0),\n max_padded_size_ratio=(1.75, 1.75),\n pad_color=None,\n seed=None):\n # orig\n # min_object_covered=1.0,\n # aspect_ratio_range=(0.75, 1.33),\n # area_range=(0.1, 1.0),\n # max_padded_size_ratio=(2.0, 2.0),\n # overlap_thresh=0.3,\n\n # pmi_ukraine\n # min_object_covered=0.4,\n # aspect_ratio_range=(0.60, 0.90),\n \"\"\"Randomly crops and pads the image.\n\n Given an input image and its bounding boxes, this op first randomly crops\n the image and then randomly pads the image with background values. Parameters\n min_padded_size_ratio and max_padded_size_ratio, determine the range of the\n final output image size. Specifically, the final image size will have a size\n in the range of min_padded_size_ratio * tf.shape(image) and\n max_padded_size_ratio * tf.shape(image). 
Note that these ratios are with\n respect to the size of the original image, so we can't capture the same\n effect easily by independently applying RandomCropImage\n followed by RandomPadImage.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: rank 1 float32 containing the label scores.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.\n if set as None, it will be set to average color of the randomly\n cropped image.\n seed: random seed.\n\n Returns:\n padded_image: padded image.\n padded_boxes: boxes which is the same rank as input boxes. Boxes are in\n normalized form.\n cropped_labels: cropped labels.\n if label_scores is not None also returns:\n cropped_label_scores: cropped label scores.\n \"\"\"\n # np.random.seed(123) # crop and pad\n # np.random.seed(1234) # crop\n # np.random.seed(12345) # none, orig \n # np.random.seed(1) # pad\n\n rand = np.random.random_sample()\n\n # rand = 0.95\n\n crop, pad = False, False\n if rand < 0.70:\n \tcrop = True\n elif rand < 0.80:\n \tpad = True\n else: # rand < 0.90:\n \tcrop = True\n \tpad = True\n # else:\n # \t# return orig\n # \tpass\n # print(\"The random number generated is: \" + str(rand))\n # print(\"It will crop: \" + str(crop))\n # print(\"It will pad: \" + str(pad))\n\n image_size = tf.shape(image)\n image_height = image_size[0]\n image_width = image_size[1]\n\n if crop and pad:\n\t result = random_crop_image(\n\t image=image,\n\t boxes=boxes,\n\t labels=labels,\n\t label_scores=label_scores,\n\t min_object_covered=min_object_covered,\n\t aspect_ratio_range=aspect_ratio_range,\n\t area_range=area_range,\n\t overlap_thresh=overlap_thresh,\n\t random_coef=random_coef,\n\t seed=seed)\n\t cropped_image, cropped_boxes, cropped_labels = result[:3]\n\n\t min_image_size = tf.to_int32(\n\t tf.to_float(tf.stack([image_height, image_width])) *\n\t min_padded_size_ratio)\n\t max_image_size = tf.to_int32(\n\t tf.to_float(tf.stack([image_height, image_width])) *\n\t max_padded_size_ratio)\n\t padded_image, padded_boxes = random_pad_image(\n\t cropped_image,\n\t cropped_boxes,\n\t min_image_size=min_image_size,\n\t max_image_size=max_image_size,\n\t pad_color=pad_color,\n\t seed=seed)\n\n\t cropped_padded_output = (padded_image, padded_boxes, cropped_labels)\n\n\t if label_scores is not None:\n\t cropped_label_scores = result[3]\n\t cropped_padded_output += (cropped_label_scores,)\n\n 
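# Editor's note on the thresholds above: rand < 0.70 -> crop only,\n  # 0.70 <= rand < 0.80 -> pad only, otherwise both crop and pad (~20%); the\n  # commented-out final branch would instead have kept ~10% of images unchanged.\n  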
elif crop: \n\t result = random_crop_image(\n\t image=image,\n\t boxes=boxes,\n\t labels=labels,\n\t label_scores=label_scores,\n\t min_object_covered=min_object_covered,\n\t aspect_ratio_range=aspect_ratio_range,\n\t area_range=area_range,\n\t overlap_thresh=overlap_thresh,\n\t random_coef=random_coef,\n\t seed=seed)\n\t cropped_image, cropped_boxes, cropped_labels = result[:3]\n\n\t cropped_padded_output = (cropped_image, cropped_boxes, cropped_labels)\n\t if label_scores is not None:\n\t cropped_label_scores = result[3]\n\t cropped_padded_output += (cropped_label_scores,)\n\n elif pad:\n\t min_image_size = tf.to_int32(\n\t tf.to_float(tf.stack([image_height, image_width])) *\n\t min_padded_size_ratio)\n\t max_image_size = tf.to_int32(\n\t tf.to_float(tf.stack([image_height, image_width])) *\n\t max_padded_size_ratio)\n\t padded_image, padded_boxes = random_pad_image(\n\t image,\n\t boxes,\n\t min_image_size=min_image_size,\n\t max_image_size=max_image_size,\n\t pad_color=pad_color,\n\t seed=seed)\n\n\t cropped_padded_output = (padded_image, padded_boxes, labels)\n\t if label_scores is not None:\n\t cropped_padded_output += (label_scores,)\n else:\n # image = tf.expand_dims(image, 0)\n # boxes = tf.expand_dims(boxes, 0)\n # image = tf.image.draw_bounding_boxes(image, boxes, fill=True)\n # image = tf.squeeze(image)\n # boxes = tf.squeeze(boxes)\n cropped_padded_output = (image, boxes, labels)\n\n return cropped_padded_output\n\n\ndef random_crop_to_aspect_ratio(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n aspect_ratio=1.0,\n overlap_thresh=0.3,\n seed=None):\n \"\"\"Randomly crops an image to the specified aspect ratio.\n\n Randomly crops the a portion of the image such that the crop is of the\n specified aspect ratio, and the crop is as large as possible. If the specified\n aspect ratio is larger than the aspect ratio of the image, this op will\n randomly remove rows from the top and bottom of the image. If the specified\n aspect ratio is less than the aspect ratio of the image, this op will randomly\n remove cols from the left and right of the image. If the specified aspect\n ratio is the same as the aspect ratio of the image, this op will return the\n image.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n aspect_ratio: the aspect ratio of cropped image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: new label scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: If image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('RandomCropToAspectRatio', values=[image]):\n image_shape = tf.shape(image)\n orig_height = image_shape[0]\n orig_width = image_shape[1]\n orig_aspect_ratio = tf.to_float(orig_width) / tf.to_float(orig_height)\n new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)\n def target_height_fn():\n return tf.to_int32(tf.round(tf.to_float(orig_width) / new_aspect_ratio))\n\n target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio,\n lambda: orig_height, target_height_fn)\n\n def target_width_fn():\n return tf.to_int32(tf.round(tf.to_float(orig_height) * new_aspect_ratio))\n\n target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio,\n lambda: orig_width, target_width_fn)\n\n # either offset_height = 0 and offset_width is randomly chosen from\n # [0, offset_width - target_width), or else offset_width = 0 and\n # offset_height is randomly chosen from [0, offset_height - target_height)\n offset_height = _random_integer(0, orig_height - target_height + 1, seed)\n offset_width = _random_integer(0, orig_width - target_width + 1, seed)\n new_image = tf.image.crop_to_bounding_box(\n image, offset_height, offset_width, target_height, target_width)\n\n im_box = tf.stack([\n tf.to_float(offset_height) / tf.to_float(orig_height),\n tf.to_float(offset_width) / tf.to_float(orig_width),\n tf.to_float(offset_height + target_height) / tf.to_float(orig_height),\n tf.to_float(offset_width + target_width) / tf.to_float(orig_width)\n ])\n\n boxlist = box_list.BoxList(boxes)\n boxlist.add_field('labels', labels)\n\n if label_scores is not None:\n boxlist.add_field('label_scores', label_scores)\n\n im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0))\n\n # remove boxes whose overlap with the image is less than overlap_thresh\n overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(\n boxlist, im_boxlist, overlap_thresh)\n\n # change the coordinate of the remaining boxes\n new_labels = overlapping_boxlist.get_field('labels')\n new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,\n im_box)\n new_boxlist = box_list_ops.clip_to_window(new_boxlist,\n tf.constant([0.0, 0.0, 1.0, 1.0],\n tf.float32))\n new_boxes = new_boxlist.get()\n\n result = [new_image, new_boxes, new_labels]\n\n if label_scores is not None:\n new_label_scores = overlapping_boxlist.get_field('label_scores')\n result.append(new_label_scores)\n\n if masks is not None:\n masks_inside_window = tf.gather(masks, keep_ids)\n masks_box_begin = tf.stack([0, offset_height, offset_width])\n masks_box_size = tf.stack([-1, target_height, target_width])\n new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size)\n result.append(new_masks)\n\n if keypoints is not 
None:\n keypoints_inside_window = tf.gather(keypoints, keep_ids)\n new_keypoints = keypoint_ops.change_coordinate_frame(\n keypoints_inside_window, im_box)\n new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,\n [0.0, 0.0, 1.0, 1.0])\n result.append(new_keypoints)\n\n return tuple(result)\n\n\ndef random_pad_to_aspect_ratio(image,\n boxes,\n masks=None,\n keypoints=None,\n aspect_ratio=1.0,\n min_padded_size_ratio=(1.0, 1.0),\n max_padded_size_ratio=(2.0, 2.0),\n seed=None):\n # aspect_ratio=1.0,\n # aspect_ratio=800.0/1080.0,\n \"\"\"Randomly zero pads an image to the specified aspect ratio.\n\n Pads the image so that the resulting image will have the specified aspect\n ratio without scaling less than the min_padded_size_ratio or more than the\n max_padded_size_ratio. If the min_padded_size_ratio or max_padded_size_ratio\n is lower than what is possible to maintain the aspect ratio, then this method\n will use the least padding to achieve the specified aspect ratio.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x\n normalized coordinates.\n aspect_ratio: aspect ratio of the final image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: new label scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: If image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('RandomPadToAspectRatio', values=[image]):\n image_shape = tf.shape(image)\n image_height = tf.to_float(image_shape[0])\n image_width = tf.to_float(image_shape[1])\n image_aspect_ratio = image_width / image_height\n new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)\n target_height = tf.cond(\n image_aspect_ratio <= new_aspect_ratio,\n lambda: image_height,\n lambda: image_width / new_aspect_ratio)\n target_width = tf.cond(\n image_aspect_ratio >= new_aspect_ratio,\n lambda: image_width,\n lambda: image_height * new_aspect_ratio)\n\n min_height = tf.maximum(\n min_padded_size_ratio[0] * image_height, target_height)\n min_width = tf.maximum(\n min_padded_size_ratio[1] * image_width, target_width)\n max_height = tf.maximum(\n max_padded_size_ratio[0] * image_height, target_height)\n max_width = tf.maximum(\n max_padded_size_ratio[1] * image_width, target_width)\n\n min_scale = tf.maximum(min_height / target_height, min_width / target_width)\n 
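# Editor's note: drawing scale from [min_scale, max_scale] keeps the padded\n    # size within (min|max)_padded_size_ratio of the input while preserving the\n    # target aspect ratio; e.g. ratios (1.0, 2.0) allow up to a 2x zero-pad.\n    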
max_scale = tf.minimum(max_height / target_height, max_width / target_width)\n scale = tf.random_uniform([], min_scale, max_scale, seed=seed)\n\n target_height = scale * target_height\n target_width = scale * target_width\n\n new_image = tf.image.pad_to_bounding_box(\n image, 0, 0, tf.to_int32(target_height), tf.to_int32(target_width))\n\n im_box = tf.stack([\n 0.0,\n 0.0,\n target_height / image_height,\n target_width / image_width\n ])\n boxlist = box_list.BoxList(boxes)\n new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box)\n new_boxes = new_boxlist.get()\n\n result = [new_image, new_boxes]\n\n if masks is not None:\n new_masks = tf.expand_dims(masks, -1)\n new_masks = tf.image.pad_to_bounding_box(new_masks, 0, 0,\n tf.to_int32(target_height),\n tf.to_int32(target_width))\n new_masks = tf.squeeze(new_masks, [-1])\n result.append(new_masks)\n\n if keypoints is not None:\n new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box)\n result.append(new_keypoints)\n\n return tuple(result)\n\n\ndef random_black_patches(image,\n max_black_patches=10,\n probability=0.5,\n size_to_image_ratio=0.1,\n random_seed=None):\n \"\"\"Randomly adds some black patches to the image.\n\n This op adds up to max_black_patches square black patches of a fixed size\n to the image where size is specified via the size_to_image_ratio parameter.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n max_black_patches: number of times that the function tries to add a\n black box to the image.\n probability: at each try, what is the chance of adding a box.\n size_to_image_ratio: Determines the ratio of the size of the black patches\n to the size of the image.\n box_size = size_to_image_ratio *\n min(image_width, image_height)\n random_seed: random seed.\n\n Returns:\n image\n \"\"\"\n def add_black_patch_to_image(image):\n \"\"\"Function for adding one patch to the image.\n\n Args:\n image: image\n\n Returns:\n image with a randomly added black box\n \"\"\"\n image_shape = tf.shape(image)\n image_height = image_shape[0]\n image_width = image_shape[1]\n box_size = tf.to_int32(\n tf.multiply(\n tf.minimum(tf.to_float(image_height), tf.to_float(image_width)),\n size_to_image_ratio))\n normalized_y_min = tf.random_uniform(\n [], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed)\n normalized_x_min = tf.random_uniform(\n [], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed)\n y_min = tf.to_int32(normalized_y_min * tf.to_float(image_height))\n x_min = tf.to_int32(normalized_x_min * tf.to_float(image_width))\n black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32)\n mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min,\n image_height, image_width)\n image = tf.multiply(image, mask)\n return image\n\n with tf.name_scope('RandomBlackPatchInImage', values=[image]):\n for _ in range(max_black_patches):\n random_prob = tf.random_uniform(\n [], minval=0.0, maxval=1.0, dtype=tf.float32, seed=random_seed)\n image = tf.cond(\n tf.greater(random_prob, probability), lambda: image,\n lambda: add_black_patch_to_image(image))\n\n return image\n\n\ndef image_to_float(image):\n \"\"\"Used in Faster R-CNN. 
Casts image pixel values to float.\n\n Args:\n image: input image which might be in tf.uint8 or sth else format\n\n Returns:\n image: image in tf.float32 format.\n \"\"\"\n with tf.name_scope('ImageToFloat', values=[image]):\n image = tf.to_float(image)\n return image\n\n\ndef random_resize_method(image, target_size):\n \"\"\"Uses a random resize method to resize the image to target size.\n\n Args:\n image: a rank 3 tensor.\n target_size: a list of [target_height, target_width]\n\n Returns:\n resized image.\n \"\"\"\n\n resized_image = _apply_with_random_selector(\n image,\n lambda x, method: tf.image.resize_images(x, target_size, method),\n num_cases=4)\n\n return resized_image\n\n\ndef _compute_new_static_size(image, min_dimension, max_dimension):\n \"\"\"Compute new static shape for resize_to_range method.\"\"\"\n image_shape = image.get_shape().as_list()\n orig_height = image_shape[0]\n orig_width = image_shape[1]\n orig_min_dim = min(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n large_scale_factor = min_dimension / float(orig_min_dim)\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = int(round(orig_height * large_scale_factor))\n large_width = int(round(orig_width * large_scale_factor))\n large_size = [large_height, large_width]\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = max(orig_height, orig_width)\n small_scale_factor = max_dimension / float(orig_max_dim)\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors. 
For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = int(round(orig_height * small_scale_factor))\n small_width = int(round(orig_width * small_scale_factor))\n small_size = [small_height, small_width]\n new_size = large_size\n if max(large_size) > max_dimension:\n new_size = small_size\n else:\n new_size = large_size\n return tf.constant(new_size)\n\n\ndef _compute_new_dynamic_size(image, min_dimension, max_dimension):\n \"\"\"Compute new dynamic shape for resize_to_range method.\"\"\"\n image_shape = tf.shape(image)\n orig_height = tf.to_float(image_shape[0])\n orig_width = tf.to_float(image_shape[1])\n orig_min_dim = tf.minimum(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n min_dimension = tf.constant(min_dimension, dtype=tf.float32)\n large_scale_factor = min_dimension / orig_min_dim\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))\n large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))\n large_size = tf.stack([large_height, large_width])\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = tf.maximum(orig_height, orig_width)\n max_dimension = tf.constant(max_dimension, dtype=tf.float32)\n small_scale_factor = max_dimension / orig_max_dim\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors. For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))\n small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))\n small_size = tf.stack([small_height, small_width])\n new_size = tf.cond(\n tf.to_float(tf.reduce_max(large_size)) > max_dimension,\n lambda: small_size, lambda: large_size)\n else:\n new_size = large_size\n return new_size\n\n\ndef resize_to_range(image,\n masks=None,\n min_dimension=None,\n max_dimension=None,\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False):\n \"\"\"Resizes an image so its dimensions are within the provided value.\n\n The output size can be described by two cases:\n 1. If the image can be rescaled so its minimum dimension is equal to the\n provided value without the other dimension exceeding max_dimension,\n then do so.\n 2. Otherwise, resize so the largest dimension is equal to max_dimension.\n\n Args:\n image: A 3D tensor of shape [height, width, channels]\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks.\n min_dimension: (optional) (scalar) desired size of the smaller image\n dimension.\n max_dimension: (optional) (scalar) maximum allowed size\n of the larger image dimension.\n method: (optional) interpolation method used in resizing. Defaults to\n BILINEAR.\n align_corners: bool. If true, exactly align all 4 corners of the input\n and output. 
Defaults to False.\n\n Returns:\n A 3D tensor of shape [new_height, new_width, channels],\n where the image has been resized (with bilinear interpolation) so that\n min(new_height, new_width) == min_dimension or\n max(new_height, new_width) == max_dimension.\n\n If masks is not None, also outputs masks:\n A 3D tensor of shape [num_instances, new_height, new_width]\n\n Raises:\n ValueError: if the image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('ResizeToRange', values=[image, min_dimension]):\n if image.get_shape().is_fully_defined():\n new_size = _compute_new_static_size(image, min_dimension, max_dimension)\n else:\n new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)\n new_image = tf.image.resize_images(\n image, new_size, method=method, align_corners=align_corners)\n\n result = new_image\n if masks is not None:\n new_masks = tf.expand_dims(masks, 3)\n new_masks = tf.image.resize_nearest_neighbor(\n new_masks, new_size, align_corners=align_corners)\n new_masks = tf.squeeze(new_masks, 3)\n result = [new_image, new_masks]\n\n return result\n\n\n# TODO: Make sure the static shapes are preserved.\ndef resize_to_min_dimension(image, masks=None, min_dimension=600):\n \"\"\"Resizes image and masks given the min size maintaining the aspect ratio.\n\n If one of the image dimensions is smaller that min_dimension, it will scale\n the image such that its smallest dimension is equal to min_dimension.\n Otherwise, will keep the image size as is.\n\n Args:\n image: a tensor of size [height, width, channels].\n masks: (optional) a tensors of size [num_instances, height, width].\n min_dimension: minimum image dimension.\n\n Returns:\n a tuple containing the following:\n Resized image. A tensor of size [new_height, new_width, channels].\n (optional) Resized masks. A tensor of\n size [num_instances, new_height, new_width].\n\n Raises:\n ValueError: if the image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]):\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n min_image_dimension = tf.minimum(image_height, image_width)\n min_target_dimension = tf.maximum(min_image_dimension, min_dimension)\n target_ratio = tf.to_float(min_target_dimension) / tf.to_float(\n min_image_dimension)\n target_height = tf.to_int32(tf.to_float(image_height) * target_ratio)\n target_width = tf.to_int32(tf.to_float(image_width) * target_ratio)\n image = tf.image.resize_bilinear(\n tf.expand_dims(image, axis=0),\n size=[target_height, target_width],\n align_corners=True)\n result = tf.squeeze(image, axis=0)\n if masks is not None:\n masks = tf.image.resize_nearest_neighbor(\n tf.expand_dims(masks, axis=3),\n size=[target_height, target_width],\n align_corners=True)\n result = (result, tf.squeeze(masks, axis=3))\n return result\n\n\ndef scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):\n \"\"\"Scales boxes from normalized to pixel coordinates.\n\n Args:\n image: A 3D float32 tensor of shape [height, width, channels].\n boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding\n boxes in normalized coordinates. Each row is of the form\n [ymin, xmin, ymax, xmax].\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x normalized\n coordinates.\n\n Returns:\n image: unchanged input image.\n scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the\n bounding boxes in pixel coordinates.\n scaled_keypoints: a 3D float32 tensor with shape\n [num_instances, num_keypoints, 2] containing the keypoints in pixel\n coordinates.\n \"\"\"\n boxlist = box_list.BoxList(boxes)\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()\n result = [image, scaled_boxes]\n if keypoints is not None:\n scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)\n result.append(scaled_keypoints)\n return tuple(result)\n\n\n# pylint: disable=g-doc-return-or-yield\ndef resize_image(image,\n masks=None,\n new_height=600,\n new_width=1024,\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False):\n \"\"\"See `tf.image.resize_images` for detailed doc.\"\"\"\n with tf.name_scope(\n 'ResizeImage',\n values=[image, new_height, new_width, method, align_corners]):\n new_image = tf.image.resize_images(\n image, [new_height, new_width],\n method=method,\n align_corners=align_corners)\n result = new_image\n if masks is not None:\n num_instances = tf.shape(masks)[0]\n new_size = tf.constant([new_height, new_width], dtype=tf.int32)\n def resize_masks_branch():\n new_masks = tf.expand_dims(masks, 3)\n new_masks = tf.image.resize_nearest_neighbor(\n new_masks, new_size, align_corners=align_corners)\n new_masks = tf.squeeze(new_masks, axis=3)\n return new_masks\n\n def reshape_masks_branch():\n new_masks = tf.reshape(masks, [0, new_size[0], new_size[1]])\n return new_masks\n\n masks = tf.cond(num_instances > 0, resize_masks_branch,\n reshape_masks_branch)\n result = [new_image, masks]\n\n return result\n\n\ndef subtract_channel_mean(image, means=None):\n \"\"\"Normalizes an image by subtracting a mean from each channel.\n\n Args:\n image: A 3D tensor of shape [height, width, channels]\n means: float list containing a mean for each channel\n Returns:\n normalized_images: a tensor of shape [height, width, channels]\n Raises:\n ValueError: if images is not a 4D tensor or if the number of means is not\n equal to the number of channels.\n \"\"\"\n with tf.name_scope('SubtractChannelMean', values=[image, means]):\n if len(image.get_shape()) != 3:\n raise ValueError('Input must be of size [height, width, channels]')\n if len(means) != image.get_shape()[-1]:\n raise ValueError('len(means) must match the number of channels')\n return image - [[means]]\n\n\ndef one_hot_encoding(labels, num_classes=None):\n \"\"\"One-hot encodes the multiclass labels.\n\n Example usage:\n labels = tf.constant([1, 4], dtype=tf.int32)\n one_hot = OneHotEncoding(labels, num_classes=5)\n one_hot.eval() # evaluates to [0, 1, 0, 0, 1]\n\n Args:\n labels: A tensor of shape [None] corresponding to the labels.\n num_classes: Number of classes in the dataset.\n Returns:\n onehot_labels: a tensor of shape [num_classes] corresponding to the one hot\n encoding of the labels.\n Raises:\n ValueError: if num_classes is not specified.\n \"\"\"\n with tf.name_scope('OneHotEncoding', values=[labels]):\n if num_classes is None:\n raise ValueError('num_classes must be specified')\n\n labels = tf.one_hot(labels, num_classes, 1, 0)\n return tf.reduce_max(labels, 0)\n\n\ndef rgb_to_gray(image):\n \"\"\"Converts a 3 channel RGB image to a 1 channel grayscale image.\n\n Args:\n image: Rank 3 float32 tensor containing 1 image -> [height, width, 
3]\n with pixel values varying between [0, 1].\n\n Returns:\n image: A single channel grayscale image -> [image, height, 1].\n \"\"\"\n return tf.image.rgb_to_grayscale(image)\n\n\ndef ssd_random_crop(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio_range=((0.5, 2.0),) * 7,\n #aspect_ratio_range=((0.75, 1.25),) * 7,\n area_range=((0.1, 1.0),) * 7,\n overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n #overlap_thresh=(0.0, 0.75, 0.8, 0.8, 0.85, 0.95, 1.0),\n random_coef=(0.15,) * 7,\n seed=None):\n \"\"\"Random crop preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: rank 1 float32 tensor containing the scores.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. 
If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: new label scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n\n def random_crop_selector(selected_result, index):\n \"\"\"Applies random_crop_image to selected result.\n\n Args:\n selected_result: A tuple containing image, boxes, labels, keypoints (if\n not None), and masks (if not None).\n index: The index that was randomly selected.\n\n Returns: A tuple containing image, boxes, labels, keypoints (if not None),\n and masks (if not None).\n \"\"\"\n i = 3\n image, boxes, labels = selected_result[:i]\n selected_label_scores = None\n selected_masks = None\n selected_keypoints = None\n if label_scores is not None:\n selected_label_scores = selected_result[i]\n i += 1\n if masks is not None:\n selected_masks = selected_result[i]\n i += 1\n if keypoints is not None:\n selected_keypoints = selected_result[i]\n\n return random_crop_image(\n image=image,\n boxes=boxes,\n labels=labels,\n label_scores=selected_label_scores,\n masks=selected_masks,\n keypoints=selected_keypoints,\n min_object_covered=min_object_covered[index],\n aspect_ratio_range=aspect_ratio_range[index],\n area_range=area_range[index],\n overlap_thresh=overlap_thresh[index],\n random_coef=random_coef[index],\n seed=seed)\n\n result = _apply_with_random_selector_tuples(\n tuple(\n t for t in (image, boxes, labels, label_scores, masks, keypoints)\n if t is not None),\n random_crop_selector,\n num_cases=len(min_object_covered))\n return result\n\n\ndef ssd_random_crop_pad(image,\n boxes,\n labels,\n label_scores=None,\n min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio_range=((0.5, 2.0),) * 6,\n #aspect_ratio_range=((0.75, 1.25),) * 6,\n area_range=((0.1, 1.0),) * 6,\n overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n #overlap_thresh=(0.7, 0.75, 0.8, 0.8, 0.85, 0.95, 1.0),\n random_coef=(0.15,) * 6,\n min_padded_size_ratio=((1.0, 1.0),) * 6,\n max_padded_size_ratio=((2.0, 2.0),) * 6,\n pad_color=(None,) * 6,\n seed=None):\n \"\"\"Random crop preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: float32 tensor of shape [num_instances] representing the\n score for each box.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n 
image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.\n if set as None, it will be set to average color of the randomly\n cropped image.\n seed: random seed.\n\n Returns:\n image: Image shape will be [new_height, new_width, channels].\n boxes: boxes which is the same rank as input boxes. Boxes are in normalized\n form.\n new_labels: new labels.\n new_label_scores: new label scores.\n \"\"\"\n\n def random_crop_pad_selector(image_boxes_labels, index):\n i = 3\n image, boxes, labels = image_boxes_labels[:i]\n selected_label_scores = None\n if label_scores is not None:\n selected_label_scores = image_boxes_labels[i]\n\n return random_crop_pad_image(\n image,\n boxes,\n labels,\n selected_label_scores,\n min_object_covered=min_object_covered[index],\n aspect_ratio_range=aspect_ratio_range[index],\n area_range=area_range[index],\n overlap_thresh=overlap_thresh[index],\n random_coef=random_coef[index],\n min_padded_size_ratio=min_padded_size_ratio[index],\n max_padded_size_ratio=max_padded_size_ratio[index],\n pad_color=pad_color[index],\n seed=seed)\n\n return _apply_with_random_selector_tuples(\n tuple(t for t in (image, boxes, labels, label_scores) if t is not None),\n random_crop_pad_selector,\n num_cases=len(min_object_covered))\n\n\ndef ssd_random_crop_fixed_aspect_ratio(\n image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio=1.0,\n area_range=((0.1, 1.0),) * 7,\n overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n random_coef=(0.15,) * 7,\n seed=None):\n \"\"\"Random crop preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n The only difference is that the aspect ratio of the crops are fixed.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio: aspect ratio of the cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If masks or keypoints is not None, the function also returns:\n\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range)\n\n crop_result = ssd_random_crop(\n image, boxes, labels, label_scores, masks, keypoints, min_object_covered,\n aspect_ratio_range, area_range, overlap_thresh, random_coef, seed)\n i = 3\n new_image, new_boxes, new_labels = crop_result[:i]\n new_label_scores = None\n new_masks = None\n new_keypoints = None\n if label_scores is not None:\n new_label_scores = crop_result[i]\n i += 1\n if masks is not None:\n new_masks = crop_result[i]\n i += 1\n if keypoints is not None:\n new_keypoints = crop_result[i]\n result = random_crop_to_aspect_ratio(\n new_image,\n new_boxes,\n new_labels,\n new_label_scores,\n new_masks,\n new_keypoints,\n aspect_ratio=aspect_ratio,\n seed=seed)\n\n return result\n\n\ndef ssd_random_crop_pad_fixed_aspect_ratio(\n image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio=1.0,\n aspect_ratio_range=((0.5, 2.0),) * 7,\n area_range=((0.1, 1.0),) * 7,\n overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n random_coef=(0.15,) * 7,\n min_padded_size_ratio=(1.0, 1.0),\n max_padded_size_ratio=(2.0, 2.0),\n seed=None):\n \"\"\"Random crop and pad preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n The only difference is that after the initial crop, images are zero-padded\n to a fixed aspect ratio instead of being resized to that aspect ratio.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio: the final aspect ratio to pad to.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If masks or keypoints is not None, the function also returns:\n\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n crop_result = ssd_random_crop(\n image, boxes, labels, label_scores, masks, keypoints, min_object_covered,\n aspect_ratio_range, area_range, overlap_thresh, random_coef, seed)\n i = 3\n new_image, new_boxes, new_labels = crop_result[:i]\n new_label_scores = None\n new_masks = None\n new_keypoints = None\n if label_scores is not None:\n new_label_scores = crop_result[i]\n i += 1\n if masks is not None:\n new_masks = crop_result[i]\n i += 1\n if keypoints is not None:\n new_keypoints = crop_result[i]\n result = random_pad_to_aspect_ratio(\n new_image,\n new_boxes,\n new_masks,\n new_keypoints,\n aspect_ratio=aspect_ratio,\n min_padded_size_ratio=min_padded_size_ratio,\n max_padded_size_ratio=max_padded_size_ratio,\n seed=seed)\n\n result = list(result)\n if new_label_scores is not None:\n result.insert(2, new_label_scores)\n result.insert(2, new_labels)\n result = tuple(result)\n\n return result\n\n\ndef get_default_func_arg_map(include_label_scores=False,\n include_instance_masks=False,\n include_keypoints=False):\n \"\"\"Returns the default mapping from a preprocessor function to its args.\n\n Args:\n include_label_scores: If True, preprocessing functions will modify the\n label scores, too.\n include_instance_masks: If True, preprocessing functions will modify the\n instance masks, too.\n include_keypoints: If True, preprocessing functions will modify the\n keypoints, too.\n\n Returns:\n A map from preprocessing functions to the arguments they receive.\n \"\"\"\n groundtruth_label_scores = None\n if include_label_scores:\n groundtruth_label_scores = (fields.InputDataFields.groundtruth_label_scores)\n\n groundtruth_instance_masks = None\n if include_instance_masks:\n groundtruth_instance_masks = (\n fields.InputDataFields.groundtruth_instance_masks)\n\n groundtruth_keypoints = None\n if include_keypoints:\n groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints\n\n prep_func_arg_map = {\n normalize_image: (fields.InputDataFields.image,),\n random_horizontal_flip: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_vertical_flip: (\n 
fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_rotation90: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_pixel_value_scale: (fields.InputDataFields.image,),\n random_image_scale: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n random_rgb_to_gray: (fields.InputDataFields.image,),\n random_adjust_brightness: (fields.InputDataFields.image,),\n random_adjust_contrast: (fields.InputDataFields.image,),\n random_adjust_hue: (fields.InputDataFields.image,),\n random_adjust_saturation: (fields.InputDataFields.image,),\n random_distort_color: (fields.InputDataFields.image,),\n random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,),\n random_crop_image: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_pad_image: (fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes),\n random_crop_pad_image: (fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores),\n random_crop_to_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_pad_to_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_black_patches: (fields.InputDataFields.image,),\n retain_boxes_above_threshold: (\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n image_to_float: (fields.InputDataFields.image,),\n random_resize_method: (fields.InputDataFields.image,),\n resize_to_range: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n resize_to_min_dimension: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n scale_boxes_to_pixel_coordinates: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_keypoints,),\n resize_image: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n subtract_channel_mean: (fields.InputDataFields.image,),\n one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,),\n rgb_to_gray: (fields.InputDataFields.image,),\n ssd_random_crop: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n ssd_random_crop_pad: (fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores),\n ssd_random_crop_fixed_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n ssd_random_crop_pad_fixed_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n 
groundtruth_instance_masks,\n groundtruth_keypoints,),\n }\n\n return prep_func_arg_map\n\n\ndef preprocess(tensor_dict, preprocess_options, func_arg_map=None):\n \"\"\"Preprocess images and bounding boxes.\n\n Various types of preprocessing (to be implemented) based on the\n preprocess_options dictionary e.g. \"crop image\" (affects image and possibly\n boxes), \"white balance image\" (affects only image), etc. If self._options\n is None, no preprocessing is done.\n\n Args:\n tensor_dict: dictionary that contains images, boxes, and can contain other\n things as well.\n images-> rank 4 float32 tensor contains\n 1 image -> [1, height, width, 3].\n with pixel values varying between [0, 1]\n boxes-> rank 2 float32 tensor containing\n the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning\n their coordinates vary between [0, 1].\n Each row is in the form\n of [ymin, xmin, ymax, xmax].\n preprocess_options: It is a list of tuples, where each tuple contains a\n function and a dictionary that contains arguments and\n their values.\n func_arg_map: mapping from preprocessing functions to arguments that they\n expect to receive and return.\n\n Returns:\n tensor_dict: which contains the preprocessed images, bounding boxes, etc.\n\n Raises:\n ValueError: (a) If the functions passed to Preprocess\n are not in func_arg_map.\n (b) If the arguments that a function needs\n do not exist in tensor_dict.\n (c) If image in tensor_dict is not rank 4\n \"\"\"\n if func_arg_map is None:\n func_arg_map = get_default_func_arg_map()\n\n # changes the images to image (rank 4 to rank 3) since the functions\n # receive rank 3 tensor for image\n if fields.InputDataFields.image in tensor_dict:\n images = tensor_dict[fields.InputDataFields.image]\n if len(images.get_shape()) != 4:\n raise ValueError('images in tensor_dict should be rank 4')\n image = tf.squeeze(images, squeeze_dims=[0])\n tensor_dict[fields.InputDataFields.image] = image\n\n # Preprocess inputs based on preprocess_options\n for option in preprocess_options:\n func, params = option\n if func not in func_arg_map:\n raise ValueError('The function %s does not exist in func_arg_map' %\n (func.__name__))\n arg_names = func_arg_map[func]\n for a in arg_names:\n if a is not None and a not in tensor_dict:\n raise ValueError('The function %s requires argument %s' %\n (func.__name__, a))\n\n def get_arg(key):\n return tensor_dict[key] if key is not None else None\n\n args = [get_arg(a) for a in arg_names]\n results = func(*args, **params)\n if not isinstance(results, (list, tuple)):\n results = (results,)\n # Removes None args since the return values will not contain those.\n arg_names = [arg_name for arg_name in arg_names if arg_name is not None]\n for res, arg_name in zip(results, arg_names):\n tensor_dict[arg_name] = res\n\n # changes the image to images (rank 3 to rank 4) to be compatible to what\n # we received in the first place\n if fields.InputDataFields.image in tensor_dict:\n image = tensor_dict[fields.InputDataFields.image]\n images = tf.expand_dims(image, 0)\n tensor_dict[fields.InputDataFields.image] = images\n\n return tensor_dict\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.cond",
"tensorflow.is_nan",
"tensorflow.concat",
"tensorflow.image.random_contrast",
"tensorflow.stack",
"tensorflow.minimum",
"tensorflow.image.pad_to_bounding_box",
"numpy.random.random_sample",
"tensorflow.equal",
"tensorflow.image.random_saturation",
"tensorflow.image.draw_bounding_boxes",
"tensorflow.image.rot90",
"tensorflow.to_int32",
"tensorflow.image.sample_distorted_bounding_box",
"tensorflow.image.random_hue",
"tensorflow.greater",
"tensorflow.squeeze",
"tensorflow.subtract",
"tensorflow.image.rgb_to_grayscale",
"tensorflow.gather",
"tensorflow.add",
"tensorflow.to_float",
"tensorflow.name_scope",
"tensorflow.image.grayscale_to_rgb",
"tensorflow.image.random_brightness",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.shape",
"tensorflow.image.resize_images",
"tensorflow.one_hot",
"tensorflow.split",
"tensorflow.round",
"tensorflow.clip_by_value",
"tensorflow.reduce_max",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.image.crop_to_bounding_box",
"tensorflow.slice",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.image.flip_left_right",
"tensorflow.image.flip_up_down",
"tensorflow.ones",
"tensorflow.python.ops.control_flow_ops.merge",
"tensorflow.image.convert_image_dtype",
"tensorflow.random_uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
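The preprocessor entry above is dominated by size arithmetic; its resize_to_range rule scales an image so the short side reaches min_dimension unless that would push the long side past max_dimension, in which case it scales to the long side instead. A minimal pure-Python sketch of that size computation, mirroring the logic of _compute_new_static_size with illustrative names, is:

def compute_new_size(height, width, min_dimension, max_dimension=None):
    # Scale so the smaller dimension equals min_dimension.
    large_scale = min_dimension / float(min(height, width))
    large_size = (int(round(height * large_scale)),
                  int(round(width * large_scale)))
    if max_dimension and max(large_size) > max_dimension:
        # Overshoot: scale so the larger dimension equals max_dimension instead.
        small_scale = max_dimension / float(max(height, width))
        return (int(round(height * small_scale)),
                int(round(width * small_scale)))
    return large_size

# A 480x640 image with min_dimension=600, max_dimension=1024 scales by
# 600/480 to (600, 800); the long side stays under 1024, so no second pass.
assert compute_new_size(480, 640, 600, 1024) == (600, 800)
# A 480x1920 image would overshoot, so it is scaled to the long side instead.
assert compute_new_size(480, 1920, 600, 1024) == (256, 1024)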
metee1996/ds-example-project | [
"8d43b8786711a69779adb7fd6a830fe63fe30909"
] | [
"src/python/project/model.py"
] | [
"import numpy as np\n\ndef power(x):\n return np.power(x, 2)\n"
] | [
[
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
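The model.py entry above is a one-line wrapper over numpy's elementwise power. For reference, np.power broadcasts, so the same wrapper squares scalars and arrays alike (a small usage sketch, with the function copied from the entry):

import numpy as np

def power(x):
    return np.power(x, 2)

assert power(3) == 9                                    # scalar input
assert (power(np.array([1, 2, 3])) == [1, 4, 9]).all()  # elementwise over an array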
ohtu-projekti-dataproblemsemulator/dataproblemsemulator | [
"b24eac686fae4147264c1ccc8169fd96b1875577"
] | [
"examples/run_time_series_prediction_example.py"
] | [
"# MIT License\n#\n# Copyright (c) 2019 Tuomas Halvari, Juha Harviainen, Juha Mylläri, Antti Röyskö, Juuso Silvennoinen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport random as rn\nimport sys\nfrom math import sqrt\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras import backend\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.models import Sequential\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom dpemu import pg_utils\nfrom dpemu import runner\nfrom dpemu.filters.time_series import Gap\nfrom dpemu.nodes import Array\nfrom dpemu.plotting_utils import print_results_by_model, visualize_scores, visualize_time_series_prediction\n\n\ndef get_data(argv):\n dataset_name = argv[1]\n n_data = int(argv[2])\n if dataset_name == \"passengers\":\n data = pd.read_csv(\"data/passengers.csv\", header=0, usecols=[\"passengers\"])[:n_data].values.astype(float)\n n_period = 12\n else:\n data = pd.read_csv(\"data/temperature.csv\", header=0, usecols=[dataset_name])[:n_data].values.astype(float)\n n_period = 24\n\n data = data[~np.isnan(data)]\n n_data = len(data)\n n_test = int(n_data * .2)\n return data[:-n_test], data[-n_test:], n_data, n_period, dataset_name\n\n\ndef get_err_root_node():\n err_root_node = Array()\n # err_root_node.addfilter(GaussianNoise(\"mean\", \"std\"))\n # err_root_node.addfilter(SensorDrift(\"magnitude\"))\n err_root_node.addfilter(Gap(\"prob_break\", \"prob_recover\", \"missing_value\"))\n return err_root_node\n\n\ndef get_err_params_list():\n # err_params_list = [{\"mean\": 0, \"std\": std} for std in np.linspace(0, 35, 8)]\n # err_params_list = [{\"magnitude\": m} for m in range(8)]\n err_params_list = [{\"prob_break\": p, \"prob_recover\": .5, \"missing_value\": np.nan} for p in np.linspace(0, .14, 8)]\n return err_params_list\n\n\nclass Preprocessor:\n def run(self, train_data, test_data, params):\n return train_data, test_data, {}\n\n\nclass LSTMModel:\n\n def __init__(self):\n seed = 42\n rn.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n session = tf.Session(graph=tf.get_default_graph(), config=conf)\n backend.set_session(session)\n\n @staticmethod\n def __get_periodic_diffs(data, n_period):\n return np.array([data[i] - data[i - n_period] for i in range(n_period, len(data))])\n\n @staticmethod\n def 
__get_rmse(test_pred, test):\n return sqrt(mean_squared_error(test_pred, test))\n\n def run(self, train_data, _, params):\n n_period = params[\"n_period\"]\n clean_test = params[\"clean_test\"]\n n_test = clean_test.shape[0]\n train_data = train_data[~np.isnan(train_data)]\n train_data = np.reshape(train_data, (len(train_data), 1))\n\n n_features = 1\n n_steps = 3 * n_period\n n_nodes = 100\n n_epochs = 200\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_train = scaler.fit_transform(train_data)\n train_periodic_diffs = self.__get_periodic_diffs(scaled_train, n_period)\n train_periodic_diffs = pg_utils.to_time_series_x_y(train_periodic_diffs, n_steps)\n\n model = Sequential()\n model.add(LSTM(n_nodes, activation=\"relu\", input_shape=(n_steps, n_features)))\n model.add(Dense(n_nodes, activation=\"relu\"))\n model.add(Dense(1))\n model.compile(loss=\"mse\", optimizer=\"adam\")\n model.fit(train_periodic_diffs[0], train_periodic_diffs[1], epochs=n_epochs)\n\n train_with_test_pred = scaled_train\n for _ in range(n_test):\n x_cur = self.__get_periodic_diffs(train_with_test_pred, n_period)[-n_steps:]\n x_cur = np.reshape(x_cur, (1, n_steps, n_features))\n y_cur = model.predict(x_cur) + train_with_test_pred[-n_period]\n train_with_test_pred = np.concatenate([train_with_test_pred, y_cur], axis=0)\n train_with_test_pred = scaler.inverse_transform(train_with_test_pred)\n\n test_pred = train_with_test_pred[-n_test:]\n rmse = self.__get_rmse(test_pred, clean_test)\n return {\n \"rmse\": rmse,\n \"err_train\": train_with_test_pred[:-n_test],\n \"test_pred\": test_pred\n }\n\n\ndef get_model_params_dict_list(test_data, n_period):\n return [{\"model\": LSTMModel, \"params_list\": [{\"clean_test\": test_data, \"n_period\": n_period}]}]\n\n\ndef visualize(df, data, n_data, dataset_name):\n visualize_scores(\n df,\n score_names=[\"rmse\"],\n is_higher_score_better=[False],\n # err_param_name=\"std\",\n # err_param_name=\"magnitude\",\n err_param_name=\"prob_break\",\n title=f\"Prediction scores for {dataset_name} dataset (n={n_data}) with added error\"\n )\n visualize_time_series_prediction(\n df,\n data,\n score_name=\"rmse\",\n is_higher_score_better=False,\n # err_param_name=\"std\",\n # err_param_name=\"magnitude\",\n err_param_name=\"prob_break\",\n model_name=\"LSTM\",\n err_train_column=\"err_train\",\n test_pred_column=\"test_pred\",\n title=f\"Predictions for {dataset_name} dataset (n={n_data}) with added error\"\n )\n plt.show()\n\n\ndef main(argv):\n if len(argv) != 3 or argv[1] not in [\"passengers\", \"Jerusalem\", \"Eilat\", \"Miami\", \"Tel Aviv District\"]:\n exit(0)\n\n train_data, test_data, n_data, n_period, dataset_name = get_data(argv)\n\n df = runner.run(\n train_data=train_data,\n test_data=test_data,\n preproc=Preprocessor,\n preproc_params={},\n err_root_node=get_err_root_node(),\n err_params_list=get_err_params_list(),\n model_params_dict_list=get_model_params_dict_list(test_data, n_period),\n )\n\n print_results_by_model(df, dropped_columns=[\"err_train\", \"test_pred\", \"clean_test\", \"n_period\"])\n visualize(df, np.concatenate([train_data, test_data], axis=0), n_data, dataset_name)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n"
] | [
[
"pandas.read_csv",
"numpy.random.seed",
"numpy.linspace",
"numpy.reshape",
"numpy.isnan",
"sklearn.metrics.mean_squared_error",
"tensorflow.ConfigProto",
"numpy.concatenate",
"tensorflow.set_random_seed",
"tensorflow.get_default_graph",
"matplotlib.pyplot.show",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
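The LSTMModel above trains on seasonal differences (__get_periodic_diffs returns x[t] - x[t - n_period]) and rebuilds forecasts by adding each predicted difference back onto the value one period earlier (model.predict(x_cur) + train_with_test_pred[-n_period]). A compact numpy sketch of that difference/reconstruct round trip, independent of the Keras model, is:

import numpy as np

def periodic_diffs(data, n_period):
    # Seasonal differencing, as in LSTMModel: x[t] - x[t - n_period].
    return data[n_period:] - data[:-n_period]

def reconstruct(history, diffs, n_period):
    # Undo the differencing step by step: each new value is the predicted
    # difference plus the value one period earlier.
    series = list(history)
    for d in diffs:
        series.append(series[-n_period] + d)
    return np.array(series[len(history):])

x = np.arange(24, dtype=float)   # a toy monotone series with period 12
d = periodic_diffs(x, 12)        # constant differences of 12.0
assert np.allclose(reconstruct(x, d, 12), x[12:] + 12)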
DipeshAggarwal/wgan-gp-keras | [
"7a70192cdd26726ee981107299a7fa6e21cbe84b"
] | [
"train.py"
] | [
"from core.loss import d_wasserstein_loss\nfrom core.loss import g_wasserstein_loss\nfrom core.nn.conv.wgan import generator\nfrom core.nn.conv.wgan import critic\nfrom core.callbacks import GANMonitor\nfrom core.model import WGAN_GP\n\nimport tensorflow as tf\nimport numpy as np\nimport config\n\ntrain_images = tf.keras.utils.image_dataset_from_directory(\n \"dataset/images/\", label_mode=None, image_size=(config.IMAGE_WIDTH, config.IMAGE_HEIGHT), batch_size=config.BATCH_SIZE\n)\ntrain_images = train_images.map(lambda x: (x - 127.5) / 127.5)\n\ngenerator = generator(config.LATENT_DIM, tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02), channels=config.CHANNELS)\ncritic = critic(config.IMAGE_HEIGHT, config.IMAGE_WIDTH, config.CHANNELS)\n\nwgan = WGAN_GP(critic=critic, generator=generator, latent_dim=config.LATENT_DIM, critic_extra_steps=config.EXTRA_STEPS)\n\nd_opt = tf.keras.optimizers.Adam(learning_rate=config.LR, beta_1=0.5, beta_2=0.9)\ng_opt = tf.keras.optimizers.Adam(learning_rate=config.LR, beta_1=0.5, beta_2=0.9)\n\nwgan.compile(\n d_optimiser=d_opt,\n g_optimiser=g_opt,\n d_loss_fn=d_wasserstein_loss,\n g_loss_fn=g_wasserstein_loss,\n)\n\ncallback = [GANMonitor(num_images=16, latent_dim=config.LATENT_DIM)]\nwgan.fit(train_images, epochs=config.EPOCHS, callbacks=callback)\n"
] | [
[
"tensorflow.keras.utils.image_dataset_from_directory",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.optimizers.Adam"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.6",
"2.7"
]
}
] |
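The WGAN_GP class imported by the training script above is not part of this entry, but the gradient penalty that gives the model its name has a standard form (Gulrajani et al., 2017): evaluate the critic on random interpolates between real and fake batches and penalize gradient norms that deviate from 1. A TensorFlow 2 sketch of that term, assuming a Keras-style critic and NHWC image batches, is:

import tensorflow as tf

def gradient_penalty(critic, real_images, fake_images):
    # Score random interpolates between real and fake samples and penalize
    # the squared distance of the critic's gradient norm from 1.
    batch_size = tf.shape(real_images)[0]
    eps = tf.random.uniform([batch_size, 1, 1, 1], 0.0, 1.0)
    interpolated = eps * real_images + (1.0 - eps) * fake_images
    with tf.GradientTape() as tape:
        tape.watch(interpolated)
        scores = critic(interpolated, training=True)
    grads = tape.gradient(scores, interpolated)
    norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    return tf.reduce_mean(tf.square(norms - 1.0))

In the usual formulation the critic's total loss adds this term to the Wasserstein loss with a weight of 10.0, recomputed on every critic step (here, config.EXTRA_STEPS critic steps per generator step).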
dylanbuchi/MONAI | [
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277",
"1651f1b003b0ffae8b615d191952ad65ad091277"
] | [
"tests/test_write_metrics_reports.py",
"tests/test_rand_scale_cropd.py",
"tests/test_save_image.py",
"tests/test_bilateral_approx_cpu.py",
"tests/test_map_label_value.py",
"tests/test_nifti_rw.py",
"monai/optimizers/lr_finder.py",
"tests/test_rand_k_space_spike_noise.py",
"monai/losses/tversky.py"
] | [
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport csv\nimport os\nimport tempfile\nimport unittest\n\nimport torch\n\nfrom monai.handlers.utils import write_metrics_reports\n\n\nclass TestWriteMetricsReports(unittest.TestCase):\n def test_content(self):\n with tempfile.TemporaryDirectory() as tempdir:\n write_metrics_reports(\n save_dir=tempdir,\n images=[\"filepath1\", \"filepath2\"],\n metrics={\"metric1\": 1, \"metric2\": 2},\n metric_details={\"metric3\": torch.tensor([[1, 2], [2, 3]]), \"metric4\": torch.tensor([[5, 6], [7, 8]])},\n summary_ops=[\"mean\", \"median\", \"max\", \"90percentile\"],\n deli=\"\\t\",\n output_type=\"csv\",\n )\n\n # check the metrics.csv and content\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metrics.csv\")))\n with open(os.path.join(tempdir, \"metrics.csv\")) as f:\n f_csv = csv.reader(f)\n for i, row in enumerate(f_csv):\n self.assertEqual(row, [f\"metric{i + 1}\\t{i + 1}\"])\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric3_raw.csv\")))\n # check the metric_raw.csv and content\n with open(os.path.join(tempdir, \"metric3_raw.csv\")) as f:\n f_csv = csv.reader(f)\n for i, row in enumerate(f_csv):\n if i > 0:\n self.assertEqual(row, [f\"filepath{i}\\t{float(i)}\\t{float(i + 1)}\\t{i + 0.5}\"])\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric3_summary.csv\")))\n # check the metric_summary.csv and content\n with open(os.path.join(tempdir, \"metric3_summary.csv\")) as f:\n f_csv = csv.reader(f)\n for i, row in enumerate(f_csv):\n if i == 1:\n self.assertEqual(row, [\"class0\\t1.5000\\t1.5000\\t2.0000\\t1.9000\"])\n elif i == 2:\n self.assertEqual(row, [\"class1\\t2.5000\\t2.5000\\t3.0000\\t2.9000\"])\n elif i == 3:\n self.assertEqual(row, [\"mean\\t2.0000\\t2.0000\\t2.5000\\t2.4000\"])\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric4_raw.csv\")))\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric4_summary.csv\")))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import RandScaleCropd\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\nTEST_CASE_1 = [\n {\"keys\": \"img\", \"roi_scale\": [1.0, 1.0, -1.0], \"random_center\": True},\n {\"img\": np.random.randint(0, 2, size=[3, 3, 3, 4])},\n (3, 3, 3, 4),\n]\n\nTEST_CASE_2 = [\n {\"keys\": \"img\", \"roi_scale\": [1.0, 1.0, 1.0], \"random_center\": False},\n {\"img\": np.random.randint(0, 2, size=[3, 3, 3, 3])},\n (3, 3, 3, 3),\n]\n\nTEST_CASE_3 = [\n {\"keys\": \"img\", \"roi_scale\": [0.6, 0.6], \"random_center\": False},\n {\"img\": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])},\n]\n\nTEST_CASE_4 = [\n {\n \"keys\": \"img\",\n \"roi_scale\": [0.75, 0.6, 0.5],\n \"max_roi_scale\": [1.0, -1.0, 0.6],\n \"random_center\": True,\n \"random_size\": True,\n },\n {\"img\": np.random.randint(0, 2, size=[1, 4, 5, 6])},\n (1, 3, 4, 3),\n]\n\nTEST_CASE_5 = [\n {\"keys\": \"img\", \"roi_scale\": 0.6, \"max_roi_scale\": 0.8, \"random_center\": True, \"random_size\": True},\n {\"img\": np.random.randint(0, 2, size=[1, 4, 5, 6])},\n (1, 3, 4, 4),\n]\n\nTEST_CASE_6 = [\n {\"keys\": \"img\", \"roi_scale\": 0.2, \"max_roi_scale\": 0.8, \"random_center\": True, \"random_size\": True},\n {\"img\": np.random.randint(0, 2, size=[1, 4, 5, 6])},\n (1, 3, 2, 4),\n]\n\n\nclass TestRandScaleCropd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2])\n def test_shape(self, input_param, input_data, expected_shape):\n result = RandScaleCropd(**input_param)(input_data)\n self.assertTupleEqual(result[\"img\"].shape, expected_shape)\n\n @parameterized.expand([TEST_CASE_3])\n def test_value(self, input_param, input_data):\n for p in TEST_NDARRAYS:\n cropper = RandScaleCropd(**input_param)\n input_data[\"img\"] = p(input_data[\"img\"])\n result = cropper(input_data)\n roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size]\n assert_allclose(\n result[\"img\"], input_data[\"img\"][:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]], type_test=False\n )\n\n @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6])\n def test_random_shape(self, input_param, input_data, expected_shape):\n cropper = RandScaleCropd(**input_param)\n cropper.set_random_state(seed=123)\n result = cropper(input_data)\n self.assertTupleEqual(result[\"img\"].shape, expected_shape)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport unittest\n\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.transforms import SaveImage\n\nTEST_CASE_1 = [\n torch.randint(0, 255, (1, 2, 3, 4)),\n {\"filename_or_obj\": \"testfile0.nii.gz\"},\n \".nii.gz\",\n False,\n]\n\nTEST_CASE_2 = [\n torch.randint(0, 255, (1, 2, 3, 4)),\n None,\n \".nii.gz\",\n False,\n]\n\n\nclass TestSaveImage(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2])\n def test_saved_content(self, test_data, meta_data, output_ext, resample):\n with tempfile.TemporaryDirectory() as tempdir:\n trans = SaveImage(\n output_dir=tempdir,\n output_ext=output_ext,\n resample=resample,\n # test saving into the same folder\n separate_folder=False,\n )\n trans(test_data, meta_data)\n\n filepath = \"testfile0\" if meta_data is not None else \"0\"\n self.assertTrue(os.path.exists(os.path.join(tempdir, filepath + \"_trans\" + output_ext)))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\nfrom torch.autograd import gradcheck\n\nfrom monai.networks.layers.filtering import BilateralFilter\nfrom tests.utils import skip_if_no_cpp_extension\n\nTEST_CASES = [\n [\n # Case Description\n \"1 dimension, 1 channel, low spatial sigma, low color sigma\",\n # Spatial and Color Sigmas\n (1, 0.2),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [1, 0, 0, 0, 1]\n ],\n # Batch 1\n [\n # Channel 0\n [0, 0, 1, 0, 0]\n ],\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [1.000000, 0.000000, 0.000000, 0.000000, 1.000000]\n ],\n # Batch 1\n [\n # Channel 0\n [0.000000, 0.000000, 1.000000, 0.000000, 0.000000]\n ],\n ],\n ],\n [\n # Case Description\n \"1 dimension, 1 channel, low spatial sigma, high color sigma\",\n # Spatial and Color Sigmas\n (1, 0.9),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [1, 0, 0, 0, 1]\n ],\n # Batch 1\n [\n # Channel 0\n [0, 0, 1, 0, 0]\n ],\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [0.631360, 0.099349, 0.070177, 0.164534, 0.649869]\n ],\n # Batch 1\n [\n # Channel 0\n [0.052271, 0.173599, 0.481337, 0.183721, 0.045619]\n ],\n ],\n ],\n [\n # Case Description\n \"1 dimension, 1 channel, high spatial sigma, low color sigma\",\n # Spatial and Color Sigmas\n (4, 0.2),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [1, 0, 0, 0, 1]\n ],\n # Batch 1\n [\n # Channel 0\n [0, 0, 1, 0, 0]\n ],\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [1.000000, 0.000000, 0.000000, 0.000000, 1.000000]\n ],\n # Batch 1\n [\n # Channel 0\n [0.000000, 0.000000, 1.000000, 0.000000, 0.000000]\n ],\n ],\n ],\n [\n # Case Description\n \"1 dimension, 1 channel, high spatial sigma, high color sigma\",\n # Sigmas\n (4, 0.9),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [1, 0, 0, 0, 1]\n ],\n # Batch 1\n [\n # Channel 0\n [0, 0, 1, 0, 0]\n ],\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [0.497667, 0.268683, 0.265026, 0.261467, 0.495981]\n ],\n # Batch 1\n [\n # Channel 0\n [0.145959, 0.142282, 0.315710, 0.135609, 0.132572]\n ],\n ],\n ],\n [\n # Case Description\n \"1 dimension, 4 channel, low spatial sigma, high color sigma\",\n # Spatial and Color Sigmas\n (1, 0.9),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [1, 0, 0, 0, 0],\n # Channel 1\n [1, 0, 1, 0, 0],\n # Channel 2\n [0, 0, 1, 0, 1],\n # Channel 3\n [0, 0, 0, 0, 1],\n ]\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [0.960843, 0.073540, 0.027689, 0.002676, 0.000000],\n # Channel 1\n [0.960843, 0.073540, 0.951248, 0.003033, 0.000750],\n # Channel 2\n [0.000000, 0.000000, 0.923559, 0.000357, 0.981324],\n # Channel 3\n [0.000000, 0.000000, 0.000000, 0.000000, 0.980574],\n ]\n ],\n ],\n [\n # Case Description\n \"2 dimension, 1 channel, high spatial sigma, high color sigma\",\n # Sigmas\n (4, 0.9),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]]\n 
],\n # Batch 1\n [\n # Channel 0\n [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]\n ],\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [\n [0.213684, 0.094356, 0.092973, 0.091650, 0.216281],\n [0.094085, 0.092654, 0.091395, 0.090186, 0.089302],\n [0.092436, 0.091150, 0.090008, 0.088896, 0.088897],\n [0.090849, 0.089717, 0.088759, 0.087751, 0.088501],\n [0.211458, 0.088334, 0.087495, 0.087049, 0.212173],\n ]\n ],\n # Batch 1\n [\n # Channel 0\n [\n [0.033341, 0.031314, 0.029367, 0.027494, 0.025692],\n [0.031869, 0.030632, 0.028820, 0.027074, 0.025454],\n [0.030455, 0.029628, 0.084257, 0.026704, 0.025372],\n [0.029095, 0.028391, 0.027790, 0.026375, 0.025292],\n [0.027786, 0.027197, 0.026692, 0.026181, 0.025213],\n ]\n ],\n ],\n ],\n [\n # Case Description\n \"2 dimension, 4 channel, high spatial sigma, high color sigma\",\n # Spatial and Color Sigmas\n (4, 0.9),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [[1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1]],\n # Channel 1\n [[1, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 1]],\n # Channel 2\n [[0, 0, 1, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 1, 0, 0]],\n # Channel 3\n [[0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0]],\n ]\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [\n [0.244373, 0.014488, 0.036589, 0.014226, 0.024329],\n [0.014108, 0.014228, 0.014096, 0.013961, 0.013823],\n [0.013574, 0.013757, 0.013836, 0.013699, 0.013558],\n [0.013008, 0.013211, 0.013404, 0.013438, 0.013295],\n [0.025179, 0.012634, 0.034555, 0.013050, 0.237582],\n ],\n # Channel 1\n [\n [0.271496, 0.015547, 0.439432, 0.015700, 0.089579],\n [0.015252, 0.015702, 0.015779, 0.015859, 0.015940],\n [0.015020, 0.015556, 0.015935, 0.016015, 0.016098],\n [0.014774, 0.015331, 0.015860, 0.016171, 0.016255],\n [0.107384, 0.015094, 0.462471, 0.016166, 0.263480],\n ],\n # Channel 2\n [\n [0.027123, 0.003527, 0.467273, 0.004912, 0.645776],\n [0.003810, 0.004908, 0.005605, 0.006319, 0.007050],\n [0.004816, 0.005991, 0.006989, 0.007716, 0.008459],\n [0.005880, 0.007060, 0.008179, 0.009101, 0.009858],\n [0.633398, 0.008191, 0.496893, 0.010376, 0.025898],\n ],\n # Channel 3\n [\n [0.000000, 0.002468, 0.064430, 0.003437, 0.580526],\n [0.002666, 0.003434, 0.003922, 0.004422, 0.004933],\n [0.003370, 0.004192, 0.004890, 0.005399, 0.005919],\n [0.004115, 0.004940, 0.005723, 0.006368, 0.006898],\n [0.551194, 0.005731, 0.068977, 0.007260, 0.000000],\n ],\n ]\n ],\n ],\n [\n # Case Description\n \"3 dimension, 1 channel, high spatial sigma, high color sigma\",\n # Sigmas\n (4, 0.9),\n # Input\n [\n # Batch 0\n [\n # Channel 0\n [\n # Frame 0\n [[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]],\n # Frame 1\n [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],\n # Frame 2\n [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],\n # Frame 3\n [[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],\n # Frame 4\n [[1, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 1]],\n ]\n ],\n ],\n # Expected\n [\n # Batch 0\n [\n # Channel 0\n [\n # Frame 0\n [\n [0.086801, 0.036670, 0.035971, 0.035304, 0.088456],\n [0.036639, 0.035652, 0.035009, 0.034394, 0.033803],\n [0.035899, 0.034897, 0.034136, 0.033566, 0.033129],\n [0.035180, 0.034238, 0.033413, 0.032811, 
0.032577],\n [0.088290, 0.033597, 0.032821, 0.032134, 0.088786],\n ],\n # Frame 1\n [\n [0.036286, 0.035269, 0.034632, 0.034021, 0.033435],\n [0.035398, 0.034485, 0.033922, 0.033381, 0.033177],\n [0.034688, 0.033822, 0.033169, 0.032664, 0.032780],\n [0.034024, 0.033234, 0.032533, 0.032005, 0.032388],\n [0.033564, 0.032797, 0.032118, 0.031525, 0.032105],\n ],\n # Frame 2\n [\n [0.035225, 0.034169, 0.033404, 0.032843, 0.032766],\n [0.034383, 0.033487, 0.032908, 0.032415, 0.032650],\n [0.033691, 0.032921, 0.032353, 0.031900, 0.032384],\n [0.033080, 0.032390, 0.031786, 0.031432, 0.032008],\n [0.033099, 0.032373, 0.031737, 0.031479, 0.032054],\n ],\n # Frame 3\n [\n [0.034216, 0.033231, 0.032337, 0.031758, 0.032101],\n [0.033456, 0.032669, 0.031913, 0.031455, 0.032034],\n [0.032788, 0.032140, 0.031618, 0.031413, 0.031977],\n [0.032221, 0.031650, 0.031145, 0.031130, 0.031652],\n [0.032642, 0.031968, 0.031378, 0.031433, 0.032003],\n ],\n # Frame 4\n [\n [0.086207, 0.032335, 0.031499, 0.030832, 0.087498],\n [0.032570, 0.031884, 0.031155, 0.030858, 0.031401],\n [0.031967, 0.031417, 0.030876, 0.030881, 0.031388],\n [0.031602, 0.031103, 0.030696, 0.030960, 0.031455],\n [0.090599, 0.031546, 0.031127, 0.031386, 0.083483],\n ],\n ]\n ]\n ],\n ],\n]\n\n\n@skip_if_no_cpp_extension\nclass BilateralFilterTestCaseCpuApprox(unittest.TestCase):\n @parameterized.expand(TEST_CASES)\n def test_cpu_approx(self, test_case_description, sigmas, input, expected):\n\n # Params to determine the implementation to test\n device = torch.device(\"cpu\")\n fast_approx = True\n\n # Create input tensor and apply filter\n input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device)\n output = BilateralFilter.apply(input_tensor, *sigmas, fast_approx).cpu().numpy()\n\n # Ensure result are as expected\n np.testing.assert_allclose(output, expected, atol=1e-5)\n\n @parameterized.expand(TEST_CASES)\n def test_cpu_approx_backwards(self, test_case_description, sigmas, input, expected):\n\n # Params to determine the implementation to test\n device = torch.device(\"cpu\")\n fast_approx = True\n\n # Prepare input tensor\n input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=device)\n input_tensor.requires_grad = True\n\n # Prepare args\n args = (input_tensor, *sigmas, fast_approx)\n\n # Run grad check\n gradcheck(BilateralFilter.apply, args, raise_exception=False)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
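For intuition about the expected arrays in TEST_CASES above, a brute-force 1-D bilateral filter is sketched below. This is a minimal reference under the usual Gaussian spatial/range weighting; it is not MONAI's cross/approximation kernel, so it will not reproduce the expected values digit for digit.

import numpy as np

def bilateral_1d(signal, spatial_sigma, color_sigma):
    """Brute-force 1-D bilateral filter: each output sample is a
    normalized sum of inputs weighted by spatial and intensity distance."""
    n = len(signal)
    out = np.zeros(n)
    for i in range(n):
        # Gaussian weight on pixel distance from position i
        spatial_w = np.exp(-0.5 * ((np.arange(n) - i) / spatial_sigma) ** 2)
        # Gaussian weight on intensity difference from signal[i]
        color_w = np.exp(-0.5 * ((signal - signal[i]) / color_sigma) ** 2)
        w = spatial_w * color_w
        out[i] = np.sum(w * signal) / np.sum(w)
    return out

# Same input as the first test case: edges survive a low color sigma.
print(bilateral_1d(np.array([1.0, 0, 0, 0, 1]), spatial_sigma=1, color_sigma=0.2))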
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import MapLabelValue\n\nTEST_CASE_1 = [\n {\"orig_labels\": [3, 2, 1], \"target_labels\": [0, 1, 2]},\n np.array([[3, 1], [1, 2]]),\n np.array([[0, 2], [2, 1]]),\n]\n\nTEST_CASE_2 = [\n {\"orig_labels\": [3, 5, 8], \"target_labels\": [0, 1, 2]},\n np.array([[[3], [5], [5], [8]]]),\n np.array([[[0], [1], [1], [2]]]),\n]\n\nTEST_CASE_3 = [\n {\"orig_labels\": [1, 2, 3], \"target_labels\": [0, 1, 2]},\n np.array([3, 1, 1, 2]),\n np.array([2, 0, 0, 1]),\n]\n\nTEST_CASE_4 = [\n {\"orig_labels\": [1, 2, 3], \"target_labels\": [0.5, 1.5, 2.5]},\n np.array([3, 1, 1, 2]),\n np.array([2.5, 0.5, 0.5, 1.5]),\n]\n\nTEST_CASE_5 = [\n {\"orig_labels\": [1.5, 2.5, 3.5], \"target_labels\": [0, 1, 2], \"dtype\": np.int8},\n np.array([3.5, 1.5, 1.5, 2.5]),\n np.array([2, 0, 0, 1]),\n]\n\nTEST_CASE_6 = [\n {\"orig_labels\": [\"label3\", \"label2\", \"label1\"], \"target_labels\": [0, 1, 2]},\n np.array([[\"label3\", \"label1\"], [\"label1\", \"label2\"]]),\n np.array([[0, 2], [2, 1]]),\n]\n\nTEST_CASE_7 = [\n {\"orig_labels\": [3.5, 2.5, 1.5], \"target_labels\": [\"label0\", \"label1\", \"label2\"], \"dtype\": \"str\"},\n np.array([[3.5, 1.5], [1.5, 2.5]]),\n np.array([[\"label0\", \"label2\"], [\"label2\", \"label1\"]]),\n]\n\nTEST_CASE_8 = [\n {\"orig_labels\": [\"label3\", \"label2\", \"label1\"], \"target_labels\": [\"label1\", \"label2\", \"label3\"], \"dtype\": \"str\"},\n np.array([[\"label3\", \"label1\"], [\"label1\", \"label2\"]]),\n np.array([[\"label1\", \"label3\"], [\"label3\", \"label2\"]]),\n]\n\n\nclass TestMapLabelValue(unittest.TestCase):\n @parameterized.expand(\n [\n TEST_CASE_1,\n TEST_CASE_2,\n TEST_CASE_3,\n TEST_CASE_4,\n TEST_CASE_5,\n TEST_CASE_6,\n TEST_CASE_7,\n TEST_CASE_8,\n ]\n )\n def test_shape(self, input_param, input_data, expected_value):\n result = MapLabelValue(**input_param)(input_data)\n np.testing.assert_equal(result, expected_value)\n self.assertTupleEqual(result.shape, expected_value.shape)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport tempfile\nimport unittest\n\nimport nibabel as nib\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.data import write_nifti\nfrom monai.transforms import LoadImage, Orientation, Spacing\nfrom tests.utils import TEST_NDARRAYS, assert_allclose, make_nifti_image\n\nTESTS = []\nfor p in TEST_NDARRAYS:\n for q in TEST_NDARRAYS:\n TEST_IMAGE = p(np.arange(24).reshape((2, 4, 3)))\n TEST_AFFINE = q(\n np.array(\n [[-5.3, 0.0, 0.0, 102.01], [0.0, 0.52, 2.17, -7.50], [-0.0, 1.98, -0.26, -23.12], [0.0, 0.0, 0.0, 1.0]]\n )\n )\n TESTS.append(\n [\n TEST_IMAGE,\n TEST_AFFINE,\n dict(reader=\"NibabelReader\", image_only=False, as_closest_canonical=True),\n np.arange(24).reshape((2, 4, 3)),\n ]\n )\n TESTS.append(\n [\n TEST_IMAGE,\n TEST_AFFINE,\n dict(reader=\"NibabelReader\", image_only=True, as_closest_canonical=True),\n np.array(\n [\n [[12.0, 15.0, 18.0, 21.0], [13.0, 16.0, 19.0, 22.0], [14.0, 17.0, 20.0, 23.0]],\n [[0.0, 3.0, 6.0, 9.0], [1.0, 4.0, 7.0, 10.0], [2.0, 5.0, 8.0, 11.0]],\n ]\n ),\n ]\n )\n TESTS.append(\n [\n TEST_IMAGE,\n TEST_AFFINE,\n dict(reader=\"NibabelReader\", image_only=True, as_closest_canonical=False),\n np.arange(24).reshape((2, 4, 3)),\n ]\n )\n TESTS.append(\n [\n TEST_IMAGE,\n TEST_AFFINE,\n dict(reader=\"NibabelReader\", image_only=False, as_closest_canonical=False),\n np.arange(24).reshape((2, 4, 3)),\n ]\n )\n TESTS.append(\n [\n TEST_IMAGE,\n None,\n dict(reader=\"NibabelReader\", image_only=False, as_closest_canonical=False),\n np.arange(24).reshape((2, 4, 3)),\n ]\n )\n\n\nclass TestNiftiLoadRead(unittest.TestCase):\n @parameterized.expand(TESTS)\n def test_orientation(self, array, affine, reader_param, expected):\n test_image = make_nifti_image(array, affine)\n\n # read test cases\n loader = LoadImage(**reader_param)\n load_result = loader(test_image)\n if isinstance(load_result, tuple):\n data_array, header = load_result\n else:\n data_array = load_result\n header = None\n if os.path.exists(test_image):\n os.remove(test_image)\n\n # write test cases\n if header is not None:\n write_nifti(data_array, test_image, header[\"affine\"], header.get(\"original_affine\", None))\n elif affine is not None:\n write_nifti(data_array, test_image, affine)\n saved = nib.load(test_image)\n saved_affine = saved.affine\n saved_data = saved.get_fdata()\n if os.path.exists(test_image):\n os.remove(test_image)\n\n if affine is not None:\n assert_allclose(saved_affine, affine, type_test=False)\n assert_allclose(saved_data, expected, type_test=False)\n\n def test_consistency(self):\n np.set_printoptions(suppress=True, precision=3)\n test_image = make_nifti_image(np.arange(64).reshape(1, 8, 8), np.diag([1.5, 1.5, 1.5, 1]))\n data, header = LoadImage(reader=\"NibabelReader\", as_closest_canonical=False)(test_image)\n data, original_affine, new_affine = Spacing([0.8, 0.8, 0.8])(data[None], header[\"affine\"], mode=\"nearest\")\n data, _, new_affine = 
Orientation(\"ILP\")(data, new_affine)\n if os.path.exists(test_image):\n os.remove(test_image)\n write_nifti(data[0], test_image, new_affine, original_affine, mode=\"nearest\", padding_mode=\"border\")\n saved = nib.load(test_image)\n saved_data = saved.get_fdata()\n np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7)\n if os.path.exists(test_image):\n os.remove(test_image)\n write_nifti(\n data[0],\n test_image,\n new_affine,\n original_affine,\n mode=\"nearest\",\n padding_mode=\"border\",\n output_spatial_shape=(1, 8, 8),\n )\n saved = nib.load(test_image)\n saved_data = saved.get_fdata()\n np.testing.assert_allclose(saved_data, np.arange(64).reshape(1, 8, 8), atol=1e-7)\n if os.path.exists(test_image):\n os.remove(test_image)\n # test the case that only correct orientation but don't resample\n write_nifti(data[0], test_image, new_affine, original_affine, resample=False)\n saved = nib.load(test_image)\n # compute expected affine\n start_ornt = nib.orientations.io_orientation(new_affine)\n target_ornt = nib.orientations.io_orientation(original_affine)\n ornt_transform = nib.orientations.ornt_transform(start_ornt, target_ornt)\n data_shape = data[0].shape\n expected_affine = new_affine @ nib.orientations.inv_ornt_aff(ornt_transform, data_shape)\n np.testing.assert_allclose(saved.affine, expected_affine)\n if os.path.exists(test_image):\n os.remove(test_image)\n\n def test_write_2d(self):\n with tempfile.TemporaryDirectory() as out_dir:\n image_name = os.path.join(out_dir, \"test.nii.gz\")\n for p in TEST_NDARRAYS:\n img = p(np.arange(6).reshape((2, 3)))\n write_nifti(img, image_name, affine=np.diag([1]), target_affine=np.diag([1.4]))\n out = nib.load(image_name)\n np.testing.assert_allclose(out.get_fdata(), [[0, 1, 2], [3.0, 4, 5]])\n np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1]))\n\n image_name = os.path.join(out_dir, \"test1.nii.gz\")\n img = np.arange(5).reshape((1, 5))\n write_nifti(\n img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2.0, 1, 3, 5])\n )\n out = nib.load(image_name)\n np.testing.assert_allclose(out.get_fdata(), [[0, 2, 4]])\n np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 1, 1]))\n\n def test_write_3d(self):\n with tempfile.TemporaryDirectory() as out_dir:\n image_name = os.path.join(out_dir, \"test.nii.gz\")\n for p in TEST_NDARRAYS:\n img = p(np.arange(6).reshape((1, 2, 3)))\n write_nifti(img, image_name, affine=np.diag([1]), target_affine=np.diag([1.4]))\n out = nib.load(image_name)\n np.testing.assert_allclose(out.get_fdata(), [[[0, 1, 2], [3, 4, 5]]])\n np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1]))\n\n image_name = os.path.join(out_dir, \"test1.nii.gz\")\n img = p(np.arange(5).reshape((1, 1, 5)))\n write_nifti(\n img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2.0, 2, 3, 5])\n )\n out = nib.load(image_name)\n np.testing.assert_allclose(out.get_fdata(), [[[0, 2, 4]]])\n np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 2, 1]))\n\n def test_write_4d(self):\n with tempfile.TemporaryDirectory() as out_dir:\n image_name = os.path.join(out_dir, \"test.nii.gz\")\n for p in TEST_NDARRAYS:\n img = p(np.arange(6).reshape((1, 1, 3, 2)))\n write_nifti(img, image_name, affine=np.diag([1.4, 1]), target_affine=np.diag([1, 1.4, 1]))\n out = nib.load(image_name)\n np.testing.assert_allclose(out.get_fdata(), [[[[0, 1], [2, 3], [4, 5]]]])\n np.testing.assert_allclose(out.affine, np.diag([1, 1.4, 1, 1]))\n\n image_name = os.path.join(out_dir, 
\"test1.nii.gz\")\n img = p(np.arange(5).reshape((1, 1, 5, 1)))\n write_nifti(\n img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2.0, 2, 3, 5])\n )\n out = nib.load(image_name)\n np.testing.assert_allclose(out.get_fdata(), [[[[0], [2], [4]]]])\n np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 2, 1]))\n\n def test_write_5d(self):\n with tempfile.TemporaryDirectory() as out_dir:\n image_name = os.path.join(out_dir, \"test.nii.gz\")\n for p in TEST_NDARRAYS:\n img = p(np.arange(12).reshape((1, 1, 3, 2, 2)))\n write_nifti(img, image_name, affine=np.diag([1]), target_affine=np.diag([1.4]))\n out = nib.load(image_name)\n np.testing.assert_allclose(\n out.get_fdata(),\n np.array([[[[[0.0, 1.0], [2.0, 3.0]], [[4.0, 5.0], [6.0, 7.0]], [[8.0, 9.0], [10.0, 11.0]]]]]),\n )\n np.testing.assert_allclose(out.affine, np.diag([1.4, 1, 1, 1]))\n\n image_name = os.path.join(out_dir, \"test1.nii.gz\")\n img = p(np.arange(10).reshape((1, 1, 5, 1, 2)))\n write_nifti(\n img, image_name, affine=np.diag([1, 1, 1, 3, 3]), target_affine=np.diag([1.4, 2.0, 2, 3, 5])\n )\n out = nib.load(image_name)\n np.testing.assert_allclose(out.get_fdata(), np.array([[[[[0.0, 1.0]], [[4.0, 5.0]], [[8.0, 9.0]]]]]))\n np.testing.assert_allclose(out.affine, np.diag([1.4, 2, 2, 1]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\n\nfrom monai.networks.utils import eval_mode\nfrom monai.optimizers.lr_scheduler import ExponentialLR, LinearLR\nfrom monai.utils import StateCacher, copy_to_device, optional_import\n\nif TYPE_CHECKING:\n import matplotlib.pyplot as plt\n\n has_matplotlib = True\n import tqdm\n\n has_tqdm = True\nelse:\n plt, has_matplotlib = optional_import(\"matplotlib.pyplot\")\n tqdm, has_tqdm = optional_import(\"tqdm\")\n\n__all__ = [\"LearningRateFinder\"]\n\n\nclass DataLoaderIter:\n def __init__(self, data_loader: DataLoader, image_extractor: Callable, label_extractor: Callable) -> None:\n if not isinstance(data_loader, DataLoader):\n raise ValueError(\n f\"Loader has unsupported type: {type(data_loader)}. Expected type was `torch.utils.data.DataLoader`\"\n )\n self.data_loader = data_loader\n self._iterator = iter(data_loader)\n self.image_extractor = image_extractor\n self.label_extractor = label_extractor\n\n @property\n def dataset(self):\n return self.data_loader.dataset\n\n def inputs_labels_from_batch(self, batch_data):\n images = self.image_extractor(batch_data)\n labels = self.label_extractor(batch_data)\n return images, labels\n\n def __iter__(self):\n return self\n\n def __next__(self):\n batch = next(self._iterator)\n return self.inputs_labels_from_batch(batch)\n\n\nclass TrainDataLoaderIter(DataLoaderIter):\n def __init__(\n self, data_loader: DataLoader, image_extractor: Callable, label_extractor: Callable, auto_reset: bool = True\n ) -> None:\n super().__init__(data_loader, image_extractor, label_extractor)\n self.auto_reset = auto_reset\n\n def __next__(self):\n try:\n batch = next(self._iterator)\n inputs, labels = self.inputs_labels_from_batch(batch)\n except StopIteration:\n if not self.auto_reset:\n raise\n self._iterator = iter(self.data_loader)\n batch = next(self._iterator)\n inputs, labels = self.inputs_labels_from_batch(batch)\n\n return inputs, labels\n\n\nclass ValDataLoaderIter(DataLoaderIter):\n \"\"\"This iterator will reset itself **only** when it is acquired by\n the syntax of normal `iterator`. That is, this iterator just works\n like a `torch.data.DataLoader`. If you want to restart it, you\n should use it like:\n\n ```\n loader_iter = ValDataLoaderIter(data_loader)\n for batch in loader_iter:\n ...\n\n # `loader_iter` should run out of values now, you can restart it by:\n # 1. the way we use a `torch.data.DataLoader`\n for batch in loader_iter: # __iter__ is called implicitly\n ...\n\n # 2. 
passing it into `iter()` manually\n loader_iter = iter(loader_iter) # __iter__ is called by `iter()`\n ```\n \"\"\"\n\n def __init__(self, data_loader: DataLoader, image_extractor: Callable, label_extractor: Callable) -> None:\n super().__init__(data_loader, image_extractor, label_extractor)\n self.run_limit = len(self.data_loader)\n self.run_counter = 0\n\n def __iter__(self):\n if self.run_counter >= self.run_limit:\n self._iterator = iter(self.data_loader)\n self.run_counter = 0\n return self\n\n def __next__(self):\n self.run_counter += 1\n return super().__next__()\n\n\ndef default_image_extractor(x: Any) -> torch.Tensor:\n \"\"\"Default callable for getting image from batch data.\"\"\"\n out: torch.Tensor = x[\"image\"] if isinstance(x, dict) else x[0]\n return out\n\n\ndef default_label_extractor(x: Any) -> torch.Tensor:\n \"\"\"Default callable for getting label from batch data.\"\"\"\n out: torch.Tensor = x[\"label\"] if isinstance(x, dict) else x[1]\n return out\n\n\nclass LearningRateFinder:\n \"\"\"Learning rate range test.\n\n The learning rate range test increases the learning rate in a pre-training run\n between two boundaries in a linear or exponential manner. It provides valuable\n information on how well the network can be trained over a range of learning rates\n and what is the optimal learning rate.\n\n Example (fastai approach):\n >>> lr_finder = LearningRateFinder(net, optimizer, criterion)\n >>> lr_finder.range_test(data_loader, end_lr=100, num_iter=100)\n >>> lr_finder.get_steepest_gradient()\n >>> lr_finder.plot() # to inspect the loss-learning rate graph\n\n Example (Leslie Smith's approach):\n >>> lr_finder = LearningRateFinder(net, optimizer, criterion)\n >>> lr_finder.range_test(train_loader, val_loader=val_loader, end_lr=1, num_iter=100, step_mode=\"linear\")\n\n Gradient accumulation is supported; example:\n >>> train_data = ... # prepared dataset\n >>> desired_bs, real_bs = 32, 4 # batch size\n >>> accumulation_steps = desired_bs // real_bs # required steps for accumulation\n >>> data_loader = torch.utils.data.DataLoader(train_data, batch_size=real_bs, shuffle=True)\n >>> acc_lr_finder = LearningRateFinder(net, optimizer, criterion)\n >>> acc_lr_finder.range_test(data_loader, end_lr=10, num_iter=100, accumulation_steps=accumulation_steps)\n\n By default, image will be extracted from data loader with x[\"image\"] and x[0], depending on whether\n batch data is a dictionary or not (and similar behaviour for extracting the label). If your data loader\n returns something other than this, pass a callable function to extract it, e.g.:\n >>> image_extractor = lambda x: x[\"input\"]\n >>> label_extractor = lambda x: x[100]\n >>> lr_finder = LearningRateFinder(net, optimizer, criterion)\n >>> lr_finder.range_test(train_loader, val_loader, image_extractor, label_extractor)\n\n References:\n Modified from: https://github.com/davidtvs/pytorch-lr-finder.\n Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186\n \"\"\"\n\n def __init__(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: torch.nn.Module,\n device: Optional[Union[str, torch.device]] = None,\n memory_cache: bool = True,\n cache_dir: Optional[str] = None,\n amp: bool = False,\n verbose: bool = True,\n ) -> None:\n \"\"\"Constructor.\n\n Args:\n model: wrapped model.\n optimizer: wrapped optimizer.\n criterion: wrapped loss function.\n device: device on which to test. run a string (\"cpu\" or \"cuda\") with an\n optional ordinal for the device type (e.g. 
\"cuda:X\", where is the ordinal).\n Alternatively, can be an object representing the device on which the\n computation will take place. Default: None, uses the same device as `model`.\n memory_cache: if this flag is set to True, `state_dict` of\n model and optimizer will be cached in memory. Otherwise, they will be saved\n to files under the `cache_dir`.\n cache_dir: path for storing temporary files. If no path is\n specified, system-wide temporary directory is used. Notice that this\n parameter will be ignored if `memory_cache` is True.\n amp: use Automatic Mixed Precision\n verbose: verbose output\n Returns:\n None\n \"\"\"\n # Check if the optimizer is already attached to a scheduler\n self.optimizer = optimizer\n self._check_for_scheduler()\n\n self.model = model\n self.criterion = criterion\n self.history: Dict[str, list] = {\"lr\": [], \"loss\": []}\n self.memory_cache = memory_cache\n self.cache_dir = cache_dir\n self.amp = amp\n self.verbose = verbose\n\n # Save the original state of the model and optimizer so they can be restored if\n # needed\n self.model_device = next(self.model.parameters()).device\n self.state_cacher = StateCacher(memory_cache, cache_dir=cache_dir)\n self.state_cacher.store(\"model\", self.model.state_dict())\n self.state_cacher.store(\"optimizer\", self.optimizer.state_dict())\n\n # If device is None, use the same as the model\n self.device = device if device else self.model_device\n\n def reset(self) -> None:\n \"\"\"Restores the model and optimizer to their initial states.\"\"\"\n\n self.model.load_state_dict(self.state_cacher.retrieve(\"model\"))\n self.optimizer.load_state_dict(self.state_cacher.retrieve(\"optimizer\"))\n self.model.to(self.model_device)\n\n def range_test(\n self,\n train_loader: DataLoader,\n val_loader: Optional[DataLoader] = None,\n image_extractor: Callable = default_image_extractor,\n label_extractor: Callable = default_label_extractor,\n start_lr: Optional[float] = None,\n end_lr: int = 10,\n num_iter: int = 100,\n step_mode: str = \"exp\",\n smooth_f: float = 0.05,\n diverge_th: int = 5,\n accumulation_steps: int = 1,\n non_blocking_transfer: bool = True,\n auto_reset: bool = True,\n ) -> None:\n \"\"\"Performs the learning rate range test.\n\n Args:\n train_loader: training set data loader.\n val_loader: validation data loader (if desired).\n image_extractor: callable function to get the image from a batch of data.\n Default: `x[\"image\"] if isinstance(x, dict) else x[0]`.\n label_extractor: callable function to get the label from a batch of data.\n Default: `x[\"label\"] if isinstance(x, dict) else x[1]`.\n start_lr : the starting learning rate for the range test.\n The default is the optimizer's learning rate.\n end_lr: the maximum learning rate to test. The test may stop earlier than\n this if the result starts diverging.\n num_iter: the max number of iterations for test.\n step_mode: schedule for increasing learning rate: (`linear` or `exp`).\n smooth_f: the loss smoothing factor within the `[0, 1[` interval. Disabled\n if set to `0`, otherwise loss is smoothed using exponential smoothing.\n diverge_th: test is stopped when loss surpasses threshold:\n `diverge_th * best_loss`.\n accumulation_steps: steps for gradient accumulation. 
If set to `1`,\n gradients are not accumulated.\n non_blocking_transfer: when `True`, moves data to device asynchronously if\n possible, e.g., moving CPU Tensors with pinned memory to CUDA devices.\n auto_reset: if `True`, returns model and optimizer to original states at end\n of test.\n Returns:\n None\n \"\"\"\n\n # Reset test results\n self.history = {\"lr\": [], \"loss\": []}\n best_loss = -float(\"inf\")\n\n # Move the model to the proper device\n self.model.to(self.device)\n\n # Check if the optimizer is already attached to a scheduler\n self._check_for_scheduler()\n\n # Set the starting learning rate\n if start_lr:\n self._set_learning_rate(start_lr)\n\n # Check number of iterations\n if num_iter <= 1:\n raise ValueError(\"`num_iter` must be larger than 1\")\n\n # Initialize the proper learning rate policy\n lr_schedule: Union[ExponentialLR, LinearLR]\n if step_mode.lower() == \"exp\":\n lr_schedule = ExponentialLR(self.optimizer, end_lr, num_iter)\n elif step_mode.lower() == \"linear\":\n lr_schedule = LinearLR(self.optimizer, end_lr, num_iter)\n else:\n raise ValueError(f\"expected one of (exp, linear), got {step_mode}\")\n\n if smooth_f < 0 or smooth_f >= 1:\n raise ValueError(\"smooth_f is outside the range [0, 1[\")\n\n # Create an iterator to get data batch by batch\n train_iter = TrainDataLoaderIter(train_loader, image_extractor, label_extractor)\n if val_loader:\n val_iter = ValDataLoaderIter(val_loader, image_extractor, label_extractor)\n\n trange: Union[partial[tqdm.trange], Type[range]]\n if self.verbose and has_tqdm:\n trange = partial(tqdm.trange, desc=\"Computing optimal learning rate\")\n tprint = tqdm.tqdm.write\n else:\n trange = range\n tprint = print\n\n for iteration in trange(num_iter):\n if self.verbose and not has_tqdm:\n print(f\"Computing optimal learning rate, iteration {iteration + 1}/{num_iter}\")\n\n # Train on batch and retrieve loss\n loss = self._train_batch(\n train_iter,\n accumulation_steps,\n non_blocking_transfer=non_blocking_transfer,\n )\n if val_loader:\n loss = self._validate(val_iter, non_blocking_transfer=non_blocking_transfer)\n\n # Update the learning rate\n self.history[\"lr\"].append(lr_schedule.get_lr()[0])\n lr_schedule.step()\n\n # Track the best loss and smooth it if smooth_f is specified\n if iteration == 0:\n best_loss = loss\n else:\n if smooth_f > 0:\n loss = smooth_f * loss + (1 - smooth_f) * self.history[\"loss\"][-1]\n if loss < best_loss:\n best_loss = loss\n\n # Check if the loss has diverged; if it has, stop the test\n self.history[\"loss\"].append(loss)\n if loss > diverge_th * best_loss:\n if self.verbose:\n tprint(\"Stopping early, the loss has diverged\")\n break\n\n if auto_reset:\n if self.verbose:\n print(\"Resetting model and optimizer\")\n self.reset()\n\n def _set_learning_rate(self, new_lrs: Union[float, list]) -> None:\n \"\"\"Set learning rate(s) for optimizer.\"\"\"\n if not isinstance(new_lrs, list):\n new_lrs = [new_lrs] * len(self.optimizer.param_groups)\n if len(new_lrs) != len(self.optimizer.param_groups):\n raise ValueError(\n \"Length of `new_lrs` is not equal to the number of parameter groups \" + \"in the given optimizer\"\n )\n\n for param_group, new_lr in zip(self.optimizer.param_groups, new_lrs):\n param_group[\"lr\"] = new_lr\n\n def _check_for_scheduler(self):\n \"\"\"Check optimizer doesn't already have scheduler.\"\"\"\n for param_group in self.optimizer.param_groups:\n if \"initial_lr\" in param_group:\n raise RuntimeError(\"Optimizer already has a scheduler attached to it\")\n\n def 
_train_batch(self, train_iter, accumulation_steps: int, non_blocking_transfer: bool = True) -> float:\n self.model.train()\n total_loss = 0\n\n self.optimizer.zero_grad()\n for i in range(accumulation_steps):\n inputs, labels = next(train_iter)\n inputs, labels = copy_to_device([inputs, labels], device=self.device, non_blocking=non_blocking_transfer)\n\n # Forward pass\n outputs = self.model(inputs)\n loss = self.criterion(outputs, labels)\n\n # Loss should be averaged in each step\n loss /= accumulation_steps\n\n # Backward pass\n if self.amp and hasattr(self.optimizer, \"_amp_stash\"):\n # For minor performance optimization, see also:\n # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations\n delay_unscale = ((i + 1) % accumulation_steps) != 0\n\n with torch.cuda.amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss: # type: ignore\n scaled_loss.backward()\n else:\n loss.backward()\n\n total_loss += loss.item()\n\n self.optimizer.step()\n\n return total_loss\n\n def _validate(self, val_iter: ValDataLoaderIter, non_blocking_transfer: bool = True) -> float:\n # Set model to evaluation mode and disable gradient computation\n running_loss = 0\n with eval_mode(self.model):\n for inputs, labels in val_iter:\n # Copy data to the correct device\n inputs, labels = copy_to_device(\n [inputs, labels], device=self.device, non_blocking=non_blocking_transfer\n )\n\n # Forward pass and loss computation\n outputs = self.model(inputs)\n loss = self.criterion(outputs, labels)\n running_loss += loss.item() * len(labels)\n\n return running_loss / len(val_iter.dataset)\n\n def get_lrs_and_losses(\n self,\n skip_start: int = 0,\n skip_end: int = 0,\n ) -> Tuple[list, list]:\n \"\"\"Get learning rates and their corresponding losses\n\n Args:\n skip_start: number of batches to trim from the start.\n skip_end: number of batches to trim from the end.\n \"\"\"\n if skip_start < 0:\n raise ValueError(\"skip_start cannot be negative\")\n if skip_end < 0:\n raise ValueError(\"skip_end cannot be negative\")\n\n lrs = self.history[\"lr\"]\n losses = self.history[\"loss\"]\n end_idx = len(lrs) - skip_end - 1\n lrs = lrs[skip_start:end_idx]\n losses = losses[skip_start:end_idx]\n\n return lrs, losses\n\n def get_steepest_gradient(\n self,\n skip_start: int = 0,\n skip_end: int = 0,\n ) -> Union[Tuple[float, float], Tuple[None, None]]:\n \"\"\"Get learning rate which has steepest gradient and its corresponding loss\n\n Args:\n skip_start: number of batches to trim from the start.\n skip_end: number of batches to trim from the end.\n\n Returns:\n Learning rate which has steepest gradient and its corresponding loss\n \"\"\"\n lrs, losses = self.get_lrs_and_losses(skip_start, skip_end)\n\n try:\n min_grad_idx = np.gradient(np.array(losses)).argmin()\n return lrs[min_grad_idx], losses[min_grad_idx]\n except ValueError:\n print(\"Failed to compute the gradients, there might not be enough points.\")\n return None, None\n\n def plot(\n self,\n skip_start: int = 0,\n skip_end: int = 0,\n log_lr: bool = True,\n ax=None,\n steepest_lr: bool = True,\n ):\n \"\"\"Plots the learning rate range test.\n\n Args:\n skip_start: number of batches to trim from the start.\n skip_end: number of batches to trim from the start.\n log_lr: True to plot the learning rate in a logarithmic\n scale; otherwise, plotted in a linear scale.\n ax: the plot is created in the specified matplotlib axes object and the\n figure is not be shown. 
If `None`, then the figure and axes object are\n created in this method and the figure is shown.\n steepest_lr: plot the learning rate which had the steepest gradient.\n\n Returns:\n The `matplotlib.axes.Axes` object that contains the plot. Returns `None` if\n `matplotlib` is not installed.\n \"\"\"\n if not has_matplotlib:\n warnings.warn(\"Matplotlib is missing, can't plot result\")\n return None\n\n lrs, losses = self.get_lrs_and_losses(skip_start, skip_end)\n\n # Create the figure and axes object if axes was not already given\n fig = None\n if ax is None:\n fig, ax = plt.subplots()\n\n # Plot loss as a function of the learning rate\n ax.plot(lrs, losses)\n\n # Plot the LR with steepest gradient\n if steepest_lr:\n lr_at_steepest_grad, loss_at_steepest_grad = self.get_steepest_gradient(skip_start, skip_end)\n if lr_at_steepest_grad is not None:\n ax.scatter(\n lr_at_steepest_grad,\n loss_at_steepest_grad,\n s=75,\n marker=\"o\",\n color=\"red\",\n zorder=3,\n label=\"steepest gradient\",\n )\n ax.legend()\n\n if log_lr:\n ax.set_xscale(\"log\")\n ax.set_xlabel(\"Learning rate\")\n ax.set_ylabel(\"Loss\")\n\n # Show only if the figure was created internally\n if fig is not None:\n plt.show()\n\n return ax\n",
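get_steepest_gradient() suggests the learning rate at which the loss falls fastest, i.e. the argmin of np.gradient over the recorded losses. A small synthetic demonstration of that rule on a made-up range-test curve:

import numpy as np

# Synthetic LR range-test curve: a descending dip centred near
# log10(lr) = -2.5, plus a diverging tail at high learning rates.
log_lrs = np.linspace(-5, 0, 200)
losses = 1.0 + np.tanh(-2.0 * (log_lrs + 2.5)) + np.exp(4.0 * (log_lrs + 0.3))

# Same selection rule as get_steepest_gradient()
min_grad_idx = np.gradient(losses).argmin()
print(f"suggested lr ~ {10 ** log_lrs[min_grad_idx]:.2e}")  # ~3e-3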
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nfrom parameterized import parameterized\n\nfrom monai.data.synthetic import create_test_image_2d, create_test_image_3d\nfrom monai.transforms import KSpaceSpikeNoise, RandKSpaceSpikeNoise\nfrom monai.utils.misc import set_determinism\nfrom tests.utils import TEST_NDARRAYS\n\nTESTS = []\nfor shape in ((128, 64), (64, 48, 80)):\n for p in TEST_NDARRAYS:\n for channel_wise in (True, False):\n TESTS.append((shape, p, channel_wise))\n\n\nclass TestRandKSpaceSpikeNoise(unittest.TestCase):\n def setUp(self):\n set_determinism(0)\n super().setUp()\n\n def tearDown(self):\n set_determinism(None)\n\n @staticmethod\n def get_data(im_shape, im_type):\n create_test_image = create_test_image_2d if len(im_shape) == 2 else create_test_image_3d\n im = create_test_image(*im_shape, rad_max=20, noise_max=0.0, num_seg_classes=5)[0][None]\n return im_type(im)\n\n @parameterized.expand(TESTS)\n def test_0_prob(self, im_shape, im_type, channel_wise):\n im = self.get_data(im_shape, im_type)\n intensity_range = [14, 15]\n t = RandKSpaceSpikeNoise(0.0, intensity_range, channel_wise)\n out = t(im)\n self.assertEqual(type(im), type(out))\n if isinstance(out, torch.Tensor):\n self.assertEqual(out.device, im.device)\n im, out = im.cpu(), out.cpu()\n np.testing.assert_allclose(im, out)\n\n @parameterized.expand(TESTS)\n def test_1_prob(self, im_shape, im_type, channel_wise):\n im = self.get_data(im_shape, im_type)\n intensity_range = [14, 14]\n t = RandKSpaceSpikeNoise(1.0, intensity_range, channel_wise)\n out = t(im)\n base_t = KSpaceSpikeNoise(t.sampled_locs, [14])\n out = out - base_t(im)\n self.assertEqual(type(im), type(out))\n if isinstance(out, torch.Tensor):\n self.assertEqual(out.device, im.device)\n im, out = im.cpu(), out.cpu()\n np.testing.assert_allclose(out, im * 0)\n\n @parameterized.expand(TESTS)\n def test_same_result(self, im_shape, im_type, channel_wise):\n im = self.get_data(im_shape, im_type)\n intensity_range = [14, 15]\n t = RandKSpaceSpikeNoise(0.0, intensity_range, channel_wise)\n t.set_random_state(42)\n out1 = t(deepcopy(im))\n t.set_random_state(42)\n out2 = t(deepcopy(im))\n self.assertEqual(type(im), type(out1))\n if isinstance(out1, torch.Tensor):\n self.assertEqual(out1.device, im.device)\n out1, out2 = out1.cpu(), out2.cpu()\n np.testing.assert_allclose(out1, out2)\n\n @parameterized.expand(TESTS)\n def test_intensity(self, im_shape, im_type, channel_wise):\n im = self.get_data(im_shape, im_type)\n intensity_range = [14, 14.1]\n t = RandKSpaceSpikeNoise(1.0, intensity_range, channel_wise)\n _ = t(deepcopy(im))\n self.assertGreaterEqual(t.sampled_k_intensity[0], 14)\n self.assertLessEqual(t.sampled_k_intensity[0], 14.1)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import Callable, List, Optional, Union\n\nimport torch\nfrom torch.nn.modules.loss import _Loss\n\nfrom monai.networks import one_hot\nfrom monai.utils import LossReduction\n\n\nclass TverskyLoss(_Loss):\n\n \"\"\"\n Compute the Tversky loss defined in:\n\n Sadegh et al. (2017) Tversky loss function for image segmentation\n using 3D fully convolutional deep networks. (https://arxiv.org/abs/1706.05721)\n\n Adapted from:\n https://github.com/NifTK/NiftyNet/blob/v0.6.0/niftynet/layer/loss_segmentation.py#L631\n\n \"\"\"\n\n def __init__(\n self,\n include_background: bool = True,\n to_onehot_y: bool = False,\n sigmoid: bool = False,\n softmax: bool = False,\n other_act: Optional[Callable] = None,\n alpha: float = 0.5,\n beta: float = 0.5,\n reduction: Union[LossReduction, str] = LossReduction.MEAN,\n smooth_nr: float = 1e-5,\n smooth_dr: float = 1e-5,\n batch: bool = False,\n ) -> None:\n \"\"\"\n Args:\n include_background: If False channel index 0 (background category) is excluded from the calculation.\n to_onehot_y: whether to convert `y` into the one-hot format. Defaults to False.\n sigmoid: If True, apply a sigmoid function to the prediction.\n softmax: If True, apply a softmax function to the prediction.\n other_act: if don't want to use `sigmoid` or `softmax`, use other callable function to execute\n other activation layers, Defaults to ``None``. for example:\n `other_act = torch.tanh`.\n alpha: weight of false positives\n beta: weight of false negatives\n reduction: {``\"none\"``, ``\"mean\"``, ``\"sum\"``}\n Specifies the reduction to apply to the output. 
Defaults to ``\"mean\"``.\n\n - ``\"none\"``: no reduction will be applied.\n - ``\"mean\"``: the sum of the output will be divided by the number of elements in the output.\n - ``\"sum\"``: the output will be summed.\n\n smooth_nr: a small constant added to the numerator to avoid zero.\n smooth_dr: a small constant added to the denominator to avoid nan.\n batch: whether to sum the intersection and union areas over the batch dimension before the dividing.\n Defaults to False, a Dice loss value is computed independently from each item in the batch\n before any `reduction`.\n\n Raises:\n TypeError: When ``other_act`` is not an ``Optional[Callable]``.\n ValueError: When more than 1 of [``sigmoid=True``, ``softmax=True``, ``other_act is not None``].\n Incompatible values.\n\n \"\"\"\n\n super().__init__(reduction=LossReduction(reduction).value)\n if other_act is not None and not callable(other_act):\n raise TypeError(f\"other_act must be None or callable but is {type(other_act).__name__}.\")\n if int(sigmoid) + int(softmax) + int(other_act is not None) > 1:\n raise ValueError(\"Incompatible values: more than 1 of [sigmoid=True, softmax=True, other_act is not None].\")\n self.include_background = include_background\n self.to_onehot_y = to_onehot_y\n self.sigmoid = sigmoid\n self.softmax = softmax\n self.other_act = other_act\n self.alpha = alpha\n self.beta = beta\n self.smooth_nr = float(smooth_nr)\n self.smooth_dr = float(smooth_dr)\n self.batch = batch\n\n def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n input: the shape should be BNH[WD].\n target: the shape should be BNH[WD].\n\n Raises:\n ValueError: When ``self.reduction`` is not one of [\"mean\", \"sum\", \"none\"].\n\n \"\"\"\n if self.sigmoid:\n input = torch.sigmoid(input)\n\n n_pred_ch = input.shape[1]\n if self.softmax:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `softmax=True` ignored.\")\n else:\n input = torch.softmax(input, 1)\n\n if self.other_act is not None:\n input = self.other_act(input)\n\n if self.to_onehot_y:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `to_onehot_y=True` ignored.\")\n else:\n target = one_hot(target, num_classes=n_pred_ch)\n\n if not self.include_background:\n if n_pred_ch == 1:\n warnings.warn(\"single channel prediction, `include_background=False` ignored.\")\n else:\n # if skipping background, removing first channel\n target = target[:, 1:]\n input = input[:, 1:]\n\n if target.shape != input.shape:\n raise AssertionError(f\"ground truth has differing shape ({target.shape}) from input ({input.shape})\")\n\n p0 = input\n p1 = 1 - p0\n g0 = target\n g1 = 1 - g0\n\n # reducing only spatial dimensions (not batch nor channels)\n reduce_axis: List[int] = torch.arange(2, len(input.shape)).tolist()\n if self.batch:\n # reducing spatial dimensions and batch\n reduce_axis = [0] + reduce_axis\n\n tp = torch.sum(p0 * g0, reduce_axis)\n fp = self.alpha * torch.sum(p0 * g1, reduce_axis)\n fn = self.beta * torch.sum(p1 * g0, reduce_axis)\n numerator = tp + self.smooth_nr\n denominator = tp + fp + fn + self.smooth_dr\n\n score: torch.Tensor = 1.0 - numerator / denominator\n\n if self.reduction == LossReduction.SUM.value:\n return torch.sum(score) # sum over the batch and channel dims\n if self.reduction == LossReduction.NONE.value:\n return score # returns [N, num_classes] losses\n if self.reduction == LossReduction.MEAN.value:\n return torch.mean(score)\n raise ValueError(f'Unsupported reduction: {self.reduction}, available 
options are [\"mean\", \"sum\", \"none\"].')\n"
] | [
[
"torch.tensor"
],
[
"numpy.array",
"numpy.random.randint"
],
[
"torch.randint"
],
[
"torch.device",
"numpy.array",
"torch.autograd.gradcheck",
"numpy.testing.assert_allclose"
],
[
"numpy.testing.assert_equal",
"numpy.array"
],
[
"numpy.diag",
"numpy.arange",
"numpy.set_printoptions",
"numpy.testing.assert_allclose",
"numpy.array"
],
[
"numpy.array",
"matplotlib.pyplot.show",
"torch.cuda.amp.scale_loss",
"matplotlib.pyplot.subplots"
],
[
"numpy.testing.assert_allclose"
],
[
"torch.softmax",
"torch.mean",
"torch.sigmoid",
"torch.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SR42-dev/line-following-robot-with-aruco-markers-obstacle-detection-and-turtlesim-publisher | [
"d7dae86a4f1fdc56ab80193c218e25243e44e487"
] | [
"lineFollowerArucoROS-checkpoint3.py"
] | [
"import sys\r\nimport cv2\r\nimport math\r\nimport time\r\nimport rospy\r\nimport serial\r\nimport argparse\r\nimport numpy as np\r\nfrom std_srvs.srv import Empty\r\nfrom turtlesim.msg import Pose\r\nfrom geometry_msgs.msg import Twist\r\n\r\n# ROS movement global variables and function definitions\r\nx = 0\r\ny = 0\r\nz = 0\r\nyaw = 0\r\n\r\ndef poseCallback(pose_message):\r\n global x, y, z, yaw\r\n x = pose_message.x\r\n y = pose_message.y\r\n yaw = pose_message.theta\r\n\r\n\r\ndef move(speed, distance, is_forward):\r\n velocity_message = Twist()\r\n global x, y\r\n x0 = x\r\n y0 = y\r\n\r\n if is_forward:\r\n velocity_message.linear.x = abs(speed)\r\n else:\r\n velocity_message.linear.x = -abs(speed)\r\n\r\n distance_moved = 0.0\r\n loop_rate = rospy.Rate(10)\r\n cmd_vel_topic = '/turtle1/cmd_vel'\r\n velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\r\n\r\n while True:\r\n\r\n rospy.loginfo('Turtlesim linear movement')\r\n velocity_publisher.publish(velocity_message)\r\n loop_rate.sleep()\r\n\r\n distance_moved = distance_moved + abs(0.5 * math.sqrt(((x - x0) * 2) + ((y - y0) * 2)))\r\n if not (distance_moved < distance):\r\n rospy.loginfo(\"----Reached----\")\r\n break\r\n\r\n velocity_message.linear.x = 0\r\n velocity_publisher.publish(velocity_message)\r\n\r\n\r\ndef rotate(angular_speed_degree, relative_angle_degree, clockwise):\r\n global yaw\r\n velocity_message = Twist()\r\n velocity_message.linear.x = 0\r\n velocity_message.linear.y = 0\r\n velocity_message.linear.z = 0\r\n velocity_message.angular.x = 0\r\n velocity_message.angular.y = 0\r\n velocity_message.angular.z = 0\r\n\r\n theta0 = yaw\r\n angular_speed = math.radians(abs(angular_speed_degree))\r\n\r\n if clockwise:\r\n velocity_message.angular.z = -abs(angular_speed)\r\n else:\r\n velocity_message.angular.z = abs(angular_speed)\r\n\r\n angle_moved = 0.0\r\n\r\n loop_rate = rospy.Rate(10)\r\n cmd_vel_topic = '/turtle1/cmd_vel'\r\n velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\r\n\r\n t0 = rospy.Time.now().to_sec()\r\n\r\n while True:\r\n\r\n rospy.loginfo('Turtlesim rotation')\r\n velocity_publisher.publish(velocity_message)\r\n\r\n t1 = rospy.Time.now().to_sec()\r\n current_angle_degree = (t1 - t0) * angular_speed_degree\r\n loop_rate.sleep()\r\n\r\n if current_angle_degree > relative_angle_degree:\r\n rospy.loginfo('----Reached----')\r\n break\r\n\r\n velocity_message.angular.z = 0\r\n velocity_publisher.publish(velocity_message)\r\n\r\nrospy.init_node('turtlesim_motion_pose', anonymous=True)\r\n\r\ncmd_vel_topic = '/turtle1/cmd_vel'\r\nvelocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\r\n\r\nposition_topic = '/turtle1/pose'\r\npose_subscriber = rospy.Subscriber(position_topic, Pose, poseCallback)\r\ntime.sleep(2)\r\n\r\n# construct the argument parser and parse the arguments\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-t\", \"--type\", type=str,\r\n\tdefault=\"DICT_ARUCO_ORIGINAL\",\r\n\thelp=\"type of ArUCo tag to detect\")\r\nargs = vars(ap.parse_args())\r\n\r\n# define names of each possible ArUco tag OpenCV supports\r\nARUCO_DICT = {\r\n\t\"DICT_4X4_50\": cv2.aruco.DICT_4X4_50,\r\n\t\"DICT_4X4_100\": cv2.aruco.DICT_4X4_100,\r\n\t\"DICT_4X4_250\": cv2.aruco.DICT_4X4_250,\r\n\t\"DICT_4X4_1000\": cv2.aruco.DICT_4X4_1000,\r\n\t\"DICT_5X5_50\": cv2.aruco.DICT_5X5_50,\r\n\t\"DICT_5X5_100\": cv2.aruco.DICT_5X5_100,\r\n\t\"DICT_5X5_250\": cv2.aruco.DICT_5X5_250,\r\n\t\"DICT_5X5_1000\": cv2.aruco.DICT_5X5_1000,\r\n\t\"DICT_6X6_50\": 
cv2.aruco.DICT_6X6_50,\r\n\t\"DICT_6X6_100\": cv2.aruco.DICT_6X6_100,\r\n\t\"DICT_6X6_250\": cv2.aruco.DICT_6X6_250,\r\n\t\"DICT_6X6_1000\": cv2.aruco.DICT_6X6_1000,\r\n\t\"DICT_7X7_50\": cv2.aruco.DICT_7X7_50,\r\n\t\"DICT_7X7_100\": cv2.aruco.DICT_7X7_100,\r\n\t\"DICT_7X7_250\": cv2.aruco.DICT_7X7_250,\r\n\t\"DICT_7X7_1000\": cv2.aruco.DICT_7X7_1000,\r\n\t\"DICT_ARUCO_ORIGINAL\": cv2.aruco.DICT_ARUCO_ORIGINAL,\r\n\t\"DICT_APRILTAG_16h5\": cv2.aruco.DICT_APRILTAG_16h5,\r\n\t\"DICT_APRILTAG_25h9\": cv2.aruco.DICT_APRILTAG_25h9,\r\n\t\"DICT_APRILTAG_36h10\": cv2.aruco.DICT_APRILTAG_36h10,\r\n\t\"DICT_APRILTAG_36h11\": cv2.aruco.DICT_APRILTAG_36h11\r\n}\r\n\r\n# verify that the supplied ArUCo tag exists and is supported by\r\n# OpenCV\r\nif ARUCO_DICT.get(args[\"type\"], None) is None:\r\n\tprint(\"[INFO] ArUCo tag of '{}' is not supported\".format(\r\n\t\targs[\"type\"]))\r\n\tsys.exit(0)\r\n# load the ArUCo dictionary and grab the ArUCo parameters\r\nprint(\"[INFO] detecting '{}' tags...\".format(args[\"type\"]))\r\narucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_250)\r\narucoParams = cv2.aruco.DetectorParameters_create()\r\n# initialize the video stream and allow the camera sensor to warm up\r\nprint(\"[INFO] starting video stream...\")\r\n\r\ncap = cv2.VideoCapture(2)\r\nc1 = 0\r\nlinecolor = (100, 215, 255)\r\nlwr_red = np.array([0, 0, 0])\r\nupper_red = np.array([179, 65, 55])\r\ncountl = False\r\ncountr = False\r\nSer = serial.Serial(\"/dev/ttyUSB0\", baudrate=9600)\r\nSer.flush()\r\nwidth = cap.get(3)\r\nwhile True:\r\n ret, frame = cap.read()\r\n if not ret:\r\n _, frame = cap.read()\r\n\r\n # detect ArUco markers in the input frame\r\n (corners, ids, rejected) = cv2.aruco.detectMarkers(frame,\r\n arucoDict, parameters=arucoParams)\r\n\r\n\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n kernel = np.ones((5, 5), np.uint8)\r\n mask = cv2.inRange(hsv, lwr_red, upper_red)\r\n mask = cv2.dilate(mask, kernel, iterations=1)\r\n res = cv2.bitwise_and(frame, frame, mask=mask)\r\n cnts, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n center = None\r\n\r\n # verify at least one ArUco marker was detected\r\n if len(corners) > 0:\r\n # flatten the ArUco IDs list\r\n ids = ids.flatten()\r\n # loop over the detected ArUCo corners\r\n for (markerCorner, markerID) in zip(corners, ids):\r\n # extract the marker corners (which are always returned\r\n # in top-left, top-right, bottom-right, and bottom-left\r\n # order)\r\n corners = markerCorner.reshape((4, 2))\r\n (topLeft, topRight, bottomRight, bottomLeft) = corners\r\n # convert each of the (x, y)-coordinate pairs to integers\r\n topRight = (int(topRight[0]), int(topRight[1]))\r\n bottomRight = (int(bottomRight[0]), int(bottomRight[1]))\r\n bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))\r\n topLeft = (int(topLeft[0]), int(topLeft[1]))\r\n\r\n # draw the bounding box of the ArUCo detection\r\n cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)\r\n cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)\r\n cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)\r\n cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)\r\n # compute and draw the center (x, y)-coordinates of the\r\n # ArUco marker\r\n cX = int((topLeft[0] + bottomRight[0]) / 2.0)\r\n cY = int((topLeft[1] + bottomRight[1]) / 2.0)\r\n cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)\r\n # draw the ArUco marker ID on the frame\r\n\r\n cv2.putText(frame, str(markerID),\r\n (topLeft[0], topLeft[1] - 15),\r\n cv2.FONT_HERSHEY_SIMPLEX,\r\n 
0.5, (0, 255, 0), 2)\r\n if markerID == 0:\r\n if not countl:\r\n countr = False\r\n countl=True\r\n i='f'\r\n for lp in range(12):\r\n Ser.write(i.encode())\r\n move(1, 1, True)\r\n time.sleep(0.1)\r\n cv2.putText(frame, '<--', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n print(\"Left\")\r\n i = 'l' # left turn\r\n for lp in range(6):\r\n Ser.write(i.encode())\r\n rotate(30, 10, False)\r\n time.sleep(0.5)\r\n i='f'\r\n for lp in range(7):\r\n Ser.write(i.encode())\r\n move(1, 1, True)\r\n time.sleep(0.1)\r\n elif markerID == 1:\r\n if not countr:\r\n countl = False\r\n countr=True\r\n i='f'\r\n for lp in range(8):\r\n Ser.write(i.encode())\r\n move(1, 1, True)\r\n time.sleep(0.1)\r\n i = 'r' # left turn\r\n cv2.putText(frame, '-->', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n print(\"Right\")\r\n for lp in range(6):\r\n Ser.write(i.encode())\r\n rotate(30, 10, True)\r\n time.sleep(0.5)\r\n else:\r\n i = 'x'\r\n Ser.write(i.encode())\r\n print(\"Invalid\")\r\n\r\n if len(cnts) > 0:\r\n c = max(cnts, key=cv2.contourArea)\r\n ((x, y), radius) = cv2.minEnclosingCircle(c)\r\n M = cv2.moments(c)\r\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\r\n if radius > 3:\r\n # cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 255), 2)\r\n cv2.circle(frame, center, 5, linecolor, -1)\r\n\r\n if (x > 0.25 * width and x <= 0.75 * width):\r\n print('Forward')\r\n cv2.putText(frame, '^', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n Ser.write(b'f')\r\n move(1, 1, True)\r\n # time.sleep(0.01)\r\n\r\n else:\r\n print(\"Track Not Visible\")\r\n c1 += 1\r\n if (c1 == 5):\r\n print(\"Backward\")\r\n cv2.putText(frame, 'V', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n Ser.write(b'b')\r\n move(1, 1, False)\r\n c1 = 0\r\n\r\n time.sleep(0.2)\r\n cv2.imshow(\"Frame\", frame)\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n cap.release()\r\n Ser.close()\r\n cv2.destroyAllWindows()\r\n break"
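Three pieces of the script above can be isolated for clarity: ArUco marker IDs map to turn commands, the line centroid alone only decides forward versus lost-track, and move() accumulates travelled distance (its `(x - x0) * 2` is presumably a typo for the squared terms). A minimal sketch of each:

import math

def marker_command(marker_id):
    """Marker IDs drive the turns above: 0 -> left, 1 -> right, else invalid."""
    return {0: 'l', 1: 'r'}.get(marker_id, 'x')

def line_command(cx, width):
    """The follower itself only sends 'f' while the centroid sits in the
    middle half of the frame; repeated misses trigger 'b' in the script."""
    return 'f' if 0.25 * width < cx <= 0.75 * width else None

def distance_moved(x0, y0, x, y):
    # Euclidean distance between poses, with the squares the script
    # presumably intended.
    return math.sqrt((x - x0) ** 2 + (y - y0) ** 2)

print(marker_command(0), line_command(320, 640), distance_moved(0, 0, 3, 4))  # l f 5.0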
] | [
[
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bkornpob/axehelper | [
"d89407f73f92e140a5cc9a76c643b9a8656e8b0f",
"d89407f73f92e140a5cc9a76c643b9a8656e8b0f",
"d89407f73f92e140a5cc9a76c643b9a8656e8b0f"
] | [
"build/lib/axehelper/axehelper_bkg.py",
"build/lib/axehelper/make_sip.py",
"build/lib/axehelper/photapcorr.py"
] | [
"# Kornpob Bhirombhakdi\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy,glob,os\nfrom astropy.io import fits\nfrom math import pi\n\nclass AXEhelper_BKG:\n def __init__(self,axeflist=None,fltflist=None,\n padxleft=5,padxright=5,\n padylow=10,halfdy=3,padyup=10,\n adjusty=1.2\n ):\n self.axeflist = axeflist\n self.fltflist = fltflist\n self.params = (padxleft,padxright,padylow,halfdy,padyup,adjusty)\n self.headerall = self._headerall()\n ####################\n ####################\n ####################\n def make_poly2d(self):\n OBJ = {}\n fltflist = self.fltflist\n axeflist = self.axeflist\n HEADERALL = self.headerall\n for ii,i in enumerate(fltflist):\n # image\n tmp = fits.open(i)\n tmpdata = tmp[1].data.copy()\n tmpdq = tmp['DQ'].data.copy()\n\n # header & prep\n tmpheader = HEADERALL[axeflist[ii]]\n xref,yref = tmpheader['XYREF']\n pixx = tmpheader['PIXX']\n pixy = tmpheader['PIXY']\n cc0x,cc0y,cc1x,cc1y = tmpheader['CC']\n sectx = tmpheader['SECTX']\n secty = tmpheader['SECTY']\n\n # Polynomial2D\n x1 = np.arange(cc0x,cc1x)\n x2 = np.arange(cc0y,cc1y)\n x1,x2 = np.meshgrid(x1,x2)\n\n obj = Polynomial2D()\n obj.data['X1'] = x1.copy()\n obj.data['X2'] = x2.copy()\n obj.data['Y'] = tmpdata[cc0y:cc1y,cc0x:cc1x]\n # print(x1.shape,x2.shape,obj.data['Y'].shape) \n\n # data['MASK']\n tmp = np.full_like(tmpdq,True,dtype=bool)\n m = np.where(tmpdq==0)\n tmp[m] = False\n obj.data['MASK'] = tmp[cc0y:cc1y,cc0x:cc1x]\n # print(obj.data['Y'].shape,obj.data['MASK'].shape) \n\n OBJ[i] = copy.deepcopy(obj)\n return OBJ\n ####################\n ####################\n ####################\n def _headerall(self):\n axeflist = self.axeflist\n fltflist = self.fltflist\n padxleft,padxright,padylow,halfdy,padyup,adjusty = self.params\n \n tmp = {}\n for i in axeflist:\n # read from header\n HEADER = copy.deepcopy(fits.open(i)[1].header)\n xref,yref = HEADER['REFPNTX'],HEADER['REFPNTY']\n bb0x,bb1x = HEADER['BB0X'],HEADER['BB1X']\n orient = HEADER['ORIENT']\n cpointx,cpointy = HEADER['CPOINTX'],HEADER['CPOINTY']\n dldx0,dldx1 = HEADER['DLDX0'],HEADER['DLDX1']\n\n # manually adjust offset\n yref += adjusty\n\n # trace and wavelength\n fny = lambda x : np.tan((90.+orient)*pi/180.) 
* (x - cpointx) + yref\n fnw = lambda x : dldx1 * (x - cpointx) + dldx0\n\n pixx = np.array([round(xref),round(bb1x)],dtype=int)\n pixy = np.round(fny(pixx)).astype(int)\n ww = fnw(pixx)\n \n # section\n pixywidth = pixy[-1] - pixy[0] + 1\n sectx = (padxleft,round(bb0x-xref),round(bb1x-bb0x),padxright)\n secty = (padylow,halfdy,pixywidth,halfdy,padyup)\n\n # cut box\n cc0x = round(xref)-padxleft\n cc1x = round(bb1x)+padxright\n cc0y = int(fny(cc0x))-halfdy-padylow\n cc1y = int(fny(cc1x))+halfdy+padyup\n\n # output \n tmp[i] = {}\n tmp[i]['XYREF'] = (xref,yref)\n tmp[i]['DLDX'] = (dldx0,dldx1)\n tmp[i]['BBX'] = (bb0x,bb1x)\n tmp[i]['PIXX'] = pixx.copy()\n tmp[i]['PIXY'] = pixy.copy()\n tmp[i]['WW'] = ww.copy()\n tmp[i]['SECTX'] = copy.deepcopy(sectx)\n tmp[i]['SECTY'] = copy.deepcopy(secty)\n tmp[i]['CC'] = (cc0x,cc0y,cc1x,cc1y)\n\n return copy.deepcopy(tmp)\n ####################\n ####################\n ####################\n def show(self,save=False,savefname='default'):\n fltflist = self.fltflist\n axeflist = self.axeflist\n HEADERALL = self.headerall\n\n for ii,i in enumerate(fltflist):\n tmp = fits.open(i)\n tmpdata = tmp[1].data.copy()\n\n tmpheader = HEADERALL[axeflist[ii]]\n xref,yref = tmpheader['XYREF']\n pixx = tmpheader['PIXX']\n pixy = tmpheader['PIXY']\n ww = tmpheader['WW']\n cc0x,cc0y,cc1x,cc1y = tmpheader['CC']\n sectx = tmpheader['SECTX']\n secty = tmpheader['SECTY']\n\n fig,ax = plt.subplots(2,1,sharex=True)\n fig.tight_layout()\n m = np.where(np.isfinite(tmpdata))\n vmin,vmax = np.percentile(tmpdata[m],5.),np.percentile(tmpdata[m],99.)\n ax[0].imshow(tmpdata,origin='lower',cmap='viridis',vmin=vmin,vmax=vmax)\n ax[0].scatter(xref,yref,s=30,facecolor='red',edgecolor='None')\n ax[0].plot(pixx,pixy,'r-')\n ax[0].set_xlim(cc0x,cc1x)\n ax[0].set_ylim(cc0y,cc1y)\n ax[0].set_title('{0}'.format(i.split('/')[-1].split('_')[0]),fontsize=20)\n ax[0].set_ylabel('pixY',fontsize=20)\n\n bb0x = cc0x+sectx[0]+sectx[1]\n bb1x = bb0x+sectx[2]\n bb0y = cc0y+secty[0]\n bb1y = bb0y+secty[1]+secty[2]+secty[3]\n tmpx = [bb0x,bb1x,bb1x,bb0x,bb0x]\n tmpy = [bb0y,bb0y,bb1y,bb1y,bb0y]\n ax[0].plot(tmpx,tmpy,'r-')\n\n ax[1].plot(pixx,ww)\n ax[1].set_xlabel('pixX',fontsize=20)\n ax[1].set_ylabel('obs. wavelength (A)',fontsize=20)\n ax[1].grid()\n \n if save:\n if savefname=='default':\n string = '/'.join(axeflist[ii].split('/')[0:-1])\n string += '/{0}_axehelperbkg.png'.format(axeflist[ii].split('/')[-1].split('.')[0])\n else:\n string = savefname\n fig.savefig(string,bbox_inches='tight')\n \n ",
"# Kornpob Bhirombhakdi 20200331\n\nimport numpy as np\n\ndef make_SIP(coef,x,y,startx=True):\n \"\"\"\n Simple imaging polynomial (SIP) is a conventional method to describe non-linear variation in an image. Ref: https://fits.gsfc.nasa.gov/registry/sip/shupeADASS.pdf.\n ##########\n Assume a SIP model of order 2, i.e., Z = a0 + a1*X + a2*X**2.\n Typically, X is relative to SIP reference system whose origin is corresponding to (xref,yref) in the original image. Therefore, X = x - xref where (x,y) is an image pixel.\n Z is a quantity of interests. In aXe grism reduction, Z can be Y (as y = Y + yref) for trace, or wavelength.\n SIP coefficients are 2D with a given polynomial order. Assume the order is 3. Therefore, ai = ai0 + ai1*X' + ai2*Y' + ai3*X'**2 + ai4*X'*Y' + ai5*Y'**2 + ... + ai9*Y'**3.\n Note X is the leading term (this is specified by startx=True in make_SIP). Set startx=False otherwise.\n Note that X and X' might be different. For aXe reduction, (X',Y') = (xd,yd) as the source location from direct image.\n \"\"\"\n if startx:\n xref,yref = x,y\n else:\n xref,yref = y,x\n n = len(coef)\n d = []\n px,py = 0,0\n a = [(px,py)]\n b = [(xref,yref)]\n p = 0\n q = True\n while(q):\n if px==0:\n p+=1\n px=p\n py=0\n else:\n px-=1\n py+=1\n a.append((px,py))\n b.append((xref,yref))\n if len(a)>=len(coef):\n q = False\n a,b = np.array(a),np.array(b)\n c = b**a\n c = np.sum(c[:,0]*c[:,1]*coef)\n d.append(c)\n d = np.array(d)\n return d \n",
"import numpy as np\nfrom scipy.interpolate import interp2d\nimport copy\nclass PhotApCorr:\n def __init__(self):\n TABLE = {'HST-WFC3-IR': \n {'ref': ['WFC3 Instrument Handbook, Ch 7.6 + 9.3']\n ,'scale': 0.13\n ,'scaleunit': 'arcsec/pix'\n ,'type': 'radius'\n ,'row': 'apsize'\n ,'col': 'wave'\n ,'apunit': 'arcsec'\n ,'apsize': np.array((0.10,0.15,0.20,0.25,0.30\n ,0.40,0.50,0.60,0.80,1.00\n ,1.50,2.00\n ))\n ,'waveunit': 'A'\n ,'wave': np.array((7000.,8000.,9000.,10000.,11000.,12000.,13000.,14000.,15000.,16000.,17000.))\n ,'value' : np.array(((0.575,0.549,0.524,0.502,0.484,0.468,0.453,0.438,0.426,0.410,0.394)\n ,(0.736,0.714,0.685,0.653,0.623,0.596,0.575,0.558,0.550,0.539,0.531)\n ,(0.802,0.794,0.780,0.762,0.739,0.712,0.683,0.653,0.631,0.608,0.590)\n ,(0.831,0.827,0.821,0.813,0.804,0.792,0.776,0.756,0.735,0.708,0.679)\n ,(0.850,0.845,0.838,0.833,0.828,0.822,0.816,0.808,0.803,0.789,0.770)\n ,(0.878,0.876,0.869,0.859,0.850,0.845,0.841,0.838,0.840,0.836,0.832)\n ,(0.899,0.894,0.889,0.884,0.878,0.868,0.858,0.852,0.852,0.850,0.848)\n ,(0.916,0.913,0.904,0.897,0.893,0.889,0.883,0.875,0.870,0.863,0.859)\n ,(0.937,0.936,0.929,0.924,0.918,0.909,0.903,0.900,0.903,0.900,0.895)\n ,(0.951,0.951,0.946,0.941,0.935,0.930,0.925,0.920,0.917,0.912,0.909)\n ,(0.967,0.969,0.967,0.965,0.963,0.959,0.954,0.951,0.952,0.948,0.943)\n ,(0.974,0.977,0.976,0.975,0.973,0.972,0.969,0.967,0.970,0.967,0.963)\n ))\n ,'ZP': {'F098M': (9864.7,25.68),\n 'F105W': (10551.0,26.27),\n 'F110W': (11534.4,26.82),\n 'F125W': (12486.1,26.24),\n 'F140W': (13922.8,26.46),\n 'F160W': (15369.1,25.95)\n }\n ,'ZPunit': ('filter','pivot wavelength Angstrom','ABMAG ZP INF')\n ,'model': None\n }\n ,'HST-WFC3-UVIS': \n {'ref': ['WFC3 Instrument Handbook, Ch 6.6']\n ,'scale': 0.04\n ,'scaleunit': 'arcsec/pix'\n ,'type': 'radius'\n ,'row': 'apsize'\n ,'col': 'wave'\n ,'apunit': 'arcsec'\n ,'apsize': np.array((0.10,0.15,0.20,0.25,0.30\n ,0.40,0.50,0.60,0.80,1.00\n ,1.50,2.00\n ))\n ,'waveunit': 'A'\n ,'wave': np.array((2000.,3000.,4000.,5000.,6000.,7000.,8000.,9000.,10000.,11000.))\n ,'value' : np.array(((0.660,0.739,0.754,0.745,0.720,0.687,0.650,0.623,0.612,0.605)\n ,(0.717,0.793,0.823,0.834,0.832,0.823,0.807,0.778,0.742,0.699)\n ,(0.752,0.822,0.845,0.859,0.859,0.857,0.853,0.847,0.844,0.829)\n ,(0.781,0.844,0.864,0.875,0.877,0.874,0.870,0.867,0.868,0.864)\n ,(0.802,0.858,0.880,0.888,0.890,0.889,0.883,0.879,0.879,0.876)\n ,(0.831,0.880,0.899,0.911,0.910,0.907,0.906,0.904,0.900,0.894)\n ,(0.861,0.894,0.912,0.923,0.925,0.923,0.918,0.915,0.918,0.917)\n ,(0.884,0.906,0.922,0.932,0.934,0.933,0.931,0.927,0.927,0.923)\n ,(0.936,0.928,0.936,0.944,0.947,0.946,0.945,0.942,0.944,0.942)\n ,(0.967,0.946,0.948,0.954,0.955,0.955,0.955,0.952,0.955,0.952)\n ,(0.989,0.984,0.973,0.970,0.970,0.969,0.967,0.966,0.970,0.968)\n ,(0.994,0.992,0.989,0.985,0.980,0.977,0.976,0.975,0.978,0.976)\n ))\n ,'ZP': {'F606W': (5887.5,26.08),\n 'F814W': (8029.9,25.10)\n }\n ,'ZPunit': ('filter','pivot wavelength Angstrom','ABMAG ZP INF')\n ,'model': None\n }\n ,'HST-ACS-WFC':\n {'ref': ['http://www.stsci.edu/hst/instrumentation/acs/data-analysis/aperture-corrections'\n ,'https://ui.adsabs.harvard.edu/abs/2016AJ....152...60B/abstract'\n ,'ACS Instrument Handbook'\n ,'https://acszeropoints.stsci.edu/results_all/?date=2019-12-05&detector=WFC'\n ]\n ,'scale': 0.05\n ,'scaleunit': 'arcsec/pix'\n ,'type': 'radius'\n ,'row': 'apsize'\n ,'col': 'filter'\n ,'apunit': 'arcsec'\n ,'apsize': np.array((0.05,0.1,0.15,0.2,0.25\n ,0.3,0.35,0.4,0.45,0.5\n ,1.,2.\n ))\n ,'filter': 
np.array(('F435W','F475W','F502N','F555W','F550M',\n 'F606W','F625W','F658N','F660N','F775W',\n 'F814W','F892N','F850LP'))\n ,'value' : np.array(((0.330,0.663,0.792,0.839,0.863,0.877,0.887,0.895,0.902,0.907,0.941,0.979),\n (0.329,0.670,0.794,0.842,0.868,0.883,0.893,0.901,0.907,0.912,0.944,0.979),\n (0.328,0.670,0.794,0.842,0.869,0.884,0.894,0.902,0.909,0.914,0.945,0.978),\n (0.328,0.668,0.794,0.841,0.868,0.885,0.895,0.903,0.910,0.915,0.946,0.977),\n (0.328,0.666,0.794,0.840,0.867,0.885,0.896,0.904,0.910,0.915,0.947,0.976),\n (0.328,0.661,0.795,0.839,0.866,0.885,0.896,0.904,0.910,0.916,0.947,0.975),\n (0.330,0.655,0.795,0.838,0.864,0.884,0.896,0.904,0.911,0.916,0.948,0.974),\n (0.331,0.651,0.794,0.838,0.863,0.883,0.896,0.904,0.911,0.916,0.948,0.973),\n (0.331,0.650,0.794,0.838,0.863,0.883,0.896,0.904,0.911,0.916,0.948,0.973),\n (0.329,0.625,0.783,0.836,0.858,0.877,0.894,0.904,0.910,0.916,0.949,0.972),\n (0.322,0.611,0.770,0.830,0.853,0.871,0.889,0.901,0.908,0.914,0.949,0.972),\n (0.278,0.546,0.705,0.787,0.818,0.840,0.860,0.877,0.889,0.897,0.942,0.970),\n (0.268,0.532,0.690,0.776,0.810,0.833,0.853,0.871,0.884,0.893,0.940,0.970)\n )).T\n ,'ZP': {'F435W': (4329.2,25.662),\n 'F475W': (4746.2,26.053),\n 'F502N': (5023.0,22.282),\n 'F555W': (5360.9,25.711),\n 'F550M': (5581.5,24.853),\n 'F606W': (5922.0,26.495),\n 'F625W': (6312.0,25.902),\n 'F658N': (6584.0,22.761),\n 'F660N': (6599.4,21.711),\n 'F775W': (7693.2,25.664),\n 'F814W': (8045.0,25.942),\n 'F892N': (8914.8,22.397),\n 'F850LP': (9033.2,24.856)\n }\n ,'ZPunit': ('filter','pivot wavelength Angstrom','ABMAG ZP INF')\n ,'model': None\n }\n }\n self.table = TABLE\n self.instrument = list(TABLE.keys())\n self.make_model()\n def make_model(self):\n for i in self.instrument:\n if i in {'HST-WFC3-IR'}:\n apsize = np.copy(self.table[i]['apsize'])\n wave = np.copy(self.table[i]['wave'])\n value = np.copy(self.table[i]['value'])\n model = interp2d(wave,apsize,value,kind='linear',copy=True\n ,bounds_error=False,fill_value=np.nan\n )\n self.table[i]['model'] = copy.deepcopy(model)\n elif i in {'HST-ACS-WFC'}:\n apsize = np.copy(self.table[i]['apsize'])\n wave = []\n for j in self.table[i]['filter']:\n wave.append(self.table[i]['ZP'][j][0])\n wave = np.array(wave)\n value = np.copy(self.table[i]['value'])\n model = interp2d(wave,apsize,value,kind='linear',copy=True\n ,bounds_error=False,fill_value=np.nan\n )\n self.table[i]['model'] = copy.deepcopy(model)\n def make_apcorr(self,instrument,wave,apsize,apunit='pix'\n ,replace='median'\n ):\n apunittab = self.table[instrument]['apunit']\n model = self.table[instrument]['model']\n apsize2 = None\n value = None\n if (apunittab=='arcsec') & (apunit=='pix'):\n apsize2 = self.pix2arcsec(instrument,apsize)\n elif (apunittab=='pix') & (apunit=='arcsec'):\n apsize2 = self.arcsec2pix(instrument,apsize)\n value = model(wave,apsize2)\n if replace=='median':\n median = np.median(value[np.where(np.isfinite(value))])\n value[np.where(~np.isfinite(value))] = median\n value[np.where(value <= 0.)] = 0.\n value[np.where(value >= 1.)] = 1. \n return value\n def pix2arcsec(self,instrument=None,pixsize=None):\n out = None\n if not instrument:\n print('Error: instrument is required. Set to None')\n return\n if not pixsize:\n print('Error: pixsize is required. 
Set to None')\n return\n scale = self.table[instrument]['scale']\n scaleunit = self.table[instrument]['scaleunit']\n if scaleunit=='arcsec/pix':\n out = pixsize * scale\n elif scaleunit=='pix/arcsec':\n out = pixsize.astype(float) / scale\n else:\n print('Error: invalid scaleunit. Set to None')\n return out\n def arcsec2pix(self,instrument=None,arcsec=None):\n out = None\n if not instrument:\n print('Error: instrument is required. Set to None')\n return\n if not arcsec:\n print('Error: arcsec is required. Set to None')\n return\n scale = self.table[instrument]['scale']\n scaleunit = self.table[instrument]['scaleunit']\n if scaleunit=='arcsec/pix':\n out = arcsec.astype(float) / scale\n elif scaleunit=='pix/arcsec':\n out = arcsec.astype(float) * scale\n else:\n print('Error: invalid scaleunit. Set to None')\n return out\n"
] | [
[
"numpy.isfinite",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"numpy.full_like",
"numpy.tan",
"numpy.meshgrid",
"numpy.where"
],
[
"numpy.array",
"numpy.sum"
],
[
"numpy.isfinite",
"numpy.copy",
"scipy.interpolate.interp2d",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
markytools/eeedeeplearning-finalProj | [
"6a06d73091262fb996c990302692cff7d9eed3b1"
] | [
"train.py"
] | [
"import sys\nfrom optparse import OptionParser\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.autograd import Variable\n\nfrom eval import eval_net\nfrom models.unet import UNet\nfrom utils import *\n\ndef train_net(net, epochs=100, batch_size=2, lr=0.02, val_percent=0.1,\n cp=True, gpu=False):\n dir_img = '/media/markytools/503f6b96-90ca-4bfb-a99e-35f774205c77/EEE298/eee298deeplearning-finalproject-withdataset/LABELS_ONE_X/'\n dir_mask = '/media/markytools/503f6b96-90ca-4bfb-a99e-35f774205c77/EEE298/eee298deeplearning-finalproject-withdataset/LABELS_ONE_Y/'\n dir_checkpoint = './checkpoints'\n\n ids = get_ids(dir_img)\n ids = split_ids(ids)\n\n iddataset = split_train_val(ids, val_percent)\n\n print('''\n Starting training:\n Epochs: {}\n Batch size: {}\n Learning rate: {}\n Training size: {}\n Validation size: {}\n Checkpoints: {}\n CUDA: {}\n '''.format(epochs, batch_size, lr, len(iddataset['train']),\n len(iddataset['val']), str(cp), str(gpu)))\n\n N_train = len(iddataset['train'])\n\n optimizer = optim.Adam(net.parameters(),lr=lr,betas=(0.9,0.99))\n criterion = nn.BCELoss()\n\n for epoch in range(epochs):\n print('Starting epoch {}/{}.'.format(epoch + 1, epochs))\n\n # reset the generators\n train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask)\n val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask)\n\n epoch_loss = 0\n\n if 1:\n val_dice = eval_net(net, val, gpu)\n print('Validation Dice Coeff: {}'.format(val_dice))\n\n for i, b in enumerate(batch(train, batch_size)):\n X = np.array([i[0] for i in b])\n y = np.array([i[1] for i in b])\n\n X = torch.FloatTensor(X)\n y = torch.ByteTensor(y)\n\n if gpu:\n X = Variable(X).cuda()\n y = Variable(y).cuda()\n else:\n X = Variable(X)\n y = Variable(y)\n\n y_pred = net(X)\n probs = F.sigmoid(y_pred)\n probs_flat = probs.view(-1)\n\n y_flat = y.view(-1)\n\n loss = criterion(probs_flat, y_flat.float())\n epoch_loss += loss.data[0]\n\n print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,\n loss.data[0]))\n\n optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()\n\n print('Epoch finished ! 
Loss: {}'.format(epoch_loss / (i + 1)))\n\n if cp:\n torch.save(net.state_dict(),\n dir_checkpoint + '/CP{}.pth'.format(epoch + 1))\n\n print('Checkpoint {} saved !'.format(epoch + 1))\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-e', '--epochs', dest='epochs', default=300, type='int',\n help='number of epochs')\n parser.add_option('-b', '--batch-size', dest='batchsize', default=2,\n type='int', help='batch size')\n parser.add_option('-l', '--learning-rate', dest='lr', default=0.001,\n type='float', help='learning rate')\n parser.add_option('-g', '--gpu', action='store_true', dest='gpu',\n default=False, help='use cuda')\n parser.add_option('-m', '--model', dest='model', default=1,\n type='int', help='select model (int): (1-Unet, )')\n parser.add_option('-c', '--load', dest='load',\n default=False, help='load file model')\n\n (options, args) = parser.parse_args()\n\n if (options.model == 1):\n net = UNet(3, 1)\n\n if options.load:\n net.load_state_dict(torch.load(options.load))\n print('Model loaded from {}'.format(options.load))\n\n if options.gpu:\n net.cuda()\n cudnn.benchmark = True\n\n try:\n train_net(net, options.epochs, options.batchsize, options.lr,\n gpu=options.gpu)\n except KeyboardInterrupt:\n torch.save(net.state_dict(), 'INTERRUPTED.pth')\n print('Saved interrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n"
] | [
[
"torch.ByteTensor",
"torch.load",
"torch.nn.BCELoss",
"torch.nn.functional.sigmoid",
"torch.FloatTensor",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kacunningham413/PlasmoCount | [
"0213c63add92c8df1a53526af394bc9692ca4a62"
] | [
"api/programs/model.py"
] | [
"from pathlib import Path\nimport pandas as pd\nfrom PIL import Image as PILImage\nimport torch\nfrom torchvision import transforms, ops\nfrom fastai.basic_train import load_learner\nfrom fastai.vision import Image\nfrom fastai.core import FloatItem\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n\nclass Model:\n def __init__(self,\n model_path='./models',\n od_model='faster-rcnn.pt',\n class_model='class_resnet.pkl',\n ls_model='ls_resnet.pkl',\n gam_model='gam_resnet.pkl',\n cutoffs=[1.5, 2.5]):\n model_path = Path(model_path)\n device = torch.device(\n 'cuda') if torch.cuda.is_available() else torch.device('cpu')\n self.od_model = torch.load(str(model_path / od_model), device)\n self.od_model.eval()\n self.class_model = load_learner(path=model_path, file=class_model)\n self.ls_model = load_learner(path=model_path, file=ls_model)\n self.gam_model = load_learner(path=model_path, file=gam_model)\n self.cutoffs = cutoffs\n\n def load_image(self, fileName):\n self.fileName = fileName\n img = PILImage.open(self.fileName).convert(\"RGB\")\n tensor = transforms.ToTensor()(img)\n self.img = tensor\n return tensor\n\n def predict(self, has_gams):\n with torch.no_grad():\n prediction = self.od_model([self.img])[0]\n prediction = self.post_processing(prediction)\n # get crops for class detection\n classes = []\n life_stages = []\n for bbox in prediction['boxes']:\n x0, y0, x1, y1 = bbox.int()\n bbox_img = Image(self.img[:, y0:y1, x0:x1])\n bbox_pred = self.class_model.predict(bbox_img)\n if str(bbox_pred[0]) == 'infected':\n if has_gams:\n gam_pred = self.gam_model.predict(bbox_img)\n if str(gam_pred[0]) == 'asexual':\n ls_pred = self.ls_model.predict(bbox_img)\n else:\n ls_pred = [FloatItem(-1)]\n else:\n ls_pred = self.ls_model.predict(bbox_img)\n life_stages.append(ls_pred)\n else:\n life_stages.append(None)\n classes.append(bbox_pred)\n\n # format predictions\n result = {}\n result['boxes'] = pd.Series(prediction['boxes'].tolist())\n result['p_boxes'] = pd.Series(prediction['scores'].tolist())\n result = pd.DataFrame.from_dict(result)\n result[['classes', 'p_classes']] = pd.Series(classes).apply(\n lambda x: pd.Series([str(x[0]), (x[2][x[1]]).item()]))\n result['life_stage'] = pd.Series(life_stages).apply(\n lambda x: float(x[0].data) if x is not None else None)\n result['life_stage_c'] = result['life_stage'].apply(\n lambda x: self.calc_life_stages(x))\n\n return result\n\n def post_processing(self,\n pred,\n score_thresh=0.9,\n iou_thresh=0.5,\n z_thresh=4):\n pred = self.apply_score_filter(pred, score_thresh)\n pred = self.apply_nms(pred, iou_thresh)\n pred = self.apply_size_filter(pred, z_thresh)\n return pred\n\n def apply_nms(self, pred, iou_thresh):\n idx = ops.nms(pred[\"boxes\"], pred[\"scores\"], iou_thresh)\n for i in [\"boxes\", \"labels\", \"scores\"]:\n pred[i] = pred[i][idx]\n return pred\n\n def apply_score_filter(self, pred, thresh):\n idx = [i for i, score in enumerate(pred['scores']) if score > thresh]\n for i in [\"boxes\", \"labels\", \"scores\"]:\n pred[i] = pred[i][idx]\n return pred\n\n def calc_area(self, coods):\n return abs((coods[:, 2] - coods[:, 0]) * (coods[:, 3] - coods[:, 1]))\n\n def apply_size_filter(self, pred, z_thresh):\n area = self.calc_area(pred['boxes'])\n zscores = stats.zscore(area)\n idx = [i for i, score in enumerate(zscores) if abs(score) < z_thresh]\n for i in [\"boxes\", \"labels\", \"scores\"]:\n pred[i] = pred[i][idx]\n return pred\n\n def calc_life_stages(self, x):\n RT_cutoff, TS_cutoff = self.cutoffs\n if not x:\n return 
'uninfected'\n elif (x >= 0) & (x <= RT_cutoff):\n return 'ring'\n elif (x > RT_cutoff) & (x <= TS_cutoff):\n return 'trophozoite'\n elif (x > TS_cutoff):\n return 'schizont'\n elif (x == -1):\n return 'gametocyte'\n else:\n return 'uninfected'"
] | [
[
"pandas.Series",
"scipy.stats.zscore",
"torch.no_grad",
"torch.cuda.is_available",
"pandas.DataFrame.from_dict",
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
milinddeore/pytorch-vsumm-reinforce | [
"c3ca731c9a7f00282c8460deb47f34658cfc0522"
] | [
"utils/generate_dataset.py"
] | [
"\"\"\"\n Generate Dataset\n\n 1. Converting video to frames\n 2. Extracting features\n 3. Getting change points\n 4. User Summary ( for evaluation )\n\n\"\"\"\nimport os, sys\nsys.path.append('../')\nfrom networks.CNN import ResNet\nfrom utils.KTS.cpd_auto import cpd_auto\nfrom tqdm import tqdm\nimport math\nimport cv2\nimport numpy as np\nimport h5py\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-p', '--path', type=str, required=True, help=\"path of video file, whos h5 needs to generate.\")\nparser.add_argument('--h5_gen', type=str, required=True, help=\"path to h5 generated file\")\nargs = parser.parse_args()\n\n\nclass Generate_Dataset:\n def __init__(self, video_path, save_path):\n self.resnet = ResNet()\n self.dataset = {}\n self.video_list = []\n self.video_path = ''\n self.frame_root_path = './frames'\n self.h5_file = h5py.File(save_path, 'w')\n\n self._set_video_list(video_path)\n print('Video path : {} H5 autogen path : {}'.format(video_path, save_path))\n\n def _set_video_list(self, video_path):\n if os.path.isdir(video_path):\n self.video_path = video_path\n self.video_list = os.listdir(video_path)\n self.video_list.sort()\n else:\n self.video_path = ''\n self.video_list.append(video_path)\n\n for idx, file_name in enumerate(self.video_list):\n self.dataset['video_{}'.format(idx+1)] = {}\n self.h5_file.create_group('video_{}'.format(idx+1))\n\n\n def _extract_feature(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224))\n res_pool5 = self.resnet(frame)\n frame_feat = res_pool5.cpu().data.numpy().flatten()\n\n return frame_feat\n\n def _get_change_points(self, video_feat, n_frame, fps):\n print('n_frame {} fps {}'.format(n_frame, fps))\n n = n_frame / math.ceil(fps)\n m = int(math.ceil(n/2.0))\n K = np.dot(video_feat, video_feat.T)\n change_points, _ = cpd_auto(K, m, 1)\n change_points = np.concatenate(([0], change_points, [n_frame-1]))\n\n temp_change_points = []\n for idx in range(len(change_points)-1):\n segment = [change_points[idx], change_points[idx+1]-1]\n if idx == len(change_points)-2:\n segment = [change_points[idx], change_points[idx+1]]\n\n temp_change_points.append(segment)\n change_points = np.array(list(temp_change_points))\n\n temp_n_frame_per_seg = []\n for change_points_idx in range(len(change_points)):\n n_frame = change_points[change_points_idx][1] - change_points[change_points_idx][0]\n temp_n_frame_per_seg.append(n_frame)\n n_frame_per_seg = np.array(list(temp_n_frame_per_seg))\n\n return change_points, n_frame_per_seg\n\n # TODO : save dataset\n def _save_dataset(self):\n pass\n\n def generate_dataset(self):\n for video_idx, video_filename in enumerate(tqdm(self.video_list)):\n video_path = video_filename\n if os.path.isdir(self.video_path):\n video_path = os.path.join(self.video_path, video_filename)\n\n video_basename = os.path.basename(video_path).split('.')[0]\n\n if not os.path.exists(os.path.join(self.frame_root_path, video_basename)):\n os.mkdir(os.path.join(self.frame_root_path, video_basename))\n\n video_capture = cv2.VideoCapture(video_path)\n\n fps = video_capture.get(cv2.CAP_PROP_FPS)\n n_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\n #frame_list = []\n picks = []\n video_feat = None\n video_feat_for_train = None\n for frame_idx in tqdm(range(n_frames-1)):\n success, frame = video_capture.read()\n if success:\n frame_feat = self._extract_feature(frame)\n\n if frame_idx % 15 == 0:\n picks.append(frame_idx)\n\n if video_feat_for_train is None:\n 
video_feat_for_train = frame_feat\n else:\n video_feat_for_train = np.vstack((video_feat_for_train, frame_feat))\n\n if video_feat is None:\n video_feat = frame_feat\n else:\n video_feat = np.vstack((video_feat, frame_feat))\n\n img_filename = \"{}.jpg\".format(str(frame_idx).zfill(5))\n cv2.imwrite(os.path.join(self.frame_root_path, video_basename, img_filename), frame)\n\n else:\n break\n\n video_capture.release()\n\n change_points, n_frame_per_seg = self._get_change_points(video_feat, n_frames, fps)\n\n # self.dataset['video_{}'.format(video_idx+1)]['frames'] = list(frame_list)\n # self.dataset['video_{}'.format(video_idx+1)]['features'] = list(video_feat)\n # self.dataset['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))\n # self.dataset['video_{}'.format(video_idx+1)]['n_frames'] = n_frames\n # self.dataset['video_{}'.format(video_idx+1)]['fps'] = fps\n # self.dataset['video_{}'.format(video_idx+1)]['change_points'] = change_points\n # self.dataset['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg\n\n self.h5_file['video_{}'.format(video_idx+1)]['features'] = list(video_feat_for_train)\n self.h5_file['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))\n self.h5_file['video_{}'.format(video_idx+1)]['n_frames'] = n_frames\n self.h5_file['video_{}'.format(video_idx+1)]['fps'] = fps\n self.h5_file['video_{}'.format(video_idx+1)]['change_points'] = change_points\n self.h5_file['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg\n\nif __name__ == \"__main__\":\n gen = Generate_Dataset(args.path, args.h5_gen)\n gen.generate_dataset()\n gen.h5_file.close()\n"
] | [
[
"numpy.concatenate",
"numpy.dot",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jhabikal21/tensorflow | [
"98d20962172301385aae694141801a375debd2bc",
"98d20962172301385aae694141801a375debd2bc",
"98d20962172301385aae694141801a375debd2bc",
"98d20962172301385aae694141801a375debd2bc",
"98d20962172301385aae694141801a375debd2bc",
"98d20962172301385aae694141801a375debd2bc"
] | [
"tensorflow/python/kernel_tests/slice_op_test.py",
"tensorflow/contrib/tpu/python/tpu/training_loop.py",
"tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py",
"tensorflow/contrib/learn/python/learn/estimators/head.py",
"tensorflow/contrib/data/python/ops/batching.py",
"tensorflow/contrib/data/python/ops/prefetching_ops.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for slice op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.platform import test\n\n\nclass SliceTest(test.TestCase):\n\n def testEmpty(self):\n inp = np.random.rand(4, 4).astype(\"f\")\n for k in xrange(4):\n with self.test_session(use_gpu=True):\n a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)\n slice_t = a[2, k:k]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[2, k:k])\n\n def testInt32(self):\n inp = np.random.rand(4, 4).astype(\"i\")\n for k in xrange(4):\n with self.test_session(use_gpu=True):\n a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)\n slice_t = a[2, k:k]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[2, k:k])\n\n def testInt64Slicing(self):\n with self.test_session(use_gpu=True):\n a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)\n\n # Slice using int64 Tensor.\n i = constant_op.constant(1, dtype=dtypes.int64)\n slice_t = a[i]\n slice_val = slice_t.eval()\n self.assertAllEqual(1, slice_val)\n slice_t = a[i:i+1]\n slice_val = slice_t.eval()\n self.assertAllEqual([1], slice_val)\n\n # Slice using int64 integer.\n i = np.asarray(1).astype(np.int64)\n slice_t = a[i]\n slice_val = slice_t.eval()\n self.assertAllEqual(1, slice_val)\n slice_t = a[i:i+1]\n slice_val = slice_t.eval()\n self.assertAllEqual([1], slice_val)\n\n def testSelectAll(self):\n for _ in range(10):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(4, 4, 4, 4).astype(\"f\")\n a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)\n\n slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])\n slice_implicit_t = a[:, :, :, :]\n\n self.assertAllEqual(inp, slice_explicit_t.eval())\n self.assertAllEqual(inp, slice_implicit_t.eval())\n self.assertEqual(inp.shape, slice_explicit_t.get_shape())\n self.assertEqual(inp.shape, slice_implicit_t.get_shape())\n\n def testSingleDimension(self):\n for _ in range(10):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(10).astype(\"f\")\n a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)\n\n hi = np.random.randint(0, 9)\n scalar_t = a[hi]\n scalar_val = scalar_t.eval()\n self.assertAllEqual(scalar_val, inp[hi])\n\n if hi > 0:\n lo = np.random.randint(0, hi)\n else:\n lo = 0\n slice_t = a[lo:hi]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[lo:hi])\n\n def 
testScalarInput(self):\n input_val = 0\n with self.test_session() as sess:\n # Test with constant input; shape inference fails.\n with self.assertRaisesWithPredicateMatch(ValueError, \"out of range\"):\n constant_op.constant(input_val)[:].get_shape()\n\n # Test evaluating with non-constant input; kernel execution fails.\n input_t = array_ops.placeholder(dtypes.int32)\n slice_t = input_t[:]\n with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,\n \"out of range\"):\n sess.run([slice_t], feed_dict={input_t: input_val})\n\n def testInvalidIndex(self):\n input_val = [1, 2]\n with self.test_session() as sess:\n # Test with constant input; shape inference fails.\n with self.assertRaisesWithPredicateMatch(ValueError, \"out of range\"):\n constant_op.constant(input_val)[1:, 1:].get_shape()\n\n # Test evaluating with non-constant input; kernel execution fails.\n input_t = array_ops.placeholder(dtypes.int32)\n slice_t = input_t[1:, 1:]\n with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,\n \"out of range\"):\n sess.run([slice_t], feed_dict={input_t: input_val})\n\n def _testSliceMatrixDim0(self, x, begin, size):\n with self.test_session(use_gpu=True):\n tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval()\n np_ans = x[begin:begin + size, :]\n self.assertAllEqual(tf_ans, np_ans)\n\n def testSliceMatrixDim0(self):\n x = np.random.rand(8, 4).astype(\"f\")\n self._testSliceMatrixDim0(x, 1, 2)\n self._testSliceMatrixDim0(x, 3, 3)\n y = np.random.rand(8, 7).astype(\"f\") # 7 * sizeof(float) is not aligned\n self._testSliceMatrixDim0(y, 1, 2)\n self._testSliceMatrixDim0(y, 3, 3)\n\n def testSingleElementAll(self):\n for _ in range(10):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(4, 4).astype(\"f\")\n a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)\n\n x, y = np.random.randint(0, 3, size=2).tolist()\n slice_t = a[x, 0:y]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[x, 0:y])\n\n def testSimple(self):\n with self.test_session(use_gpu=True) as sess:\n inp = np.random.rand(4, 4).astype(\"f\")\n a = constant_op.constant(\n [float(x) for x in inp.ravel(order=\"C\")],\n shape=[4, 4],\n dtype=dtypes.float32)\n slice_t = array_ops.slice(a, [0, 0], [2, 2])\n slice2_t = a[:2, :2]\n slice_val, slice2_val = sess.run([slice_t, slice2_t])\n self.assertAllEqual(slice_val, inp[:2, :2])\n self.assertAllEqual(slice2_val, inp[:2, :2])\n self.assertEqual(slice_val.shape, slice_t.get_shape())\n self.assertEqual(slice2_val.shape, slice2_t.get_shape())\n\n def testComplex(self):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(4, 10, 10, 4).astype(\"f\")\n a = constant_op.constant(inp, dtype=dtypes.float32)\n\n x = np.random.randint(0, 9)\n z = np.random.randint(0, 9)\n if z > 0:\n y = np.random.randint(0, z)\n else:\n y = 0\n slice_t = a[:, x, y:z, :]\n self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])\n\n def testRandom(self):\n # Random dims of rank 6\n input_shape = np.random.randint(0, 20, size=6)\n inp = np.random.rand(*input_shape).astype(\"f\")\n with self.test_session(use_gpu=True) as sess:\n a = constant_op.constant(\n [float(x) for x in inp.ravel(order=\"C\")],\n shape=input_shape,\n dtype=dtypes.float32)\n indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]\n sizes = [\n np.random.randint(0, input_shape[i] - indices[i] + 1)\n for i in range(6)\n ]\n slice_t = array_ops.slice(a, indices, sizes)\n slice2_t = a[indices[0]:indices[0] + sizes[0], indices[1]:indices[\n 1] 
+ sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[3]\n + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:\n indices[5] + sizes[5]]\n\n slice_val, slice2_val = sess.run([slice_t, slice2_t])\n\n expected_val = inp[indices[0]:indices[0] + sizes[0], indices[1]:indices[\n 1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[\n 3] + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:indices[\n 5] + sizes[5]]\n self.assertAllEqual(slice_val, expected_val)\n self.assertAllEqual(slice2_val, expected_val)\n self.assertEqual(expected_val.shape, slice_t.get_shape())\n self.assertEqual(expected_val.shape, slice2_t.get_shape())\n\n def testPartialShapeInference(self):\n z = array_ops.zeros((1, 2, 3))\n self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])\n\n m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])\n self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])\n\n m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])\n self.assertAllEqual(m2.get_shape().as_list(), [None, 2, None])\n\n\n def _testGradientSlice(self, input_shape, slice_begin, slice_size):\n with self.test_session(use_gpu=True):\n num_inputs = np.prod(input_shape)\n num_grads = np.prod(slice_size)\n inp = np.random.rand(num_inputs).astype(\"f\").reshape(input_shape)\n a = constant_op.constant(\n [float(x) for x in inp.ravel(order=\"C\")],\n shape=input_shape,\n dtype=dtypes.float32)\n slice_t = array_ops.slice(a, slice_begin, slice_size)\n grads = np.random.rand(num_grads).astype(\"f\").reshape(slice_size)\n grad_tensor = constant_op.constant(grads)\n grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]\n result = grad.eval()\n\n # Create a zero tensor of the input shape ane place\n # the grads into the right location to compare against TensorFlow.\n np_ans = np.zeros(input_shape)\n slices = []\n for i in xrange(len(input_shape)):\n slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))\n np_ans[slices] = grads\n\n self.assertAllClose(np_ans, result)\n\n def _testGradientVariableSize(self):\n with self.test_session(use_gpu=True):\n inp = constant_op.constant([1.0, 2.0, 3.0], name=\"in\")\n out = array_ops.slice(inp, [1], [-1])\n grad_actual = gradients_impl.gradients(out, inp)[0].eval()\n self.assertAllClose([0., 1., 1.], grad_actual)\n\n def testGradientsAll(self):\n # Slice the middle square out of a 4x4 input\n self._testGradientSlice([4, 4], [1, 1], [2, 2])\n\n # Slice the upper left square out of a 4x4 input\n self._testGradientSlice([4, 4], [0, 0], [2, 2])\n\n # Slice a non-square input starting from (2,1)\n self._testGradientSlice([4, 4], [2, 1], [1, 2])\n\n # Slice a 3D tensor\n self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])\n\n # Use -1 as a slice dimension.\n self._testGradientVariableSize()\n\n def testNotIterable(self):\n # NOTE (mrry): If we register __getitem__ as an overloaded id:2995 gh:2996\n # operator, Python will valiantly attempt to iterate over the\n # Tensor from 0 to infinity. 
This test ensures that this\n # unintended behavior is prevented.\n c = constant_op.constant(5.0)\n with self.assertRaisesWithPredicateMatch(\n TypeError, lambda e: \"`Tensor` objects are not iterable\" in str(e)):\n for _ in c:\n pass\n\n def testComputedShape(self):\n # NOTE (mrry): We cannot currently handle partially-known values, id:3496 gh:3497\n # because `tf.slice()` uses -1 to specify a wildcard size, and\n # this can't be handled using the\n # `tensor_util.constant_value_as_shape()` trick.\n a = constant_op.constant([[1, 2, 3], [4, 5, 6]])\n begin = constant_op.constant(0)\n size = constant_op.constant(1)\n b = array_ops.slice(a, [begin, 0], [size, 2])\n self.assertEqual([1, 2], b.get_shape())\n\n begin = array_ops.placeholder(dtypes.int32, shape=())\n c = array_ops.slice(a, [begin, 0], [-1, 2])\n self.assertEqual([None, 2], c.get_shape().as_list())\n\n def testSliceOfSlice(self):\n with self.test_session(use_gpu=True):\n a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n b = a[1:, :]\n c = b[:-1, :]\n d = c[1, :]\n res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]\n self.assertAllEqual([0, 0, 0], res.eval())\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"Library for constructing a training loop, suitable for TPUs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.tpu.python.tpu import tpu_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\n\n\ndef while_loop(condition, body, inputs=None, infeed_queue=None, name=None):\n \"\"\"Builds a training loop for TPUs.\n\n The set of loop-carried tensors corresponds to `inputs`. Both\n `condition` and `body` take the current value of the loop-carried\n tensors. 'body' additionally takes a tuple of infeed from\n infeed_queue if infeed_queue is not None. `condition` must return a\n single boolean value that determines whether iteration\n continues. `body` must return an updated list of values for the\n loop-carried tensors.\n\n Args:\n condition: a Python function that builds the loop condition.\n body: a Python function that builds the loop body.\n inputs: a list of initial values passed into the training loop, or\n None (equivalent to an empty list).\n infeed_queue: if not None, the infeed queue from which to append a tuple\n of arguments as inputs to condition.\n name: an optional name for the loop.\n\n Returns:\n The final values of the loop-carried tensors.\n\n Raises:\n TypeError: if body or condition has the wrong signature.\n \"\"\"\n\n # Converts inputs to Tensors.\n inputs = [] if inputs is None else [ops.convert_to_tensor(x) for\n x in inputs]\n input_types = [x.dtype for x in inputs]\n input_arity = len(inputs)\n\n body_arg_error = tpu_function.check_function_argument_count(\n body, input_arity, infeed_queue)\n if body_arg_error is not None:\n if infeed_queue is None:\n raise TypeError(\n \"Supplied loop body function cannot be called with the specified \"\n \"inputs. You specified %d inputs: %s, but the loop body needs %s\" % (\n input_arity, str([i.name for i in inputs]), body_arg_error))\n else:\n raise TypeError(\n \"Supplied loop body function cannot be called with the specified \"\n \"inputs. You specified %d inputs: %s and %d additional inputs from \"\n \"infeed, but the computation needs %s\" % (input_arity, str(\n [i.name for i in inputs]), infeed_queue.number_of_tuple_elements,\n body_arg_error))\n condition_arg_error = tpu_function.check_function_argument_count(\n condition, input_arity, None)\n if condition_arg_error is not None:\n if infeed_queue is None:\n raise TypeError(\n \"Supplied loop condition function cannot be called with the \"\n \"specified inputs. 
You specified %d inputs: %s, but the loop \"\n \"condition needs %s\" % (input_arity, str([i.name for i in inputs]),\n condition_arg_error))\n else:\n raise TypeError(\n \"Supplied loop condition function cannot be called with the \"\n \"specified inputs. You specified %d inputs: %s, but the loop \"\n \"condition needs %s. Note that infeed is not passed to the loop \"\n \"condition.\" % (input_arity, str([i.name for i in inputs]),\n condition_arg_error))\n\n def condition_wrapper(*inputs):\n # Discards the dummy output added for arity-0 loops.\n if input_arity == 0:\n inputs = []\n return condition(*inputs)\n\n def body_wrapper(*inputs):\n \"\"\"Wrapper around `body` that handles infeed queues and control deps.\"\"\"\n inputs = list(inputs)\n\n # Discards the dummy output added for arity-0 loops.\n if input_arity == 0:\n inputs = []\n\n # Runs `body` with the dequeue_ops appended.\n if infeed_queue:\n number_of_shards = tpu_function.get_tpu_context().number_of_shards\n if number_of_shards is None:\n raise ValueError(\"Can't build training loop with infeed when there is \"\n \"no tpu_shard_context. Are you building a loop or \"\n \"graph directly rather than from inside tpu.rewrite, \"\n \"tpu.batch_parallel, tpu.shard, or tpu.replicate?\")\n infeed_queue.set_number_of_shards(number_of_shards)\n dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()]\n else:\n dequeue_ops = []\n outputs = body(*(inputs + dequeue_ops))\n\n # If the computation only returned one value, make it a tuple.\n if not isinstance(outputs, (list, tuple)):\n outputs = (outputs,)\n\n outputs = [\n o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)\n for o in outputs\n ]\n\n # Separates the returned Operations and Tensors.\n output_operations = [o for o in outputs if isinstance(o, ops.Operation)]\n output_tensors = [o for o in outputs\n if not isinstance(o, ops.Operation)]\n\n if outputs != output_tensors + output_operations:\n raise ValueError(\n \"TPU training loop body must return zero or more Tensor values \"\n \"followed by zero or more Operations.\")\n\n output_types = [op.dtype for op in output_tensors]\n if input_types != output_types:\n raise TypeError(\n \"Mismatch between input types and output types for training loop \"\n \"body: {} vs {}\".format(input_types, output_types))\n\n # Add the dequeue operations to output_operations to ensure they are run\n # by the loop, even if the programmer's loop body does not use them.\n output_operations += dequeue_ops\n\n # Add a dummy output, if needed.\n if not output_tensors:\n output_tensors = array_ops.constant(0)\n\n if output_operations:\n # TODO (phawkins): in principle this is too restrictive since it serializes id:1047 gh:1048\n # the training loop steps. 
In practice it does not matter since this loop\n # will be compiled by XLA.\n return control_flow_ops.tuple(output_tensors,\n control_inputs=output_operations)\n else:\n return output_tensors\n\n # If the body has arity 0, add a dummy loop-carried value to which we can add\n # control dependencies from any side-effecting operations.\n if input_arity == 0:\n inputs = [array_ops.constant(0)]\n return control_flow_ops.while_loop(condition_wrapper, body_wrapper, inputs,\n name=name)\n\n\ndef repeat(n, body, inputs=None, infeed_queue=None, name=None):\n \"\"\"Builds a training loop that executes a fixed number of interations.\n\n The set of loop-carried tensors correspond to `inputs`.\n `body` must be a function that takes and returns the values of the\n loop-carried tensors.\n\n Args:\n n: the number of loop iterations\n body: a Python function that builds the loop body.\n inputs: a list of initial values passed into the training loop or\n None (equivalent to an empty list).\n infeed_queue: if not None, the infeed queue from which to append a tuple\n of arguments as inputs to condition.\n name: an optional name for the loop.\n Returns:\n The final values of the loop-carried tensors.\n Raises:\n ValueError: if there is a type error.\n \"\"\"\n def _convert_to_list(xs):\n if not isinstance(xs, (list, tuple)):\n return [xs]\n else:\n return list(xs)\n\n def cond(i, *args):\n del args\n return i < n\n\n def body_wrapper(i, *args):\n return [i + 1] + _convert_to_list(body(*args))\n\n inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)\n outputs = while_loop(\n cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)\n outputs = _convert_to_list(outputs)\n if len(outputs) == 1:\n # Returns the Op rather than an empty list.\n return outputs[0].op\n else:\n return outputs[1:]\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training functions for Gradient boosted decision trees.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib import stateless\n\nfrom tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler\nfrom tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler\nfrom tensorflow.contrib.boosted_trees.proto import learner_pb2\nfrom tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils\nfrom tensorflow.contrib.boosted_trees.python.ops import gen_model_ops\nfrom tensorflow.contrib.boosted_trees.python.ops import model_ops\nfrom tensorflow.contrib.boosted_trees.python.ops import prediction_ops\nfrom tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops\nfrom tensorflow.contrib.boosted_trees.python.ops import training_ops\nfrom tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib\nfrom tensorflow.contrib.layers.python.layers import feature_column_ops\nfrom tensorflow.python.feature_column import feature_column as fc_core\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import device_setter\n\n# Key names for prediction dict.\nENSEMBLE_STAMP = \"ensemble_stamp\"\nPREDICTIONS = \"predictions\"\nPARTITION_IDS = \"partition_ids\"\nNUM_LAYERS_ATTEMPTED = \"num_layers\"\nNUM_TREES_ATTEMPTED = \"num_trees\"\n_FEATURE_NAME_TEMPLATE = \"%s_%d\"\n\n\ndef _get_column_by_index(tensor, indices):\n \"\"\"Returns columns from a 2-D tensor by index.\"\"\"\n shape = array_ops.shape(tensor)\n p_flat = array_ops.reshape(tensor, [-1])\n i_flat = array_ops.reshape(\n array_ops.reshape(math_ops.range(0, shape[0]) * shape[1], [-1, 1]) +\n indices, [-1])\n return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], -1])\n\n\ndef _make_predictions_dict(stamp, logits, partition_ids, ensemble_stats):\n \"\"\"Returns predictions for the given logits and n_classes.\n\n Args:\n stamp: The ensemble stamp.\n logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1].\n that contains predictions when no dropout was applied.\n partition_ids: A rank 
1 `Tensor` with shape [batch_size].\n ensemble_stats: A TreeEnsembleStatsOp result tuple.\n\n Returns:\n A dict of predictions.\n \"\"\"\n result = {}\n result[ENSEMBLE_STAMP] = stamp\n result[PREDICTIONS] = logits\n result[PARTITION_IDS] = partition_ids\n result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers\n result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees\n return result\n\n\nclass _OpRoundRobinStrategy(object):\n \"\"\"Returns the next ps task index for placement via per-Op round-robin order.\n\n This strategy works slightly better for the GBDT graph because of using\n custom resources which vary significantly in compute cost.\n \"\"\"\n\n def __init__(self, ps_ops, num_tasks):\n \"\"\"Create a new `_RoundRobinStrategy`.\n\n Args:\n ps_ops: List of Op types to place on PS.\n num_tasks: Number of ps tasks to cycle among.\n \"\"\"\n next_task = 0\n self._next_task_per_op = {}\n for op in ps_ops:\n self._next_task_per_op[op] = next_task\n next_task = (next_task + 1) % num_tasks if num_tasks else 0\n self._num_tasks = num_tasks\n\n def __call__(self, op):\n \"\"\"Choose a ps task index for the given `Operation`.\n\n Args:\n op: An `Operation` to be placed on ps.\n\n Returns:\n The next ps task index to use for the `Operation`. Returns the next\n index, in the range `[offset, offset + num_tasks)`.\n\n Raises:\n ValueError: If attempting to place non-PS Op.\n \"\"\"\n if op.type not in self._next_task_per_op:\n raise ValueError(\"Unknown op type '%s' for placement:\" % op.type)\n task = self._next_task_per_op[op.type]\n self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks\n if self._num_tasks else 0)\n return task\n\n\ndef extract_features(features, feature_columns):\n \"\"\"Extracts columns from a dictionary of features.\n\n Args:\n features: `dict` of `Tensor` objects.\n feature_columns: A list of feature_columns.\n\n Returns:\n Seven values:\n - A list of all feature column names.\n - A list of dense floats.\n - A list of sparse float feature indices.\n - A list of sparse float feature values.\n - A list of sparse float feature shapes.\n - A list of sparse int feature indices.\n - A list of sparse int feature values.\n - A list of sparse int feature shapes.\n Raises:\n ValueError: if features is not valid.\n \"\"\"\n if not features:\n raise ValueError(\"Features dictionary must be specified.\")\n\n # Make a shallow copy of features to ensure downstream usage\n # is unaffected by modifications in the model function.\n features = copy.copy(features)\n if feature_columns:\n scope = \"gbdt\"\n with variable_scope.variable_scope(scope):\n feature_columns = list(feature_columns)\n transformed_features = {}\n for fc in feature_columns:\n # pylint: disable=protected-access\n if isinstance(fc, feature_column_lib._EmbeddingColumn):\n # pylint: enable=protected-access\n transformed_features[fc.name] = fc_core.input_layer(\n features, [fc],\n weight_collections=[scope])\n else:\n result = feature_column_ops.transform_features(features, [fc])\n if len(result) > 1:\n raise ValueError(\"Unexpected number of output features\")\n transformed_features[fc.name] = result[list(result.keys())[0]]\n features = transformed_features\n\n dense_float_names = []\n dense_floats = []\n sparse_float_names = []\n sparse_float_indices = []\n sparse_float_values = []\n sparse_float_shapes = []\n sparse_int_names = []\n sparse_int_indices = []\n sparse_int_values = []\n sparse_int_shapes = []\n for key in sorted(features.keys()):\n tensor = features[key]\n if isinstance(tensor, 
sparse_tensor.SparseTensor):\n if tensor.values.dtype == dtypes.float32:\n sparse_float_names.append(key)\n sparse_float_indices.append(tensor.indices)\n sparse_float_values.append(tensor.values)\n sparse_float_shapes.append(tensor.dense_shape)\n elif tensor.values.dtype == dtypes.int64:\n sparse_int_names.append(key)\n sparse_int_indices.append(tensor.indices)\n sparse_int_values.append(tensor.values)\n sparse_int_shapes.append(tensor.dense_shape)\n else:\n raise ValueError(\"Unsupported sparse feature %s with dtype %s.\" %\n (tensor.indices.name, tensor.dtype))\n else:\n if tensor.dtype == dtypes.float32:\n if len(tensor.shape) > 1 and tensor.shape[1] > 1:\n unstacked = array_ops.unstack(tensor, axis=1)\n for i in range(len(unstacked)):\n dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))\n dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))\n else:\n dense_float_names.append(key)\n dense_floats.append(tensor)\n else:\n raise ValueError(\"Unsupported dense feature %s with dtype %s.\" %\n (tensor.name, tensor.dtype))\n # Feature columns are logically organized into incrementing slots starting\n # from dense floats, then sparse floats then sparse ints.\n fc_names = (dense_float_names + sparse_float_names + sparse_int_names)\n return (fc_names, dense_floats, sparse_float_indices, sparse_float_values,\n sparse_float_shapes, sparse_int_indices, sparse_int_values,\n sparse_int_shapes)\n\n\ndef _dropout_params(mode, ensemble_stats):\n \"\"\"Returns parameters relevant for dropout.\n\n Args:\n mode: Train/Eval/Infer\n ensemble_stats: A TreeEnsembleStatsOp result tuple.\n\n Returns:\n Whether to apply dropout and a dropout seed.\n \"\"\"\n if mode == learn.ModeKeys.TRAIN:\n # Do dropout only during training.\n apply_dropout = True\n seed = ensemble_stats.attempted_trees\n else:\n seed = -1\n apply_dropout = False\n return apply_dropout, seed\n\n\nclass GradientBoostedDecisionTreeModel(object):\n \"\"\"A GBDT model function.\"\"\"\n\n def __init__(self,\n is_chief,\n num_ps_replicas,\n ensemble_handle,\n center_bias,\n examples_per_layer,\n learner_config,\n features,\n logits_dimension,\n feature_columns=None):\n \"\"\"Construct a new GradientBoostedDecisionTreeModel function.\n\n Args:\n is_chief: Whether to build the chief graph.\n num_ps_replicas: Number of parameter server replicas, can be 0.\n ensemble_handle: A handle to the ensemble variable.\n center_bias: Whether to center the bias before growing trees.\n examples_per_layer: Number of examples to accumulate before growing\n a tree layer. 
It can also be a function that computes the number of\n examples based on the depth of the layer that's being built.\n learner_config: A learner config.\n features: `dict` of `Tensor` objects.\n logits_dimension: An int, the dimension of logits.\n feature_columns: A list of feature columns.\n\n Raises:\n ValueError: if inputs are not valid.\n \"\"\"\n if ensemble_handle is None:\n raise ValueError(\"ensemble_handle must be specified.\")\n\n if learner_config is None:\n raise ValueError(\"learner_config must be specified.\")\n\n if learner_config.num_classes < 2:\n raise ValueError(\"Number of classes must be >=2\")\n\n self._logits_dimension = logits_dimension\n self._is_chief = is_chief\n self._num_ps_replicas = num_ps_replicas\n self._ensemble_handle = ensemble_handle\n self._center_bias = center_bias\n self._examples_per_layer = examples_per_layer\n\n # Fill in the defaults.\n if (learner_config.multi_class_strategy ==\n learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):\n if logits_dimension == 1:\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.TREE_PER_CLASS)\n else:\n learner_config.multi_class_strategy = (\n learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)\n\n if (learner_config.growing_mode ==\n learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):\n learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER\n\n if (learner_config.pruning_mode ==\n learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):\n learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE\n\n if learner_config.constraints.max_tree_depth == 0:\n # Use 6 as the default maximum depth.\n learner_config.constraints.max_tree_depth = 6\n\n tuner = learner_config.learning_rate_tuner.WhichOneof(\"tuner\")\n if not tuner:\n learner_config.learning_rate_tuner.fixed.learning_rate = 0.1\n\n self._learner_config = learner_config\n self._feature_columns = feature_columns\n self._learner_config_serialized = learner_config.SerializeToString()\n self._attempted_trees = variables.Variable(\n initial_value=array_ops.zeros([], dtypes.int64), trainable=False,\n name=\"attempted_trees\")\n self._finalized_trees = variables.Variable(\n initial_value=array_ops.zeros([], dtypes.int64), trainable=False,\n name=\"finalized_trees\")\n if not features:\n raise ValueError(\"Features dictionary must be specified.\")\n (fc_names, dense_floats, sparse_float_indices, sparse_float_values,\n sparse_float_shapes, sparse_int_indices, sparse_int_values,\n sparse_int_shapes) = extract_features(features, self._feature_columns)\n logging.info(\"Active Feature Columns: \" + str(fc_names))\n self._fc_names = fc_names\n self._dense_floats = dense_floats\n self._sparse_float_indices = sparse_float_indices\n self._sparse_float_values = sparse_float_values\n self._sparse_float_shapes = sparse_float_shapes\n self._sparse_int_indices = sparse_int_indices\n self._sparse_int_values = sparse_int_values\n self._sparse_int_shapes = sparse_int_shapes\n self._reduce_dim = (self._learner_config.multi_class_strategy ==\n learner_pb2.LearnerConfig.TREE_PER_CLASS and\n learner_config.num_classes == 2)\n\n def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):\n \"\"\"Runs prediction and returns a dictionary of the prediction results.\n\n Args:\n ensemble_handle: ensemble resource handle.\n ensemble_stamp: stamp of ensemble resource.\n mode: learn.ModeKeys.TRAIN or EVAL or INFER.\n\n Returns:\n a dictionary of prediction results -\n ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,\n NUM_LAYER_ATTEMPTED, 
NUM_TREES_ATTEMPED.\n \"\"\"\n ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,\n ensemble_stamp)\n # We don't need dropout info - we can always restore it based on the\n # seed.\n apply_dropout, seed = _dropout_params(mode, ensemble_stats)\n # Make sure ensemble stats run. This will check that the ensemble has\n # the right stamp.\n with ops.control_dependencies(ensemble_stats):\n predictions, _ = prediction_ops.gradient_trees_prediction(\n ensemble_handle,\n seed,\n self._dense_floats,\n self._sparse_float_indices,\n self._sparse_float_values,\n self._sparse_float_shapes,\n self._sparse_int_indices,\n self._sparse_int_values,\n self._sparse_int_shapes,\n learner_config=self._learner_config_serialized,\n apply_dropout=apply_dropout,\n apply_averaging=mode != learn.ModeKeys.TRAIN,\n use_locking=True,\n center_bias=self._center_bias,\n reduce_dim=self._reduce_dim)\n partition_ids = prediction_ops.gradient_trees_partition_examples(\n ensemble_handle,\n self._dense_floats,\n self._sparse_float_indices,\n self._sparse_float_values,\n self._sparse_float_shapes,\n self._sparse_int_indices,\n self._sparse_int_values,\n self._sparse_int_shapes,\n use_locking=True)\n\n return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,\n ensemble_stats)\n\n def predict(self, mode):\n \"\"\"Returns predictions given the features and mode.\n\n Args:\n mode: Mode the graph is running in (train|predict|eval).\n\n Returns:\n A dict of predictions tensors.\n\n Raises:\n ValueError: if features is not valid.\n \"\"\"\n\n # Use the current ensemble to predict on the current batch of input.\n # For faster prediction we check if the inputs are on the same device\n # as the model. If not, we create a copy of the model on the worker.\n input_deps = (self._dense_floats + self._sparse_float_indices +\n self._sparse_int_indices)\n if not input_deps:\n raise ValueError(\"No input tensors for prediction.\")\n\n if any(i.device != input_deps[0].device for i in input_deps):\n raise ValueError(\"All input tensors should be on the same device.\")\n\n # Get most current model stamp.\n ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)\n\n # Determine if ensemble is colocated with the inputs.\n if self._ensemble_handle.device != input_deps[0].device:\n # Create a local ensemble and get its local stamp.\n with ops.name_scope(\"local_ensemble\", \"TreeEnsembleVariable\") as name:\n local_ensemble_handle = (\n gen_model_ops.decision_tree_ensemble_resource_handle_op(name=name))\n create_op = gen_model_ops.create_tree_ensemble_variable(\n local_ensemble_handle, stamp_token=-1, tree_ensemble_config=\"\")\n with ops.control_dependencies([create_op]):\n local_stamp = model_ops.tree_ensemble_stamp_token(\n local_ensemble_handle)\n\n # Determine whether the local ensemble is stale and update it if needed.\n def _refresh_local_ensemble_fn():\n # Serialize the model from parameter server after reading all inputs.\n with ops.control_dependencies(input_deps):\n (ensemble_stamp, serialized_model) = (\n model_ops.tree_ensemble_serialize(self._ensemble_handle))\n\n # Update local ensemble with the serialized model from parameter server.\n with ops.control_dependencies([create_op]):\n return model_ops.tree_ensemble_deserialize(\n local_ensemble_handle,\n stamp_token=ensemble_stamp,\n tree_ensemble_config=serialized_model), ensemble_stamp\n\n refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(\n math_ops.not_equal(ensemble_stamp,\n local_stamp), _refresh_local_ensemble_fn,\n 
lambda: (control_flow_ops.no_op(), ensemble_stamp))\n\n # Once updated, use the local model for prediction.\n with ops.control_dependencies([refresh_local_ensemble]):\n return self._predict_and_return_dict(local_ensemble_handle,\n ensemble_stamp, mode)\n else:\n # Use ensemble_handle directly, if colocated.\n with ops.device(self._ensemble_handle.device):\n return self._predict_and_return_dict(self._ensemble_handle,\n ensemble_stamp, mode)\n\n def train(self, loss, predictions_dict, labels):\n \"\"\"Grows a new tree and adds it to the ensemble.\n\n Args:\n loss: A scalar tensor representing average loss of examples.\n predictions_dict: Dictionary of Rank 2 `Tensor` representing information\n about predictions per example.\n labels: Rank 2 `Tensor` representing labels per example.\n\n Returns:\n An op that adds a new tree to the ensemble.\n\n Raises:\n ValueError: if inputs are not valid.\n \"\"\"\n # Get the worker device from input dependencies.\n input_deps = (self._dense_floats + self._sparse_float_indices +\n self._sparse_int_indices)\n worker_device = input_deps[0].device\n\n # Get tensors relevant for training and form the loss.\n predictions = predictions_dict[PREDICTIONS]\n partition_ids = predictions_dict[PARTITION_IDS]\n ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]\n gradients = gradients_impl.gradients(\n loss,\n predictions,\n name=\"Gradients\",\n colocate_gradients_with_ops=False,\n gate_gradients=0,\n aggregation_method=None)[0]\n strategy = self._learner_config.multi_class_strategy\n\n class_id = -1\n # Handle different multiclass strategies.\n if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:\n # We build one vs rest trees.\n gradient_shape = tensor_shape.scalar()\n hessian_shape = tensor_shape.scalar()\n\n if self._logits_dimension == 1:\n # We have only 1 score, gradients is of shape [batch, 1].\n hessians = gradients_impl.gradients(\n gradients,\n predictions,\n name=\"Hessian\",\n colocate_gradients_with_ops=False,\n gate_gradients=0,\n aggregation_method=None)[0]\n\n squeezed_gradients = array_ops.squeeze(gradients, axis=[1])\n squeezed_hessians = array_ops.squeeze(hessians, axis=[1])\n else:\n hessian_list = self._diagonal_hessian(gradients, predictions)\n # Assemble hessian list into a tensor.\n hessians = array_ops.stack(hessian_list, axis=1)\n\n # Choose the class for which the tree is built (one vs rest).\n class_id = math_ops.to_int32(\n predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension)\n\n # Use class id tensor to get the column with that index from gradients\n # and hessians.\n squeezed_gradients = array_ops.squeeze(\n _get_column_by_index(gradients, class_id))\n squeezed_hessians = array_ops.squeeze(\n _get_column_by_index(hessians, class_id))\n else:\n # Other multiclass strategies.\n gradient_shape = tensor_shape.TensorShape([self._logits_dimension])\n\n if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:\n hessian_shape = tensor_shape.TensorShape(\n ([self._logits_dimension, self._logits_dimension]))\n hessian_list = self._full_hessian(gradients, predictions)\n else:\n # Diagonal hessian strategy.\n hessian_shape = tensor_shape.TensorShape(([self._logits_dimension]))\n hessian_list = self._diagonal_hessian(gradients, predictions)\n\n squeezed_gradients = gradients\n hessians = array_ops.stack(hessian_list, axis=1)\n squeezed_hessians = hessians\n\n # Get the weights for each example for quantiles calculation,\n weights = self._get_weights(hessian_shape, squeezed_hessians)\n\n regularization_config = 
self._learner_config.regularization\n min_node_weight = self._learner_config.constraints.min_node_weight\n # Create all handlers ensuring resources are evenly allocated across PS.\n fc_name_idx = 0\n handlers = []\n init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)\n with ops.device(self._get_replica_device_setter(worker_device)):\n # Create handlers for dense float columns\n for dense_float_column_idx in range(len(self._dense_floats)):\n fc_name = self._fc_names[fc_name_idx]\n handlers.append(\n ordinal_split_handler.DenseSplitHandler(\n l1_regularization=regularization_config.l1,\n l2_regularization=regularization_config.l2,\n tree_complexity_regularization=(\n regularization_config.tree_complexity),\n min_node_weight=min_node_weight,\n feature_column_group_id=dense_float_column_idx,\n epsilon=0.01,\n num_quantiles=100,\n dense_float_column=self._dense_floats[dense_float_column_idx],\n name=fc_name,\n gradient_shape=gradient_shape,\n hessian_shape=hessian_shape,\n multiclass_strategy=strategy,\n init_stamp_token=init_stamp_token))\n fc_name_idx += 1\n\n # Create handlers for sparse float columns.\n for sparse_float_column_idx in range(len(self._sparse_float_indices)):\n fc_name = self._fc_names[fc_name_idx]\n handlers.append(\n ordinal_split_handler.SparseSplitHandler(\n l1_regularization=regularization_config.l1,\n l2_regularization=regularization_config.l2,\n tree_complexity_regularization=(\n regularization_config.tree_complexity),\n min_node_weight=min_node_weight,\n feature_column_group_id=sparse_float_column_idx,\n epsilon=0.01,\n num_quantiles=100,\n sparse_float_column=sparse_tensor.SparseTensor(\n self._sparse_float_indices[sparse_float_column_idx],\n self._sparse_float_values[sparse_float_column_idx],\n self._sparse_float_shapes[sparse_float_column_idx]),\n name=fc_name,\n gradient_shape=gradient_shape,\n hessian_shape=hessian_shape,\n multiclass_strategy=strategy,\n init_stamp_token=init_stamp_token))\n fc_name_idx += 1\n\n # Create handlers for sparse int columns.\n for sparse_int_column_idx in range(len(self._sparse_int_indices)):\n fc_name = self._fc_names[fc_name_idx]\n handlers.append(\n categorical_split_handler.EqualitySplitHandler(\n l1_regularization=regularization_config.l1,\n l2_regularization=regularization_config.l2,\n tree_complexity_regularization=(\n regularization_config.tree_complexity),\n min_node_weight=min_node_weight,\n feature_column_group_id=sparse_int_column_idx,\n sparse_int_column=sparse_tensor.SparseTensor(\n self._sparse_int_indices[sparse_int_column_idx],\n self._sparse_int_values[sparse_int_column_idx],\n self._sparse_int_shapes[sparse_int_column_idx]),\n name=fc_name,\n gradient_shape=gradient_shape,\n hessian_shape=hessian_shape,\n multiclass_strategy=strategy,\n init_stamp_token=init_stamp_token))\n fc_name_idx += 1\n\n # Create steps accumulator.\n steps_accumulator = stats_accumulator_ops.StatsAccumulator(\n stamp_token=0,\n gradient_shape=tensor_shape.scalar(),\n hessian_shape=tensor_shape.scalar(),\n name=\"StepsAccumulator\")\n\n # Create bias stats accumulator.\n bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(\n stamp_token=0,\n gradient_shape=gradient_shape,\n hessian_shape=hessian_shape,\n name=\"BiasAccumulator\")\n\n # Create ensemble stats variables.\n num_layer_examples = variables.Variable(\n initial_value=array_ops.zeros([], dtypes.int64),\n name=\"num_layer_examples\",\n trainable=False)\n num_layer_steps = variables.Variable(\n initial_value=array_ops.zeros([], dtypes.int64),\n 
name=\"num_layer_steps\",\n trainable=False)\n num_layers = variables.Variable(\n initial_value=array_ops.zeros([], dtypes.int64),\n name=\"num_layers\",\n trainable=False)\n active_tree = variables.Variable(\n initial_value=array_ops.zeros([], dtypes.int64),\n name=\"active_tree\",\n trainable=False)\n active_layer = variables.Variable(\n initial_value=array_ops.zeros([], dtypes.int64),\n name=\"active_layer\",\n trainable=False)\n\n # Create ensemble stats summaries.\n summary.scalar(\"layer_stats/num_examples\", num_layer_examples)\n summary.scalar(\"layer_stats/num_steps\", num_layer_steps)\n summary.scalar(\"ensemble_stats/active_tree\", active_tree)\n summary.scalar(\"ensemble_stats/active_layer\", active_layer)\n\n # Update bias stats.\n stats_update_ops = []\n continue_centering = variables.Variable(\n initial_value=self._center_bias,\n name=\"continue_centering\",\n trainable=False)\n stats_update_ops.append(\n control_flow_ops.cond(continue_centering,\n self._make_update_bias_stats_fn(\n ensemble_stamp, predictions, gradients,\n bias_stats_accumulator),\n control_flow_ops.no_op))\n\n # Update handler stats.\n handler_reads = {}\n for handler in handlers:\n handler_reads[handler] = handler.scheduled_reads()\n\n handler_results = batch_ops_utils.run_handler_scheduled_ops(\n handler_reads, ensemble_stamp, worker_device)\n per_handler_updates = {}\n # Two values per handler. First one is if the handler is active for the\n # current layer. The second one is if the handler is going to be active\n # for the next layer.\n subsampling_type = self._learner_config.WhichOneof(\"feature_fraction\")\n if subsampling_type == \"feature_fraction_per_level\":\n seed = predictions_dict[NUM_LAYERS_ATTEMPTED]\n active_handlers_current_layer = stateless.stateless_random_uniform(\n shape=[len(handlers)], seed=[seed, 1])\n active_handlers_next_layer = stateless.stateless_random_uniform(\n shape=[len(handlers)], seed=[seed + 1, 1])\n active_handlers = array_ops.stack(\n [active_handlers_current_layer, active_handlers_next_layer], axis=1)\n active_handlers = (active_handlers <\n self._learner_config.feature_fraction_per_level)\n elif subsampling_type == \"feature_fraction_per_tree\":\n seed = predictions_dict[NUM_TREES_ATTEMPTED]\n active_handlers_current_layer = stateless.stateless_random_uniform(\n shape=[len(handlers)], seed=[seed, 2])\n active_handlers_current_layer = (\n active_handlers_current_layer <\n self._learner_config.feature_fraction_per_tree)\n active_handlers = array_ops.stack(active_handlers_current_layer,\n array_ops.ones(\n [len(handlers)], dtype=dtypes.bool))\n else:\n active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)\n\n # Prepare empty gradients and hessians when handlers are not ready.\n empty_hess_shape = [1] + hessian_shape.as_list()\n empty_grad_shape = [1] + gradient_shape.as_list()\n\n empty_gradients = constant_op.constant(\n [], dtype=dtypes.float32, shape=empty_grad_shape)\n empty_hessians = constant_op.constant(\n [], dtype=dtypes.float32, shape=empty_hess_shape)\n\n for handler_idx in range(len(handlers)):\n handler = handlers[handler_idx]\n is_active = active_handlers[handler_idx]\n updates, scheduled_updates = handler.update_stats(\n ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,\n empty_gradients, empty_hessians, weights, is_active,\n handler_results[handler])\n stats_update_ops.append(updates)\n per_handler_updates[handler] = scheduled_updates\n\n update_results = batch_ops_utils.run_handler_scheduled_ops(\n 
per_handler_updates, ensemble_stamp, worker_device)\n for update in update_results.values():\n stats_update_ops += update\n # Accumulate a step after updating stats.\n batch_size = math_ops.cast(array_ops.shape(labels)[0], dtypes.float32)\n with ops.control_dependencies(stats_update_ops):\n add_step_op = steps_accumulator.add(ensemble_stamp, [0], [[0, 0]],\n [batch_size], [1.0])\n\n # Determine learning rate.\n learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(\n \"tuner\")\n if learning_rate_tuner == \"fixed\" or learning_rate_tuner == \"dropout\":\n tuner = getattr(self._learner_config.learning_rate_tuner,\n learning_rate_tuner)\n learning_rate = tuner.learning_rate\n else:\n # TODO (nponomareva, soroush) do the line search. id:498 gh:499\n raise ValueError(\"Line search learning rate is not yet supported.\")\n\n # After adding the step, decide if further processing is needed.\n ensemble_update_ops = [add_step_op]\n with ops.control_dependencies([add_step_op]):\n if self._is_chief:\n dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]\n\n # Get accumulated steps and examples for the current layer.\n _, _, _, _, acc_examples, acc_steps = steps_accumulator.serialize()\n acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)\n acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)\n ensemble_update_ops.append(num_layer_examples.assign(acc_examples))\n ensemble_update_ops.append(num_layer_steps.assign(acc_steps))\n # Determine whether we need to update tree ensemble.\n examples_per_layer = self._examples_per_layer\n if callable(examples_per_layer):\n examples_per_layer = examples_per_layer(active_layer)\n ensemble_update_ops.append(\n control_flow_ops.cond(\n acc_examples >= examples_per_layer,\n self._make_update_ensemble_fn(\n ensemble_stamp, steps_accumulator, bias_stats_accumulator,\n continue_centering, learning_rate, handlers, num_layers,\n active_tree, active_layer, dropout_seed, class_id),\n control_flow_ops.no_op))\n\n # Calculate the loss to be reported.\n # Note, the loss is calculated from the prediction considering dropouts, so\n # that the value might look staggering over steps when the dropout ratio is\n # high. 
eval_loss might be referred instead in the aspect of convergence.\n return control_flow_ops.group(*ensemble_update_ops)\n\n def _get_weights(self, hessian_shape, hessians):\n \"\"\"Derives weights to be used based on hessians and multiclass strategy.\"\"\"\n if hessian_shape == tensor_shape.scalar():\n # This is tree per class.\n weights = hessians\n elif len(hessian_shape.dims) == 1:\n # This is diagonal hessian.\n weights = math_ops.reduce_sum(hessians, axis=1)\n else:\n # This is full hessian.\n weights = math_ops.trace(hessians)\n return weights\n\n def _full_hessian(self, grads, predictions):\n \"\"\"Prepares hessians for full-hessian multiclass strategy.\"\"\"\n # Because of\n # https://github.com/tensorflow/tensorflow/issues/675, we can't just\n # compute the full hessian with a single call to gradients, but instead\n # must compute it row-by-row.\n gradients_list = array_ops.unstack(\n grads, num=self._logits_dimension, axis=1)\n hessian_rows = []\n\n for row in range(self._logits_dimension):\n # If current row is i, K is number of classes,each row returns a tensor of\n # size batch_size x K representing for each example dx_i dx_1, dx_i dx_2\n # etc dx_i dx_K\n hessian_row = gradients_impl.gradients(\n gradients_list[row],\n predictions,\n name=\"Hessian_%d\" % row,\n colocate_gradients_with_ops=False,\n gate_gradients=0,\n aggregation_method=None)\n\n # hessian_row is of dimension 1, batch_size, K, => trim first dimension\n # to get batch_size x K\n hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])\n hessian_rows.append(hessian_row)\n return hessian_rows\n\n def _diagonal_hessian(self, grads, predictions):\n \"\"\"Prepares hessians for diagonal-hessian multiclass mode.\"\"\"\n diag_hessian_list = []\n\n gradients_list = array_ops.unstack(\n grads, num=self._logits_dimension, axis=1)\n\n for row, row_grads in enumerate(gradients_list):\n # If current row is i, K is number of classes,each row returns a tensor of\n # size batch_size x K representing for each example dx_i dx_1, dx_1 dx_2\n # etc dx_i dx_K\n hessian_row = gradients_impl.gradients(\n row_grads,\n predictions,\n name=\"Hessian_%d\" % row,\n colocate_gradients_with_ops=False,\n gate_gradients=0,\n aggregation_method=None)\n\n # hessian_row is of dimension 1, batch_size, K, => trim first dimension\n # to get batch_size x K\n hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])\n\n # Get dx_i^2 for the whole batch.\n elem = array_ops.transpose(hessian_row)[row]\n diag_hessian_list.append(elem)\n\n return diag_hessian_list\n\n def _get_replica_device_setter(self, worker_device):\n \"\"\"Creates a replica device setter.\"\"\"\n ps_tasks = self._num_ps_replicas\n ps_ops = [\n \"Variable\",\n \"VariableV2\",\n \"DecisionTreeEnsembleResourceHandleOp\",\n \"StatsAccumulatorScalarResourceHandleOp\",\n \"StatsAccumulatorTensorResourceHandleOp\",\n \"QuantileStreamResourceHandleOp\",\n ]\n ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)\n return device_setter.replica_device_setter(\n worker_device=worker_device,\n ps_tasks=ps_tasks,\n merge_devices=True,\n ps_ops=ps_ops,\n ps_strategy=ps_strategy)\n\n def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients,\n bias_stats_accumulator):\n \"\"\"A method to create the function which updates the bias stats.\"\"\"\n\n def _update_bias_stats():\n \"\"\"A method to update the bias stats.\"\"\"\n # Get reduced gradients and hessians.\n grads_sum = math_ops.reduce_sum(gradients, 0)\n hess = gradients_impl.gradients(\n grads_sum,\n 
predictions,\n name=\"Hessians\",\n colocate_gradients_with_ops=False,\n gate_gradients=0,\n aggregation_method=None)[0]\n hess_sum = math_ops.reduce_sum(hess, 0)\n\n # Accumulate gradients and hessians.\n partition_ids = math_ops.range(self._logits_dimension)\n feature_ids = array_ops.zeros(\n [self._logits_dimension, 2], dtype=dtypes.int64)\n\n add_stats_op = bias_stats_accumulator.add(\n ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)\n return control_flow_ops.group(*[add_stats_op], name=\"update_bias_stats\")\n\n return _update_bias_stats\n\n def _make_update_ensemble_fn(self, ensemble_stamp, steps_accumulator,\n bias_stats_accumulator, continue_centering,\n learning_rate, handlers, num_layers, active_tree,\n active_layer, dropout_seed, class_id):\n \"\"\"A method to create the function which updates the tree ensemble.\"\"\"\n\n def _update_ensemble():\n \"\"\"A method to update the tree ensemble.\"\"\"\n # Get next stamp token.\n next_ensemble_stamp = ensemble_stamp + 1\n # Finalize bias stats.\n _, _, _, bias_grads, bias_hess = bias_stats_accumulator.flush(\n ensemble_stamp, next_ensemble_stamp)\n\n # Finalize handler splits.\n are_splits_ready_list = []\n partition_ids_list = []\n gains_list = []\n split_info_list = []\n\n for handler in handlers:\n (are_splits_ready,\n partition_ids, gains, split_info) = handler.make_splits(\n ensemble_stamp, next_ensemble_stamp, class_id)\n are_splits_ready_list.append(are_splits_ready)\n partition_ids_list.append(partition_ids)\n gains_list.append(gains)\n split_info_list.append(split_info)\n # Stack all the inputs to one tensor per type.\n # This is a workaround for the slowness of graph building in tf.cond.\n # See (b/36554864).\n split_sizes = array_ops.stack([\n array_ops.shape(partition_id)[0]\n for partition_id in partition_ids_list\n ])\n partition_ids = array_ops.concat(partition_ids_list, axis=0)\n gains = array_ops.concat(gains_list, axis=0)\n split_infos = array_ops.concat(split_info_list, axis=0)\n\n # Determine if all splits are ready.\n are_all_splits_ready = math_ops.reduce_all(\n array_ops.stack(\n are_splits_ready_list, axis=0, name=\"stack_handler_readiness\"))\n\n # Define bias centering update operation.\n def _center_bias_fn():\n # Center tree ensemble bias.\n delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess,\n array_ops.zeros_like(bias_grads))\n center_bias = training_ops.center_tree_ensemble_bias(\n tree_ensemble_handle=self._ensemble_handle,\n stamp_token=ensemble_stamp,\n next_stamp_token=next_ensemble_stamp,\n delta_updates=delta_updates,\n learner_config=self._learner_config_serialized)\n return continue_centering.assign(center_bias)\n\n # Define ensemble growing operations.\n def _grow_ensemble_ready_fn():\n # Grow the ensemble given the current candidates.\n sizes = array_ops.unstack(split_sizes)\n partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))\n gains_list = list(array_ops.split(gains, sizes, axis=0))\n split_info_list = list(array_ops.split(split_infos, sizes, axis=0))\n return training_ops.grow_tree_ensemble(\n tree_ensemble_handle=self._ensemble_handle,\n stamp_token=ensemble_stamp,\n next_stamp_token=next_ensemble_stamp,\n learning_rate=learning_rate,\n partition_ids=partition_ids_list,\n gains=gains_list,\n splits=split_info_list,\n learner_config=self._learner_config_serialized,\n dropout_seed=dropout_seed,\n center_bias=self._center_bias)\n\n def _grow_ensemble_not_ready_fn():\n # Don't grow the ensemble, just update the stamp.\n return 
training_ops.grow_tree_ensemble(\n tree_ensemble_handle=self._ensemble_handle,\n stamp_token=ensemble_stamp,\n next_stamp_token=next_ensemble_stamp,\n learning_rate=0,\n partition_ids=[],\n gains=[],\n splits=[],\n learner_config=self._learner_config_serialized,\n dropout_seed=dropout_seed,\n center_bias=self._center_bias)\n\n def _grow_ensemble_fn():\n # Conditionally grow an ensemble depending on whether the splits\n # from all the handlers are ready.\n return control_flow_ops.cond(are_all_splits_ready,\n _grow_ensemble_ready_fn,\n _grow_ensemble_not_ready_fn)\n\n # Update ensemble.\n update_ops = [are_all_splits_ready]\n update_model = control_flow_ops.cond(continue_centering, _center_bias_fn,\n _grow_ensemble_fn)\n update_ops.append(update_model)\n\n # Update ensemble stats.\n with ops.control_dependencies([update_model]):\n stats = training_ops.tree_ensemble_stats(\n self._ensemble_handle, stamp_token=next_ensemble_stamp)\n update_ops.append(self._finalized_trees.assign(stats.num_trees))\n update_ops.append(self._attempted_trees.assign(stats.attempted_trees))\n update_ops.append(num_layers.assign(stats.num_layers))\n update_ops.append(active_tree.assign(stats.active_tree))\n update_ops.append(active_layer.assign(stats.active_layer))\n\n # Flush step stats.\n update_ops.extend(\n steps_accumulator.flush(ensemble_stamp, next_ensemble_stamp))\n return control_flow_ops.group(*update_ops, name=\"update_ensemble\")\n\n return _update_ensemble\n\n def get_number_of_trees_tensor(self):\n return self._finalized_trees, self._attempted_trees\n",
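The `examples_per_layer` argument of `GradientBoostedDecisionTreeModel` above accepts either an integer or a callable invoked with the depth of the layer being built; since `train()` calls it with the `active_layer` counter, the argument can arrive as an int64 scalar `Tensor`. A minimal sketch of a depth-dependent schedule, assuming a doubling policy and a base count of 1000 that the library does not prescribe:

```python
def examples_per_layer_fn(layer_depth):
  """Require twice as many accumulated examples for each deeper layer.

  `layer_depth` may be a Python int or an int64 scalar Tensor; TensorFlow
  operator overloading handles the power and product in both cases.
  (Illustrative policy only, not prescribed by the library.)
  """
  return 1000 * (2 ** layer_depth)

# Passed to the constructor as examples_per_layer=examples_per_layer_fn
# in place of a plain integer.
```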
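The feature-fraction branches in `train()` above draw handler activity from stateless random ops keyed on ensemble counters, so rebuilding the same layer reproduces the same feature subset. A standalone sketch of the per-level draw, assuming the contrib-era `tf.contrib.stateless` endpoint for the `stateless` module this file imports; the handler count and fraction are illustrative:

```python
import tensorflow as tf

num_handlers = 8        # one handler per feature column (illustrative)
feature_fraction = 0.5  # stands in for feature_fraction_per_level

# Stand-in for the NUM_LAYERS_ATTEMPTED counter used as the seed.
layer_counter = tf.constant(3, dtype=tf.int64)

# The same (counter, salt) seed always yields the same uniform draw.
draw_current = tf.contrib.stateless.stateless_random_uniform(
    shape=[num_handlers], seed=[layer_counter, 1])
draw_next = tf.contrib.stateless.stateless_random_uniform(
    shape=[num_handlers], seed=[layer_counter + 1, 1])

# Boolean mask of shape [num_handlers, 2]: column 0 marks handlers active
# for the current layer, column 1 for the next layer, matching the layout
# handler.update_stats() consumes.
active_handlers = (
    tf.stack([draw_current, draw_next], axis=1) < feature_fraction)
```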
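`_full_hessian` and `_diagonal_hessian` above work around `tf.gradients` not returning the Hessian of a vector-valued output in one call (the TensorFlow issue cited in the code): each column of the gradient is differentiated separately, producing one Hessian row at a time. A minimal standalone sketch in the graph-mode TF 1.x style of this module; the class count, batch size, and hand-written cross-entropy loss are illustrative assumptions (the loss is spelled out because the fused cross-entropy ops block second derivatives):

```python
import tensorflow as tf

K = 3      # number of classes (illustrative)
BATCH = 4  # batch size (illustrative)

logits = tf.placeholder(tf.float32, shape=[BATCH, K])
labels = tf.placeholder(tf.float32, shape=[BATCH, K])  # one-hot rows

# Cross entropy written out explicitly so it stays twice-differentiable.
loss = -tf.reduce_mean(
    tf.reduce_sum(labels * tf.nn.log_softmax(logits), axis=1))

# First derivatives w.r.t. the predictions: shape [BATCH, K].
grads = tf.gradients(loss, logits)[0]

# Row-by-row second derivatives: differentiating column i of the gradient
# yields Hessian row i, again of shape [BATCH, K].
grad_columns = tf.unstack(grads, num=K, axis=1)
hessian_rows = [tf.gradients(col, logits)[0] for col in grad_columns]

# Full Hessian per example: [BATCH, K, K].
full_hessian = tf.stack(hessian_rows, axis=1)

# Diagonal per example: [BATCH, K]; entry i of row b is d^2 loss/d logit_bi^2,
# mirroring the transpose-and-index step in _diagonal_hessian above.
diag_hessian = tf.stack(
    [row[:, i] for i, row in enumerate(hessian_rows)], axis=1)
```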
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Abstractions for the head(s) of a model.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.contrib import framework as framework_lib\nfrom tensorflow.contrib import layers as layers_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import constants\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn\nfrom tensorflow.contrib.learn.python.learn.estimators import prediction_key\nfrom tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey as mkey\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics as metrics_lib\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import sparse_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.ops.losses import losses as losses_lib\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import training\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\n\n\nclass Head(object):\n \"\"\"Interface for the head/top of a model.\n\n Given logits (or output of a hidden layer), a Head knows how to compute\n predictions, loss, default metric and export signature. It is meant to,\n\n 1) Simplify writing model_fn and to make model_fn more configurable\n 2) Support wide range of machine learning models. 
Since most heads can work\n with logits, they can support DNN, RNN, Wide, Wide&Deep,\n Global objectives, Gradient boosted trees and many other types\n of machine learning models.\n 3) Allow users to seamlessly switch between one and many heads for\n multi-objective learning (see the _MultiHead implementation for more\n details)\n\n Common usage:\n Here is a simplified model_fn to build a multiclass DNN model.\n ```python\n def _my_dnn_model_fn(features, labels, mode, params, config=None):\n # Optionally your callers can pass head to model_fn as a param.\n head = tf.contrib.learn.multi_class_head(...)\n input = tf.contrib.layers.input_from_feature_columns(features, ...)\n last_hidden_layer_out = tf.contrib.layers.stack(\n input, tf.contrib.layers.fully_connected, [1000, 500])\n logits = tf.contrib.layers.fully_connected(\n last_hidden_layer_out, head.logits_dimension, activation_fn=None)\n\n def _train_op_fn(loss):\n return optimizer.minimize(loss)\n\n return head.create_model_fn_ops(\n features=features,\n labels=labels,\n mode=mode,\n train_op_fn=_train_op_fn,\n logits=logits,\n scope=...)\n ```\n\n Most heads also support logits_input, which is typically the output of the\n last hidden layer. Some heads (like heads responsible for candidate sampling\n or hierarchical softmax) intrinsically will not support logits and you have\n to pass logits_input. Here is a common usage:\n ```python\n return head.create_model_fn_ops(\n features=features,\n labels=labels,\n mode=mode,\n train_op_fn=_train_op_fn,\n logits_input=last_hidden_layer_out,\n scope=...)\n ```\n\n There are cases where computing and applying gradients cannot be\n meaningfully captured with the train_op_fn we support (for example, with a\n sync optimizer). In such cases, you can take on the responsibility yourself.\n Here is a common use case:\n ```python\n model_fn_ops = head.create_model_fn_ops(\n features=features,\n labels=labels,\n mode=mode,\n train_op_fn=tf.contrib.learn.no_op_train_fn,\n logits=logits,\n scope=...)\n if mode == tf.contrib.learn.ModeKeys.TRAIN:\n optimizer = ...\n sync = tf.train.SyncReplicasOptimizer(opt=optimizer, ...)\n update_op = tf.contrib.layers.optimize_loss(optimizer=sync,\n loss=model_fn_ops.loss, ...)\n hooks = [sync.make_session_run_hook(is_chief)]\n ... update train_op and hooks in ModelFnOps and return\n ```\n \"\"\"\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractproperty\n def logits_dimension(self):\n \"\"\"Size of the last dimension of the logits `Tensor`.\n\n Typically, logits is of shape `[batch_size, logits_dimension]`.\n\n Returns:\n The expected size of the `logits` tensor.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"Returns `ModelFnOps` that a model_fn can return.\n\n Please note that:\n + Exactly one of `logits` and `logits_input` must be provided.\n + All args must be passed via name.\n\n Args:\n features: Input `dict` of `Tensor` objects.\n mode: Estimator's `ModeKeys`.\n labels: Labels `Tensor`, or `dict` of same.\n train_op_fn: Function that takes a scalar loss `Tensor` and returns an op\n to optimize the model with the loss. This is used in TRAIN mode and\n must not be None. None is allowed in other modes. 
If you want to\n optimize loss yourself you can pass `no_op_train_fn` and then use\n ModeFnOps.loss to compute and apply gradients.\n logits: logits `Tensor` to be used by the head.\n logits_input: `Tensor` from which to build logits, often needed when you\n don't want to compute the logits. Typically this is the activation of\n the last hidden layer in a DNN. Some heads (like the ones responsible\n for candidate sampling) intrinsically avoid computing full logits and\n only accepts logits_input.\n scope: Optional scope for `variable_scope`.\n\n Returns:\n An instance of `ModelFnOps`.\n\n Raises:\n ValueError: If `mode` is not recognized.\n ValueError: If neither or both of `logits` and `logits_input` is provided.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n\ndef regression_head(label_name=None,\n weight_column_name=None,\n label_dimension=1,\n enable_centered_bias=False,\n head_name=None):\n \"\"\"Creates a `Head` for linear regression.\n\n Args:\n label_name: String, name of the key in label dict. Can be null if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n label_dimension: Number of regression labels per example. This is the size\n of the last dimension of the labels `Tensor` (typically, this has shape\n `[batch_size, label_dimension]`).\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. If provided, predictions, summary and metrics\n keys will be suffixed by `\"/\" + head_name` and the default variable scope\n will be `head_name`.\n\n Returns:\n An instance of `Head` for linear regression.\n \"\"\"\n return _RegressionHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n label_dimension=label_dimension,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n loss_fn=_mean_squared_loss,\n link_fn=array_ops.identity)\n\n\ndef poisson_regression_head(label_name=None,\n weight_column_name=None,\n label_dimension=1,\n enable_centered_bias=False,\n head_name=None):\n \"\"\"Creates a `Head` for poisson regression.\n\n Args:\n label_name: String, name of the key in label dict. Can be null if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n label_dimension: Number of regression labels per example. This is the size\n of the last dimension of the labels `Tensor` (typically, this has shape\n `[batch_size, label_dimension]`).\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. 
If provided, predictions, summary and metrics\n keys will be suffixed by `\"/\" + head_name` and the default variable scope\n will be `head_name`.\n\n Returns:\n An instance of `Head` for poisson regression.\n \"\"\"\n return _RegressionHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n label_dimension=label_dimension,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n loss_fn=_poisson_loss,\n link_fn=math_ops.exp)\n\n# TODO (zakaria): Consider adding a _RegressionHead for logistic_regression id:1191 gh:1192\n\n\ndef multi_class_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None,\n loss_fn=None,\n label_keys=None):\n \"\"\"Creates a `Head` for multi class single label classification.\n\n The Head uses softmax cross entropy loss.\n\n This head expects to be fed integer labels specifying the class index. But\n if `label_keys` is specified, then labels must be strings from this\n vocabulary, and the predicted classes will be strings from the same\n vocabulary.\n\n Args:\n n_classes: Integer, number of classes, must be >= 2\n label_name: String, name of the key in label dict. Can be null if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. If provided, predictions, summary and metrics\n keys will be suffixed by `\"/\" + head_name` and the default variable scope\n will be `head_name`.\n thresholds: thresholds for eval metrics, defaults to [.5]\n metric_class_ids: List of class IDs for which we should report per-class\n metrics. Must all be in the range `[0, n_classes)`. Invalid if\n `n_classes` is 2.\n loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as\n parameter and returns a weighted scalar loss. `weights` should be\n optional. See `tf.losses`\n label_keys: Optional list of strings with size `[n_classes]` defining the\n label vocabulary. 
Only supported for `n_classes` > 2.\n\n Returns:\n An instance of `Head` for multi class classification.\n\n Raises:\n ValueError: if `n_classes` is < 2.\n ValueError: If `metric_class_ids` is provided when `n_classes` is 2.\n ValueError: If `len(label_keys) != n_classes`.\n \"\"\"\n if (n_classes is None) or (n_classes < 2):\n raise ValueError(\"n_classes must be > 1 for classification: %s.\" %\n n_classes)\n if loss_fn:\n _verify_loss_fn_args(loss_fn)\n\n loss_fn = _wrap_custom_loss_fn(loss_fn) if loss_fn else None\n if n_classes == 2:\n if metric_class_ids:\n raise ValueError(\"metric_class_ids invalid for n_classes==2.\")\n if label_keys:\n raise ValueError(\"label_keys is not supported for n_classes=2.\")\n return _BinaryLogisticHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n loss_fn=loss_fn)\n\n return _MultiClassHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids,\n loss_fn=loss_fn,\n label_keys=label_keys)\n\n\ndef binary_svm_head(\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,):\n \"\"\"Creates a `Head` for binary classification with SVMs.\n\n The head uses binary hinge loss.\n\n Args:\n label_name: String, name of the key in label dict. Can be null if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. If provided, predictions, summary and metrics\n keys will be suffixed by `\"/\" + head_name` and the default variable scope\n will be `head_name`.\n thresholds: thresholds for eval metrics, defaults to [.5]\n\n Returns:\n An instance of `Head` for binary classification with SVM.\n \"\"\"\n return _BinarySvmHead(\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds)\n\n\ndef multi_label_head(n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n thresholds=None,\n metric_class_ids=None,\n loss_fn=None):\n \"\"\"Creates a Head for multi label classification.\n\n Multi-label classification handles the case where each example may have zero\n or more associated labels, from a discrete set. This is distinct from\n `multi_class_head` which has exactly one label from a discrete set.\n\n This head by default uses sigmoid cross entropy loss, which expects as input\n a multi-hot tensor of shape `(batch_size, num_classes)`.\n\n Args:\n n_classes: Integer, number of classes, must be >= 2\n label_name: String, name of the key in label dict. Can be null if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. 
Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. If provided, predictions, summary and metrics\n keys will be suffixed by `\"/\" + head_name` and the default variable scope\n will be `head_name`.\n thresholds: thresholds for eval metrics, defaults to [.5]\n metric_class_ids: List of class IDs for which we should report per-class\n metrics. Must all be in the range `[0, n_classes)`.\n loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as\n parameter and returns a weighted scalar loss. `weights` should be\n optional. See `tf.losses`\n\n Returns:\n An instance of `Head` for multi label classification.\n\n Raises:\n ValueError: If n_classes is < 2\n ValueError: If loss_fn does not have expected signature.\n \"\"\"\n if n_classes < 2:\n raise ValueError(\"n_classes must be > 1 for classification.\")\n if loss_fn:\n _verify_loss_fn_args(loss_fn)\n\n return _MultiLabelHead(\n n_classes=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n enable_centered_bias=enable_centered_bias,\n head_name=head_name,\n thresholds=thresholds,\n metric_class_ids=metric_class_ids,\n loss_fn=_wrap_custom_loss_fn(loss_fn) if loss_fn else None)\n\n\ndef loss_only_head(loss_fn, head_name=None):\n \"\"\"Creates a Head that contains only loss terms.\n\n Loss only head holds additional loss terms to be added to other heads and\n usually represents additional regularization terms in the objective function.\n\n Args:\n loss_fn: a function that takes no argument and returns a list of\n scalar tensors.\n head_name: a name for the head.\n\n Returns:\n An instance of `Head` to hold the additional losses.\n \"\"\"\n return _LossOnlyHead(loss_fn, head_name=head_name)\n\n\ndef multi_head(heads, loss_weights=None):\n \"\"\"Creates a MultiHead stemming from same logits/hidden layer.\n\n Args:\n heads: list of Head objects.\n loss_weights: optional list of weights to be used to merge losses from\n each head. 
All losses are weighted equally if not provided.\n\n Returns:\n A instance of `Head` that merges multiple heads.\n\n Raises:\n ValueError: if heads and loss_weights have different size.\n \"\"\"\n if loss_weights:\n if len(loss_weights) != len(heads):\n raise ValueError(\"heads and loss_weights must have same size\")\n\n def _weighted_loss_merger(losses):\n if loss_weights:\n if len(losses) != len(loss_weights):\n raise ValueError(\"losses and loss_weights must have same size\")\n weighted_losses = []\n for loss, weight in zip(losses, loss_weights):\n weighted_losses.append(math_ops.multiply(loss, weight))\n return math_ops.add_n(weighted_losses)\n else:\n return math_ops.add_n(losses)\n\n return _MultiHead(heads, loss_merger=_weighted_loss_merger)\n\n\ndef no_op_train_fn(loss):\n del loss\n return control_flow_ops.no_op()\n\n\nclass _SingleHead(Head):\n \"\"\"Interface for a single head/top of a model.\"\"\"\n __metaclass__ = abc.ABCMeta\n\n def __init__(\n self, problem_type, logits_dimension, label_name=None,\n weight_column_name=None, head_name=None):\n if problem_type is None:\n raise ValueError(\"Invalid problem_type %s.\" % problem_type)\n if logits_dimension is None or logits_dimension < 1:\n raise ValueError(\"Invalid logits_dimension %s.\" % logits_dimension)\n self._problem_type = problem_type\n self._logits_dimension = logits_dimension\n self._label_name = label_name\n self._weight_column_name = weight_column_name\n self._head_name = head_name\n\n @property\n def logits_dimension(self):\n return self._logits_dimension\n\n @property\n def label_name(self):\n return self._label_name\n\n @property\n def weight_column_name(self):\n return self._weight_column_name\n\n @property\n def head_name(self):\n return self._head_name\n\n def _create_output_alternatives(self, predictions):\n \"\"\"Creates output alternative for the Head.\n\n Args:\n predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a\n symbolic name for an output Tensor possibly but not necessarily taken\n from `PredictionKey`, and 'Tensor' is the corresponding output Tensor\n itself.\n\n Returns:\n `dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where\n 'submodel_name' is a submodel identifier that should be consistent across\n the pipeline (here likely taken from the head_name),\n 'problem_type' is a `ProblemType`,\n 'tensor_name' is a symbolic name for an output Tensor possibly but not\n necessarily taken from `PredictionKey`, and\n 'Tensor' is the corresponding output Tensor itself.\n \"\"\"\n return {self._head_name: (self._problem_type, predictions)}\n\n\n# TODO (zakaria): use contrib losses. id:1014 gh:1015\ndef _mean_squared_loss(labels, logits, weights=None):\n with ops.name_scope(None, \"mean_squared_loss\", (logits, labels)) as name:\n logits = ops.convert_to_tensor(logits)\n labels = ops.convert_to_tensor(labels)\n # To prevent broadcasting inside \"-\".\n if len(labels.get_shape()) == 1:\n labels = array_ops.expand_dims(labels, dim=(1,))\n # TODO (zakaria): make sure it does not recreate the broadcast bug. 
id:770 gh:771\n if len(logits.get_shape()) == 1:\n logits = array_ops.expand_dims(logits, dim=(1,))\n logits.get_shape().assert_is_compatible_with(labels.get_shape())\n loss = math_ops.square(logits - math_ops.to_float(labels), name=name)\n return _compute_weighted_loss(loss, weights)\n\n\ndef _poisson_loss(labels, logits, weights=None):\n \"\"\"Computes poisson loss from logits.\"\"\"\n with ops.name_scope(None, \"_poisson_loss\", (logits, labels)) as name:\n logits = ops.convert_to_tensor(logits)\n labels = ops.convert_to_tensor(labels)\n # To prevent broadcasting inside \"-\".\n if len(labels.get_shape()) == 1:\n labels = array_ops.expand_dims(labels, dim=(1,))\n # TODO (zakaria): make sure it does not recreate the broadcast bug. id:706 gh:707\n if len(logits.get_shape()) == 1:\n logits = array_ops.expand_dims(logits, dim=(1,))\n logits.get_shape().assert_is_compatible_with(labels.get_shape())\n loss = nn.log_poisson_loss(labels, logits, compute_full_loss=True,\n name=name)\n return _compute_weighted_loss(loss, weights)\n\n\ndef _logits(logits_input, logits, logits_dimension):\n \"\"\"Validate logits args, and create `logits` if necessary.\n\n Exactly one of `logits_input` and `logits` must be provided.\n\n Args:\n logits_input: `Tensor` input to `logits`.\n logits: `Tensor` output.\n logits_dimension: Integer, last dimension of `logits`. This is used to\n create `logits` from `logits_input` if `logits` is `None`; otherwise, it's\n used to validate `logits`.\n\n Returns:\n `logits` `Tensor`.\n\n Raises:\n ValueError: if neither or both of `logits` and `logits_input` are supplied.\n \"\"\"\n if (logits_dimension is None) or (logits_dimension < 1):\n raise ValueError(\"Invalid logits_dimension %s.\" % logits_dimension)\n\n # If not provided, create logits.\n if logits is None:\n if logits_input is None:\n raise ValueError(\"Neither logits nor logits_input supplied.\")\n return layers_lib.linear(logits_input, logits_dimension, scope=\"logits\")\n\n if logits_input is not None:\n raise ValueError(\"Both logits and logits_input supplied.\")\n\n logits = ops.convert_to_tensor(logits, name=\"logits\")\n logits_dims = logits.get_shape().dims\n if logits_dims is not None:\n logits_dims[-1].assert_is_compatible_with(logits_dimension)\n\n return logits\n\n\ndef _create_model_fn_ops(features,\n mode,\n loss_fn,\n logits_to_predictions_fn,\n metrics_fn,\n create_output_alternatives_fn,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_dimension=None,\n head_name=None,\n weight_column_name=None,\n enable_centered_bias=False):\n \"\"\"Returns a `ModelFnOps` object.\"\"\"\n _check_mode_valid(mode)\n\n centered_bias = None\n if enable_centered_bias:\n centered_bias = _centered_bias(logits_dimension, head_name)\n logits = nn.bias_add(logits, centered_bias)\n\n predictions = logits_to_predictions_fn(logits)\n loss = None\n train_op = None\n eval_metric_ops = None\n if (mode != model_fn.ModeKeys.INFER) and (labels is not None):\n weight_tensor = _weight_tensor(features, weight_column_name)\n loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)\n # The name_scope escapism is needed to maintain the same summary tag\n # after switching away from the now unsupported API.\n with ops.name_scope(\"\"):\n summary_loss = array_ops.identity(weighted_average_loss)\n summary.scalar(_summary_key(head_name, mkey.LOSS), summary_loss)\n\n if mode == model_fn.ModeKeys.TRAIN:\n if train_op_fn is None:\n raise ValueError(\"train_op_fn can not be None in TRAIN mode\")\n batch_size = 
array_ops.shape(logits)[0]\n train_op = _train_op(loss, labels, train_op_fn, centered_bias,\n batch_size, loss_fn, weight_tensor)\n eval_metric_ops = metrics_fn(\n weighted_average_loss, predictions, labels, weight_tensor)\n return model_fn.ModelFnOps(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n output_alternatives=create_output_alternatives_fn(predictions))\n\n\nclass _RegressionHead(_SingleHead):\n \"\"\"`Head` for regression with a generalized linear model.\"\"\"\n\n def __init__(self,\n label_dimension,\n loss_fn,\n link_fn,\n logits_dimension=None,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None):\n \"\"\"`Head` for regression.\n\n Args:\n label_dimension: Number of regression labels per example. This is the\n size of the last dimension of the labels `Tensor` (typically, this has\n shape `[batch_size, label_dimension]`).\n loss_fn: Loss function, takes logits and labels and returns loss.\n link_fn: Link function, takes a logits tensor and returns the output.\n logits_dimension: Number of logits per example. This is the\n size of the last dimension of the logits `Tensor` (typically, this has\n shape `[batch_size, label_dimension]`).\n Default value: `label_dimension`.\n label_name: String, name of the key in label dict. Can be null if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. 
Predictions, summary and metrics keys are\n suffixed by `\"/\" + head_name` and the default variable scope is\n `head_name`.\n \"\"\"\n super(_RegressionHead, self).__init__(\n problem_type=constants.ProblemType.LINEAR_REGRESSION,\n logits_dimension=(logits_dimension if logits_dimension is not None\n else label_dimension),\n label_name=label_name,\n weight_column_name=weight_column_name,\n head_name=head_name)\n\n self._loss_fn = loss_fn\n self._link_fn = link_fn\n self._enable_centered_bias = enable_centered_bias\n\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"See `Head`.\"\"\"\n with variable_scope.variable_scope(\n scope,\n default_name=self.head_name or \"regression_head\",\n values=(tuple(six.itervalues(features)) +\n (labels, logits, logits_input))):\n labels = self._transform_labels(mode=mode, labels=labels)\n logits = _logits(logits_input, logits, self.logits_dimension)\n return _create_model_fn_ops(\n features=features,\n mode=mode,\n loss_fn=self._loss_fn,\n logits_to_predictions_fn=self._logits_to_predictions,\n metrics_fn=self._metrics,\n create_output_alternatives_fn=self._create_output_alternatives,\n labels=labels,\n train_op_fn=train_op_fn,\n logits=logits,\n logits_dimension=self.logits_dimension,\n head_name=self.head_name,\n weight_column_name=self.weight_column_name,\n enable_centered_bias=self._enable_centered_bias)\n\n def _transform_labels(self, mode, labels):\n \"\"\"Applies transformations to labels tensor.\"\"\"\n if (mode == model_fn.ModeKeys.INFER) or (labels is None):\n return None\n labels_tensor = _to_labels_tensor(labels, self._label_name)\n _check_no_sparse_tensor(labels_tensor)\n return labels_tensor\n\n def _logits_to_predictions(self, logits):\n \"\"\"Returns a dict of predictions.\n\n Args:\n logits: logits `Tensor` after applying possible centered bias.\n\n Returns:\n Dict of prediction `Tensor` keyed by `PredictionKey`.\n \"\"\"\n key = prediction_key.PredictionKey.SCORES\n with ops.name_scope(None, \"predictions\", (logits,)):\n if self.logits_dimension == 1:\n logits = array_ops.squeeze(logits, squeeze_dims=(1,), name=key)\n return {key: self._link_fn(logits)}\n\n def _metrics(self, eval_loss, predictions, labels, weights):\n \"\"\"Returns a dict of metrics keyed by name.\"\"\"\n del predictions, labels, weights # Unused by this head.\n with ops.name_scope(\"metrics\", values=[eval_loss]):\n return {\n _summary_key(self.head_name, mkey.LOSS):\n metrics_lib.mean(eval_loss)}\n\n\ndef _log_loss_with_two_classes(labels, logits, weights=None):\n with ops.name_scope(None, \"log_loss_with_two_classes\",\n (logits, labels)) as name:\n logits = ops.convert_to_tensor(logits)\n labels = math_ops.to_float(labels)\n # TODO (ptucker): This will break for dynamic shapes. 
id:730 gh:731\n # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.\n if len(labels.get_shape()) == 1:\n labels = array_ops.expand_dims(labels, dim=(1,))\n loss = nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits,\n name=name)\n return _compute_weighted_loss(loss, weights)\n\n\ndef _one_class_to_two_class_logits(logits):\n return array_ops.concat((array_ops.zeros_like(logits), logits), 1)\n\n\nclass _BinaryLogisticHead(_SingleHead):\n \"\"\"`Head` for binary classification with logistic regression.\"\"\"\n\n def __init__(self,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n loss_fn=None,\n thresholds=None):\n \"\"\"`Head` for binary classification with logistic regression.\n\n Args:\n label_name: String, name of the key in label dict. Can be `None` if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. Predictions, summary, metrics keys are\n suffixed by `\"/\" + head_name` and the default variable scope is\n `head_name`.\n loss_fn: Loss function.\n thresholds: thresholds for eval.\n\n Raises:\n ValueError: if n_classes is invalid.\n \"\"\"\n super(_BinaryLogisticHead, self).__init__(\n problem_type=constants.ProblemType.LOGISTIC_REGRESSION,\n logits_dimension=1,\n label_name=label_name,\n weight_column_name=weight_column_name,\n head_name=head_name)\n self._thresholds = thresholds if thresholds else (.5,)\n self._loss_fn = loss_fn if loss_fn else _log_loss_with_two_classes\n self._enable_centered_bias = enable_centered_bias\n\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"See `Head`.\"\"\"\n with variable_scope.variable_scope(\n scope,\n default_name=self.head_name or \"binary_logistic_head\",\n values=(tuple(six.itervalues(features)) +\n (labels, logits, logits_input))):\n labels = self._transform_labels(mode=mode, labels=labels)\n logits = _logits(logits_input, logits, self.logits_dimension)\n return _create_model_fn_ops(\n features=features,\n mode=mode,\n loss_fn=self._loss_fn,\n logits_to_predictions_fn=self._logits_to_predictions,\n metrics_fn=self._metrics,\n create_output_alternatives_fn=_classification_output_alternatives(\n self.head_name, self._problem_type),\n labels=labels,\n train_op_fn=train_op_fn,\n logits=logits,\n logits_dimension=self.logits_dimension,\n head_name=self.head_name,\n weight_column_name=self.weight_column_name,\n enable_centered_bias=self._enable_centered_bias)\n\n def _transform_labels(self, mode, labels):\n \"\"\"Applies transformations to labels tensor.\"\"\"\n if (mode == model_fn.ModeKeys.INFER) or (labels is None):\n return None\n labels_tensor = _to_labels_tensor(labels, self._label_name)\n _check_no_sparse_tensor(labels_tensor)\n return labels_tensor\n\n def _logits_to_predictions(self, logits):\n \"\"\"Returns a dict of predictions.\n\n Args:\n logits: logits `Output` after applying possible centered bias.\n\n Returns:\n Dict of prediction `Output` keyed by `PredictionKey`.\n \"\"\"\n with ops.name_scope(None, \"predictions\", (logits,)):\n two_class_logits = 
_one_class_to_two_class_logits(logits)\n return {\n prediction_key.PredictionKey.LOGITS:\n logits,\n prediction_key.PredictionKey.LOGISTIC:\n math_ops.sigmoid(\n logits, name=prediction_key.PredictionKey.LOGISTIC),\n prediction_key.PredictionKey.PROBABILITIES:\n nn.softmax(\n two_class_logits,\n name=prediction_key.PredictionKey.PROBABILITIES),\n prediction_key.PredictionKey.CLASSES:\n math_ops.argmax(\n two_class_logits,\n 1,\n name=prediction_key.PredictionKey.CLASSES)\n }\n\n def _metrics(self, eval_loss, predictions, labels, weights):\n \"\"\"Returns a dict of metrics keyed by name.\"\"\"\n with ops.name_scope(\"metrics\", values=(\n [eval_loss, labels, weights] + list(six.itervalues(predictions)))):\n classes = predictions[prediction_key.PredictionKey.CLASSES]\n logistic = predictions[prediction_key.PredictionKey.LOGISTIC]\n\n metrics = {_summary_key(self.head_name, mkey.LOSS):\n metrics_lib.mean(eval_loss)}\n # TODO (b/29366811): This currently results in both an \"accuracy\" and an id:1194 gh:1195\n # \"accuracy/threshold_0.500000_mean\" metric for binary classification.\n metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (\n metrics_lib.accuracy(labels, classes, weights))\n metrics[_summary_key(self.head_name, mkey.PREDICTION_MEAN)] = (\n _predictions_streaming_mean(logistic, weights))\n metrics[_summary_key(self.head_name, mkey.LABEL_MEAN)] = (\n _indicator_labels_streaming_mean(labels, weights))\n\n # Also include the streaming mean of the label as an accuracy baseline, as\n # a reminder to users.\n metrics[_summary_key(self.head_name, mkey.ACCURACY_BASELINE)] = (\n _indicator_labels_streaming_mean(labels, weights))\n metrics[_summary_key(self.head_name, mkey.AUC)] = (\n _streaming_auc(logistic, labels, weights))\n metrics[_summary_key(self.head_name, mkey.AUC_PR)] = (\n _streaming_auc(logistic, labels, weights, curve=\"PR\"))\n\n for threshold in self._thresholds:\n metrics[_summary_key(\n self.head_name, mkey.ACCURACY_MEAN % threshold)] = (\n _streaming_accuracy_at_threshold(logistic, labels, weights,\n threshold))\n # Precision for positive examples.\n metrics[_summary_key(\n self.head_name, mkey.PRECISION_MEAN % threshold)] = (\n _streaming_precision_at_threshold(logistic, labels, weights,\n threshold))\n # Recall for positive examples.\n metrics[_summary_key(\n self.head_name, mkey.RECALL_MEAN % threshold)] = (\n _streaming_recall_at_threshold(logistic, labels, weights,\n threshold))\n\n return metrics\n\n\ndef _softmax_cross_entropy_loss(labels, logits, weights=None):\n with ops.name_scope(\n None, \"softmax_cross_entropy_loss\", (logits, labels,)) as name:\n labels = ops.convert_to_tensor(labels)\n # Check that we got integer for classification.\n if not labels.dtype.is_integer:\n raise ValueError(\"Labels dtype should be integer \"\n \"Instead got %s.\" % labels.dtype)\n\n # sparse_softmax_cross_entropy_with_logits requires [batch_size] labels.\n is_squeezed_labels = False\n # TODO (ptucker): This will break for dynamic shapes. 
id:1019 gh:1020\n if len(labels.get_shape()) == 2:\n labels = array_ops.squeeze(labels, squeeze_dims=(1,))\n is_squeezed_labels = True\n\n loss = nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits, name=name)\n\n # Restore squeezed dimension, if necessary, so loss matches weights shape.\n if is_squeezed_labels:\n loss = array_ops.expand_dims(loss, axis=(1,))\n\n return _compute_weighted_loss(loss, weights)\n\n\nclass _MultiClassHead(_SingleHead):\n \"\"\"'Head' for multi class classification.\"\"\"\n\n def __init__(self,\n n_classes,\n label_name=None,\n weight_column_name=None,\n enable_centered_bias=False,\n head_name=None,\n loss_fn=None,\n thresholds=None,\n metric_class_ids=None,\n label_keys=None):\n \"\"\"'Head' for multi class classification.\n\n This head expects to be fed integer labels specifying the class index. But\n if `label_keys` is specified, then labels must be strings from this\n vocabulary, and the predicted classes will be strings from the same\n vocabulary.\n\n Args:\n n_classes: Number of classes, must be greater than 2 (for 2 classes, use\n `_BinaryLogisticHead`).\n label_name: String, name of the key in label dict. Can be null if label\n is a tensor (single headed models).\n weight_column_name: A string defining feature column name representing\n weights. It is used to down weight or boost examples during training. It\n will be multiplied by the loss of the example.\n enable_centered_bias: A bool. If True, estimator will learn a centered\n bias variable for each class. Rest of the model structure learns the\n residual after centered bias.\n head_name: name of the head. If provided, predictions, summary, metrics\n keys will be suffixed by `\"/\" + head_name` and the default variable\n scope will be `head_name`.\n loss_fn: Loss function. Defaults to softmax cross entropy loss.\n thresholds: thresholds for eval.\n metric_class_ids: List of class IDs for which we should report per-class\n metrics. 
Must all be in the range `[0, n_classes)`.\n label_keys: Optional list of strings with size `[n_classes]` defining the\n label vocabulary.\n\n Raises:\n ValueError: if `n_classes`, `metric_class_ids` or `label_keys` is invalid.\n \"\"\"\n super(_MultiClassHead, self).__init__(\n problem_type=constants.ProblemType.CLASSIFICATION,\n logits_dimension=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n head_name=head_name)\n\n if (n_classes is None) or (n_classes <= 2):\n raise ValueError(\"n_classes must be > 2: %s.\" % n_classes)\n self._thresholds = thresholds if thresholds else (.5,)\n self._loss_fn = loss_fn if loss_fn else _softmax_cross_entropy_loss\n self._enable_centered_bias = enable_centered_bias\n self._metric_class_ids = tuple([] if metric_class_ids is None else\n metric_class_ids)\n for class_id in self._metric_class_ids:\n if (class_id < 0) or (class_id >= n_classes):\n raise ValueError(\"Class ID %s not in [0, %s).\" % (class_id, n_classes))\n if label_keys and len(label_keys) != n_classes:\n raise ValueError(\"Length of label_keys must equal n_classes.\")\n self._label_keys = label_keys\n\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"See `Head`.\"\"\"\n with variable_scope.variable_scope(\n scope,\n default_name=self.head_name or \"multi_class_head\",\n values=(tuple(six.itervalues(features)) +\n (labels, logits, logits_input))):\n labels = self._transform_labels(mode=mode, labels=labels)\n logits = _logits(logits_input, logits, self.logits_dimension)\n return _create_model_fn_ops(\n features=features,\n mode=mode,\n loss_fn=self._wrapped_loss_fn,\n logits_to_predictions_fn=self._logits_to_predictions,\n metrics_fn=self._metrics,\n create_output_alternatives_fn=_classification_output_alternatives(\n self.head_name, self._problem_type, self._label_keys),\n labels=labels,\n train_op_fn=train_op_fn,\n logits=logits,\n logits_dimension=self.logits_dimension,\n head_name=self.head_name,\n weight_column_name=self.weight_column_name,\n enable_centered_bias=self._enable_centered_bias)\n\n def _transform_labels(self, mode, labels):\n \"\"\"Returns a dict that contains both the original labels and label IDs.\"\"\"\n if (mode == model_fn.ModeKeys.INFER) or (labels is None):\n return None\n labels_tensor = _to_labels_tensor(labels, self._label_name)\n _check_no_sparse_tensor(labels_tensor)\n if self._label_keys:\n table = lookup_ops.index_table_from_tensor(\n self._label_keys, name=\"label_id_lookup\")\n return {\n \"labels\": labels_tensor,\n \"label_ids\": table.lookup(labels_tensor),\n }\n return {\n \"labels\": labels_tensor,\n \"label_ids\": labels_tensor,\n }\n\n def _labels(self, labels_dict):\n \"\"\"Returns labels `Tensor` of the same type as classes.\"\"\"\n return labels_dict[\"labels\"]\n\n def _label_ids(self, labels_dict):\n \"\"\"Returns integer label ID `Tensor`.\"\"\"\n return labels_dict[\"label_ids\"]\n\n def _wrapped_loss_fn(self, labels, logits, weights=None):\n return self._loss_fn(self._label_ids(labels), logits, weights=weights)\n\n def _logits_to_predictions(self, logits):\n \"\"\"Returns a dict of predictions.\n\n Args:\n logits: logits `Tensor` after applying possible centered bias.\n\n Returns:\n Dict of prediction `Tensor` keyed by `PredictionKey`.\n \"\"\"\n with ops.name_scope(None, \"predictions\", (logits,)):\n class_ids = math_ops.argmax(\n logits, 1, name=prediction_key.PredictionKey.CLASSES)\n if self._label_keys:\n table = 
lookup_ops.index_to_string_table_from_tensor(\n self._label_keys, name=\"class_string_lookup\")\n classes = table.lookup(class_ids)\n else:\n classes = class_ids\n return {\n prediction_key.PredictionKey.LOGITS: logits,\n prediction_key.PredictionKey.PROBABILITIES:\n nn.softmax(\n logits, name=prediction_key.PredictionKey.PROBABILITIES),\n prediction_key.PredictionKey.CLASSES: classes\n }\n\n def _metrics(self, eval_loss, predictions, labels, weights):\n \"\"\"Returns a dict of metrics keyed by name.\"\"\"\n with ops.name_scope(\n \"metrics\",\n values=((eval_loss, self._labels(labels), self._label_ids(labels),\n weights) + tuple(six.itervalues(predictions)))):\n logits = predictions[prediction_key.PredictionKey.LOGITS]\n probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]\n classes = predictions[prediction_key.PredictionKey.CLASSES]\n\n metrics = {_summary_key(self.head_name, mkey.LOSS):\n metrics_lib.mean(eval_loss)}\n # TODO (b/29366811): This currently results in both an \"accuracy\" and an id:772 gh:773\n # \"accuracy/threshold_0.500000_mean\" metric for binary classification.\n metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (\n metrics_lib.accuracy(self._labels(labels), classes, weights))\n\n if not self._label_keys:\n # Classes are IDs. Add some metrics.\n for class_id in self._metric_class_ids:\n metrics[_summary_key(\n self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (\n _class_predictions_streaming_mean(classes, weights, class_id))\n # TODO (ptucker): Add per-class accuracy, precision, recall. id:709 gh:710\n metrics[_summary_key(\n self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (\n _class_labels_streaming_mean(\n self._label_ids(labels), weights, class_id))\n metrics[_summary_key(\n self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (\n _predictions_streaming_mean(probabilities, weights, class_id))\n metrics[_summary_key(\n self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (\n _predictions_streaming_mean(logits, weights, class_id))\n\n return metrics\n\n\ndef _to_labels_tensor(labels, label_name):\n \"\"\"Returns label as a tensor.\n\n Args:\n labels: Label `Tensor` or `SparseTensor` or a dict containing labels.\n label_name: Label name if labels is a dict.\n\n Returns:\n Label `Tensor` or `SparseTensor`.\n \"\"\"\n labels = labels[label_name] if isinstance(labels, dict) else labels\n return framework_lib.convert_to_tensor_or_sparse_tensor(labels)\n\n\ndef _check_no_sparse_tensor(x):\n \"\"\"Raises ValueError if the given tensor is `SparseTensor`.\"\"\"\n if isinstance(x, sparse_tensor.SparseTensor):\n raise ValueError(\"SparseTensor is not supported.\")\n\n\ndef _sparse_labels_to_indicator(labels, num_classes):\n \"\"\"If labels is `SparseTensor`, converts it to indicator `Tensor`.\n\n Args:\n labels: Label `Tensor` or `SparseTensor`.\n num_classes: Number of classes.\n\n Returns:\n Dense label `Tensor`.\n\n Raises:\n ValueError: If labels is `SparseTensor` and `num_classes` < 2.\n \"\"\"\n if isinstance(labels, sparse_tensor.SparseTensor):\n if num_classes < 2:\n raise ValueError(\"Must set num_classes >= 2 when passing labels as a \"\n \"SparseTensor.\")\n return math_ops.to_int64(\n sparse_ops.sparse_to_indicator(labels, num_classes))\n return labels\n\n\ndef _assert_labels_rank(labels):\n return control_flow_ops.Assert(\n math_ops.less_equal(array_ops.rank(labels), 2),\n (\"labels shape should be either [batch_size, 1] or [batch_size]\",))\n\n\nclass _BinarySvmHead(_SingleHead):\n \"\"\"`Head` for binary classification 
using SVM.\"\"\"\n\n def __init__(self, label_name, weight_column_name, enable_centered_bias,\n head_name, thresholds):\n\n def _loss_fn(labels, logits, weights=None):\n with ops.name_scope(None, \"hinge_loss\", (logits, labels)) as name:\n with ops.control_dependencies((_assert_labels_rank(labels),)):\n labels = array_ops.reshape(labels, shape=(-1, 1))\n loss = losses_lib.hinge_loss(labels=labels, logits=logits, scope=name,\n reduction=losses_lib.Reduction.NONE)\n return _compute_weighted_loss(loss, weights)\n\n super(_BinarySvmHead, self).__init__(\n problem_type=constants.ProblemType.LOGISTIC_REGRESSION,\n logits_dimension=1,\n label_name=label_name,\n weight_column_name=weight_column_name,\n head_name=head_name)\n self._thresholds = thresholds if thresholds else (.5,)\n self._loss_fn = _loss_fn\n self._enable_centered_bias = enable_centered_bias\n\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"See `Head`.\"\"\"\n with variable_scope.variable_scope(\n scope,\n default_name=self.head_name or \"binary_svm_head\",\n values=(tuple(six.itervalues(features)) +\n (labels, logits, logits_input))):\n labels = self._transform_labels(mode=mode, labels=labels)\n logits = _logits(logits_input, logits, self.logits_dimension)\n return _create_model_fn_ops(\n features=features,\n mode=mode,\n loss_fn=self._loss_fn,\n logits_to_predictions_fn=self._logits_to_predictions,\n metrics_fn=self._metrics,\n # TODO (zakaria): Handle labels for export. id:734 gh:735\n create_output_alternatives_fn=self._create_output_alternatives,\n labels=labels,\n train_op_fn=train_op_fn,\n logits=logits,\n logits_dimension=self.logits_dimension,\n head_name=self.head_name,\n weight_column_name=self.weight_column_name,\n enable_centered_bias=self._enable_centered_bias)\n\n def _transform_labels(self, mode, labels):\n \"\"\"Applies transformations to labels tensor.\"\"\"\n if (mode == model_fn.ModeKeys.INFER) or (labels is None):\n return None\n labels_tensor = _to_labels_tensor(labels, self._label_name)\n _check_no_sparse_tensor(labels_tensor)\n return labels_tensor\n\n def _logits_to_predictions(self, logits):\n \"\"\"See `_MultiClassHead`.\"\"\"\n with ops.name_scope(None, \"predictions\", (logits,)):\n return {\n prediction_key.PredictionKey.LOGITS:\n logits,\n prediction_key.PredictionKey.CLASSES:\n math_ops.argmax(\n _one_class_to_two_class_logits(logits),\n 1,\n name=prediction_key.PredictionKey.CLASSES)\n }\n\n def _metrics(self, eval_loss, predictions, labels, weights):\n \"\"\"See `_MultiClassHead`.\"\"\"\n with ops.name_scope(\"metrics\", values=(\n [eval_loss, labels, weights] + list(six.itervalues(predictions)))):\n metrics = {_summary_key(self.head_name, mkey.LOSS):\n metrics_lib.mean(eval_loss)}\n\n # TODO (b/29366811): This currently results in both an \"accuracy\" and an id:1197 gh:1198\n # \"accuracy/threshold_0.500000_mean\" metric for binary classification.\n classes = predictions[prediction_key.PredictionKey.CLASSES]\n metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (\n metrics_lib.accuracy(labels, classes, weights))\n # TODO (sibyl-vie3Poto): add more metrics relevant for svms. id:1023 gh:1024\n\n return metrics\n\n\nclass _MultiLabelHead(_SingleHead):\n \"\"\"`Head` for multi-label classification.\"\"\"\n\n # TODO (zakaria): add signature and metric for multilabel. 
id:774 gh:775\n def __init__(self,\n n_classes,\n label_name,\n weight_column_name,\n enable_centered_bias,\n head_name,\n thresholds,\n metric_class_ids=None,\n loss_fn=None):\n\n super(_MultiLabelHead, self).__init__(\n problem_type=constants.ProblemType.CLASSIFICATION,\n logits_dimension=n_classes,\n label_name=label_name,\n weight_column_name=weight_column_name,\n head_name=head_name)\n\n self._thresholds = thresholds if thresholds else (.5,)\n self._loss_fn = loss_fn if loss_fn else _sigmoid_cross_entropy_loss\n self._enable_centered_bias = enable_centered_bias\n self._metric_class_ids = tuple([] if metric_class_ids is None else\n metric_class_ids)\n for class_id in self._metric_class_ids:\n if (class_id < 0) or (class_id >= n_classes):\n raise ValueError(\"Class ID %s not in [0, %s).\" % (class_id, n_classes))\n\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"See `Head`.\"\"\"\n with variable_scope.variable_scope(\n scope,\n default_name=self.head_name or \"multi_label_head\",\n values=(tuple(six.itervalues(features)) +\n (labels, logits, logits_input))):\n labels = self._transform_labels(mode=mode, labels=labels)\n logits = _logits(logits_input, logits, self.logits_dimension)\n return _create_model_fn_ops(\n features=features,\n mode=mode,\n loss_fn=self._loss_fn,\n logits_to_predictions_fn=self._logits_to_predictions,\n metrics_fn=self._metrics,\n create_output_alternatives_fn=_classification_output_alternatives(\n self.head_name, self._problem_type),\n labels=labels,\n train_op_fn=train_op_fn,\n logits=logits,\n logits_dimension=self.logits_dimension,\n head_name=self.head_name,\n weight_column_name=self.weight_column_name,\n enable_centered_bias=self._enable_centered_bias)\n\n def _transform_labels(self, mode, labels):\n \"\"\"Applies transformations to labels tensor.\"\"\"\n if (mode == model_fn.ModeKeys.INFER) or (labels is None):\n return None\n labels_tensor = _to_labels_tensor(labels, self._label_name)\n labels_tensor = _sparse_labels_to_indicator(labels_tensor,\n self._logits_dimension)\n return labels_tensor\n\n def _logits_to_predictions(self, logits):\n \"\"\"See `_MultiClassHead`.\"\"\"\n with ops.name_scope(None, \"predictions\", (logits,)):\n return {\n prediction_key.PredictionKey.LOGITS:\n logits,\n prediction_key.PredictionKey.PROBABILITIES:\n math_ops.sigmoid(\n logits, name=prediction_key.PredictionKey.PROBABILITIES),\n prediction_key.PredictionKey.CLASSES:\n math_ops.to_int64(\n math_ops.greater(logits, 0),\n name=prediction_key.PredictionKey.CLASSES)\n }\n\n def _metrics(self, eval_loss, predictions, labels, weights):\n \"\"\"Returns a dict of metrics keyed by name.\"\"\"\n with ops.name_scope(\"metrics\", values=(\n [eval_loss, labels, weights] + list(six.itervalues(predictions)))):\n classes = predictions[prediction_key.PredictionKey.CLASSES]\n probabilities = predictions[prediction_key.PredictionKey.PROBABILITIES]\n logits = predictions[prediction_key.PredictionKey.LOGITS]\n\n metrics = {_summary_key(self.head_name, mkey.LOSS):\n metrics_lib.mean(eval_loss)}\n # TODO (b/29366811): This currently results in both an \"accuracy\" and an id:713 gh:714\n # \"accuracy/threshold_0.500000_mean\" metric for binary classification.\n metrics[_summary_key(self.head_name, mkey.ACCURACY)] = (\n metrics_lib.accuracy(labels, classes, weights))\n metrics[_summary_key(self.head_name, mkey.AUC)] = _streaming_auc(\n probabilities, labels, weights)\n 
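# Area under the precision-recall curve, computed with the same streaming\n # AUC op as above but with curve=\"PR\".\n 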
metrics[_summary_key(self.head_name, mkey.AUC_PR)] = _streaming_auc(\n probabilities, labels, weights, curve=\"PR\")\n\n for class_id in self._metric_class_ids:\n # TODO (ptucker): Add per-class accuracy, precision, recall.\n metrics[_summary_key(\n self.head_name, mkey.CLASS_PREDICTION_MEAN % class_id)] = (\n _predictions_streaming_mean(classes, weights, class_id))\n metrics[_summary_key(\n self.head_name, mkey.CLASS_LABEL_MEAN % class_id)] = (\n _indicator_labels_streaming_mean(labels, weights, class_id))\n metrics[_summary_key(\n self.head_name, mkey.CLASS_PROBABILITY_MEAN % class_id)] = (\n _predictions_streaming_mean(probabilities, weights, class_id))\n metrics[_summary_key(\n self.head_name, mkey.CLASS_LOGITS_MEAN % class_id)] = (\n _predictions_streaming_mean(logits, weights, class_id))\n metrics[_summary_key(self.head_name, mkey.CLASS_AUC % class_id)] = (\n _streaming_auc(probabilities, labels, weights, class_id))\n metrics[_summary_key(self.head_name, mkey.CLASS_AUC_PR % class_id)] = (\n _streaming_auc(probabilities, labels, weights, class_id,\n curve=\"PR\"))\n\n return metrics\n\n\nclass _LossOnlyHead(Head):\n \"\"\"`Head` implementation for additional loss terms.\n\n This class only holds loss terms unrelated to any other heads (labels),\n e.g. regularization.\n\n Common usage:\n This is often combined with other heads in a multi-head setup.\n ```python\n head = multi_head([\n head1, head2, loss_only_head('regularizer', regularizer)])\n ```\n \"\"\"\n\n def __init__(self, loss_fn, head_name=None):\n self._loss_fn = loss_fn\n self.head_name = head_name or \"loss_only_head\"\n\n @property\n def logits_dimension(self):\n return 0\n\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"See `_Head.create_model_fn_ops`.\n\n Args:\n features: Not used.\n mode: Estimator's `ModeKeys`.\n labels: Labels `Tensor`, or `dict` of same.\n train_op_fn: Function that takes a scalar loss and returns an op to\n optimize with the loss.\n logits: Not used.\n logits_input: Not used.\n scope: Optional scope for variable_scope. If provided, will be passed to\n all heads. 
Most users will want to set this to `None`, so each head\n constructs a separate variable_scope according to its `head_name`.\n\n Returns:\n A `ModelFnOps` object.\n\n Raises:\n ValueError: if `mode` is not recognized.\n \"\"\"\n _check_mode_valid(mode)\n loss = None\n train_op = None\n if mode != model_fn.ModeKeys.INFER:\n with variable_scope.variable_scope(scope, default_name=self.head_name):\n loss = self._loss_fn()\n if isinstance(loss, list):\n loss = math_ops.add_n(loss)\n # The name_scope escapism is needed to maintain the same summary tag\n # after switching away from the now unsupported API.\n with ops.name_scope(\"\"):\n summary_loss = array_ops.identity(loss)\n summary.scalar(_summary_key(self.head_name, mkey.LOSS),\n summary_loss)\n if mode == model_fn.ModeKeys.TRAIN:\n if train_op_fn is None:\n raise ValueError(\"train_op_fn can not be None in TRAIN mode\")\n with ops.name_scope(None, \"train_op\", (loss,)):\n train_op = train_op_fn(loss)\n\n return model_fn.ModelFnOps(\n mode=mode,\n loss=loss,\n train_op=train_op,\n predictions={},\n eval_metric_ops={})\n\n\nclass _MultiHead(Head):\n \"\"\"`Head` implementation for multi objective learning.\n\n This class is responsible for using and merging the output of multiple\n `Head` objects.\n\n All heads stem from the same logits/logit_input tensor.\n\n Common usage:\n For simple use cases you can pass the activation of a hidden layer like\n this from your model_fn,\n ```python\n last_hidden_layer_activation = ... Build your model.\n multi_head = ...\n return multi_head.create_model_fn_ops(\n ..., logits_input=last_hidden_layer_activation, ...)\n ```\n\n Or you can create a logits tensor of\n [batch_size, multi_head.logits_dimension] shape. _MultiHead will split the\n logits for you.\n return multi_head.create_model_fn_ops(..., logits=logits, ...)\n\n For more complex use cases like a multi-task/multi-tower model, or when the\n logits for each head have to be created separately, you can pass a dict of\n logits where the keys match the names of the single heads.\n ```python\n logits = {\"head1\": logits1, \"head2\": logits2}\n return multi_head.create_model_fn_ops(..., logits=logits, ...)\n ```\n\n Here is what this class does:\n + For training, merges the losses of all heads according to a function\n provided by the user, then calls the user-provided train_op_fn with this\n final loss.\n + For eval, merges metrics by adding a head_name suffix to the keys in eval\n metrics.\n + For inference, updates keys in the prediction dict to a 2-tuple,\n (head_name, prediction_key).\n \"\"\"\n\n def __init__(self, heads, loss_merger):\n \"\"\"_Head that merges multiple _Head objects.\n\n Args:\n heads: list of _Head objects.\n loss_merger: function that takes a list of loss tensors for the heads\n and returns the final loss tensor for the multi head.\n\n Raises:\n ValueError: if any head does not have a name.\n \"\"\"\n self._logits_dimension = 0\n for head in heads:\n if not head.head_name:\n raise ValueError(\"Members of MultiHead must have names.\")\n self._logits_dimension += head.logits_dimension\n\n self._heads = heads\n self._loss_merger = loss_merger\n\n @property\n def logits_dimension(self):\n return self._logits_dimension\n\n def create_model_fn_ops(self,\n features,\n mode,\n labels=None,\n train_op_fn=None,\n logits=None,\n logits_input=None,\n scope=None):\n \"\"\"See `_Head.create_model_fn_ops`.\n\n Args:\n features: Input `dict` of `Tensor` objects.\n mode: Estimator's `ModeKeys`.\n labels: Labels `Tensor`, or `dict` of same.\n train_op_fn: Function that takes a 
scalar loss and returns an op to\n optimize with the loss.\n logits: Concatenated logits for all heads or a dict of head name to logits\n tensor. If concatenated logits, it should have (batch_size, x) shape\n where x is the sum of `logits_dimension` of all the heads,\n i.e., same as `logits_dimension` of this class. create_model_fn_ops\n will split the logits tensor and pass logits of proper size to each\n head. This is useful if you want to be agnostic about whether you are\n creating a single head or a multi head. logits can also be a dict for\n convenience where you are creating the head-specific logits explicitly\n and don't want to concatenate them yourself.\n logits_input: tensor to build logits from.\n scope: Optional scope for variable_scope. If provided, will be passed to\n all heads. Most users will want to set this to `None`, so each head\n constructs a separate variable_scope according to its `head_name`.\n\n Returns:\n `ModelFnOps`.\n\n Raises:\n ValueError: if `mode` is not recognized, or if neither or both of `logits`\n and `logits_input` are provided.\n \"\"\"\n _check_mode_valid(mode)\n all_model_fn_ops = []\n if logits is None:\n # Use logits_input.\n for head in self._heads:\n all_model_fn_ops.append(\n head.create_model_fn_ops(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=no_op_train_fn,\n logits_input=logits_input,\n scope=scope))\n else:\n head_logits_pairs = []\n if isinstance(logits, dict):\n for head in self._heads:\n if isinstance(head, _LossOnlyHead):\n head_logits_pairs.append((head, None))\n else:\n head_logits_pairs.append((head, logits[head.head_name]))\n else:\n # Split logits for each head.\n head_logits_pairs = zip(self._heads, self._split_logits(logits))\n\n for head, head_logits in head_logits_pairs:\n all_model_fn_ops.append(\n head.create_model_fn_ops(\n features=features,\n mode=mode,\n labels=labels,\n train_op_fn=no_op_train_fn,\n logits=head_logits,\n scope=scope))\n\n if mode == model_fn.ModeKeys.TRAIN:\n if train_op_fn is None:\n raise ValueError(\"train_op_fn can not be None in TRAIN mode.\")\n return self._merge_train(all_model_fn_ops, train_op_fn)\n if mode == model_fn.ModeKeys.INFER:\n return self._merge_infer(all_model_fn_ops)\n if mode == model_fn.ModeKeys.EVAL:\n return self._merge_eval(all_model_fn_ops)\n raise ValueError(\"mode=%s unrecognized\" % str(mode))\n\n def _split_logits(self, logits):\n \"\"\"Splits logits for heads.\n\n Args:\n logits: the logits tensor.\n\n Returns:\n A list of logits for the individual heads.\n \"\"\"\n all_logits = []\n begin = 0\n for head in self._heads:\n current_logits_size = head.logits_dimension\n current_logits = array_ops.slice(logits, [0, begin],\n [-1, current_logits_size])\n all_logits.append(current_logits)\n begin += current_logits_size\n return all_logits\n\n def _merge_train(self, all_model_fn_ops, train_op_fn):\n \"\"\"Merges list of ModelFnOps for training.\n\n Args:\n all_model_fn_ops: list of ModelFnOps for the individual heads.\n train_op_fn: Function to create train op. 
See `create_model_fn_ops`\n documentation for more details.\n\n Returns:\n ModelFnOps that merges all heads for TRAIN.\n \"\"\"\n losses = []\n metrics = {}\n additional_train_ops = []\n for m in all_model_fn_ops:\n losses.append(m.loss)\n if m.eval_metric_ops is not None:\n for k, v in six.iteritems(m.eval_metric_ops):\n # metrics[\"%s/%s\" % (k, head_name)] = v\n metrics[k] = v\n additional_train_ops.append(m.train_op)\n loss = self._loss_merger(losses)\n\n train_op = train_op_fn(loss)\n train_op = control_flow_ops.group(train_op, *additional_train_ops)\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.TRAIN,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=metrics)\n\n def _merge_infer(self, all_model_fn_ops):\n \"\"\"Merges list of ModelFnOps for inference.\n\n Args:\n all_model_fn_ops: list of ModelFnOps for the individual heads.\n\n Returns:\n ModelFnOps that merges all the heads for INFER.\n \"\"\"\n predictions = {}\n output_alternatives = {}\n for head, m in zip(self._heads, all_model_fn_ops):\n if isinstance(head, _LossOnlyHead):\n continue\n head_name = head.head_name\n output_alternatives[head_name] = m.output_alternatives[head_name]\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.INFER,\n predictions=predictions,\n output_alternatives=output_alternatives)\n\n def _merge_eval(self, all_model_fn_ops):\n \"\"\"Merges list of ModelFnOps for eval.\n\n Args:\n all_model_fn_ops: list of ModelFnOps for the individual heads.\n\n Returns:\n ModelFnOps that merges all the heads for EVAL.\n \"\"\"\n predictions = {}\n metrics = {}\n losses = []\n for head, m in zip(self._heads, all_model_fn_ops):\n losses.append(m.loss)\n head_name = head.head_name\n for k, v in m.predictions.items():\n predictions[(head_name, k)] = v\n for k, v in m.eval_metric_ops.items():\n # metrics[\"%s/%s\" % (k, head_name)] = v\n metrics[k] = v\n loss = self._loss_merger(losses)\n\n return model_fn.ModelFnOps(\n mode=model_fn.ModeKeys.EVAL,\n predictions=predictions,\n loss=loss,\n eval_metric_ops=metrics)\n\n\ndef _weight_tensor(features, weight_column_name):\n \"\"\"Returns weights as `Tensor` of rank 0, or at least 2.\"\"\"\n if not weight_column_name:\n return None\n if weight_column_name not in features:\n raise ValueError(\"Weights {} missing from features.\".format(\n weight_column_name))\n with ops.name_scope(None, \"weight_tensor\", tuple(six.itervalues(features))):\n weight_tensor = math_ops.to_float(features[weight_column_name])\n shape = weight_tensor.get_shape()\n rank = shape.ndims\n # We don't bother with expanding dims of non-statically shaped tensors or\n # scalars, and >1d is already in a good format.\n if rank == 1:\n logging.warning(\"Weights {} has shape {}, expanding to make it 2d.\".\n format(weight_column_name, shape))\n return (\n sparse_ops.sparse_reshape(weight_tensor, (-1, 1))\n if isinstance(weight_tensor, sparse_tensor.SparseTensor) else\n array_ops.reshape(weight_tensor, (-1, 1)))\n return weight_tensor\n\n\n# TODO (zakaria): This function is needed for backward compatibility and should\n# be removed when we migrate to core.\ndef _compute_weighted_loss(loss_unweighted, weight, name=\"loss\"):\n \"\"\"Returns a tuple of (loss_train, loss_report).\n\n loss is used for gradient descent while weighted_average_loss is used for\n summaries to be backward compatible.\n\n loss is different from the loss reported on TensorBoard as we\n should respect the example weights when computing the 
gradient.\n\n L = sum_{i} w_{i} * l_{i} / B\n\n where B is the number of examples in the batch, and l_{i} and w_{i} are the\n individual losses and example weights.\n\n Args:\n loss_unweighted: Unweighted loss\n weight: Weight tensor\n name: Optional name\n\n Returns:\n A tuple of losses. First one for training and the second one for reporting.\n \"\"\"\n with ops.name_scope(name, values=(loss_unweighted, weight)) as name_scope:\n if weight is None:\n loss = math_ops.reduce_mean(loss_unweighted, name=name_scope)\n return loss, loss\n weight = weights_broadcast_ops.broadcast_weights(weight, loss_unweighted)\n with ops.name_scope(None, \"weighted_loss\",\n (loss_unweighted, weight)) as name:\n weighted_loss = math_ops.multiply(loss_unweighted, weight, name=name)\n weighted_loss_mean = math_ops.reduce_mean(weighted_loss, name=name_scope)\n weighted_loss_normalized = math_ops.div(\n math_ops.reduce_sum(weighted_loss),\n math_ops.to_float(math_ops.reduce_sum(weight)),\n name=\"weighted_average_loss\")\n\n return weighted_loss_mean, weighted_loss_normalized\n\n\ndef _wrap_custom_loss_fn(loss_fn):\n def _wrapper(labels, logits, weights=None):\n if weights is None:\n loss = loss_fn(labels, logits)\n else:\n loss = loss_fn(labels, logits, weights)\n return loss, loss\n return _wrapper\n\n\ndef _check_mode_valid(mode):\n \"\"\"Raises ValueError if the given mode is invalid.\"\"\"\n if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and\n mode != model_fn.ModeKeys.EVAL):\n raise ValueError(\"mode=%s unrecognized.\" % str(mode))\n\n\ndef _get_arguments(func):\n \"\"\"Returns the argument spec of the given func.\"\"\"\n _, func = tf_decorator.unwrap(func)\n if hasattr(func, \"__code__\"):\n # Regular function.\n return tf_inspect.getargspec(func)\n elif hasattr(func, \"__call__\"):\n # Callable object.\n return _get_arguments(func.__call__)\n elif hasattr(func, \"func\"):\n # Partial function.\n return _get_arguments(func.func)\n\n\ndef _verify_loss_fn_args(loss_fn):\n args = _get_arguments(loss_fn).args\n for arg_name in [\"labels\", \"logits\", \"weights\"]:\n if arg_name not in args:\n raise ValueError(\"Argument %s not found in loss_fn.\" % arg_name)\n\n\ndef _centered_bias(logits_dimension, head_name=None):\n \"\"\"Returns centered_bias `Variable`.\n\n Args:\n logits_dimension: Last dimension of `logits`. 
Must be >= 1.\n head_name: Optional name of the head.\n\n Returns:\n `Variable` with shape `[logits_dimension]`.\n\n Raises:\n ValueError: if `logits_dimension` is invalid.\n \"\"\"\n if (logits_dimension is None) or (logits_dimension < 1):\n raise ValueError(\"Invalid logits_dimension %s.\" % logits_dimension)\n # Do not create a variable with variable_scope.get_variable, because that may\n # create a PartitionedVariable, which does not support indexing, so\n # summary.scalar will not work.\n centered_bias = variable_scope.variable(\n name=\"centered_bias_weight\",\n initial_value=array_ops.zeros(shape=(logits_dimension,)),\n trainable=True)\n for dim in range(logits_dimension):\n if head_name:\n summary.scalar(\"centered_bias/bias_%d/%s\" % (dim, head_name),\n centered_bias[dim])\n else:\n summary.scalar(\"centered_bias/bias_%d\" % dim, centered_bias[dim])\n return centered_bias\n\n\ndef _centered_bias_step(centered_bias, batch_size, labels, loss_fn, weights):\n \"\"\"Creates and returns training op for centered bias.\"\"\"\n with ops.name_scope(None, \"centered_bias_step\", (labels,)) as name:\n logits_dimension = array_ops.shape(centered_bias)[0]\n logits = array_ops.reshape(\n array_ops.tile(centered_bias, (batch_size,)),\n (batch_size, logits_dimension))\n with ops.name_scope(None, \"centered_bias\", (labels, logits)):\n centered_bias_loss = math_ops.reduce_mean(\n loss_fn(labels, logits, weights), name=\"training_loss\")\n # Learn the centered bias with an optimizer; 0.1 is a conservative\n # learning rate for a single variable.\n return training.AdagradOptimizer(0.1).minimize(\n centered_bias_loss, var_list=(centered_bias,), name=name)\n\n\ndef _summary_key(head_name, val):\n return \"%s/%s\" % (val, head_name) if head_name else val\n\n\ndef _train_op(loss, labels, train_op_fn, centered_bias, batch_size, loss_fn,\n weights):\n \"\"\"Returns op for the training step.\"\"\"\n if centered_bias is not None:\n centered_bias_step = _centered_bias_step(\n centered_bias=centered_bias,\n batch_size=batch_size,\n labels=labels,\n loss_fn=loss_fn,\n weights=weights)\n else:\n centered_bias_step = None\n with ops.name_scope(None, \"train_op\", (loss, labels)):\n train_op = train_op_fn(loss)\n if centered_bias_step is not None:\n train_op = control_flow_ops.group(train_op, centered_bias_step)\n return train_op\n\n\ndef _sigmoid_cross_entropy_loss(labels, logits, weights=None):\n with ops.name_scope(None, \"sigmoid_cross_entropy_loss\",\n (logits, labels)) as name:\n # sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.\n loss = nn.sigmoid_cross_entropy_with_logits(\n labels=math_ops.to_float(labels), logits=logits, name=name)\n return _compute_weighted_loss(loss, weights)\n\n\ndef _float_weights_or_none(weights):\n if weights is None:\n return None\n with ops.name_scope(None, \"float_weights\", (weights,)) as name:\n return math_ops.to_float(weights, name=name)\n\n\ndef _indicator_labels_streaming_mean(labels, weights=None, class_id=None):\n labels = math_ops.to_float(labels)\n weights = _float_weights_or_none(weights)\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(weights, labels)\n if class_id is not None:\n if weights is not None:\n weights = weights[:, class_id]\n labels = labels[:, class_id]\n return metrics_lib.mean(labels, weights)\n\n\ndef _predictions_streaming_mean(predictions,\n weights=None,\n class_id=None):\n predictions = math_ops.to_float(predictions)\n weights = _float_weights_or_none(weights)\n if weights is not None:\n weights = 
weights_broadcast_ops.broadcast_weights(weights, predictions)\n if class_id is not None:\n if weights is not None:\n weights = weights[:, class_id]\n predictions = predictions[:, class_id]\n return metrics_lib.mean(predictions, weights)\n\n\n# TODO (ptucker): Add support for SparseTensor labels.\ndef _class_id_labels_to_indicator(labels, num_classes):\n if (num_classes is None) or (num_classes < 2):\n raise ValueError(\"Invalid num_classes %s.\" % num_classes)\n with ops.control_dependencies((_assert_labels_rank(labels),)):\n labels = array_ops.reshape(labels, (-1,))\n return array_ops.one_hot(labels, depth=num_classes, axis=-1)\n\n\ndef _class_predictions_streaming_mean(predictions, weights, class_id):\n return metrics_lib.mean(\n array_ops.where(\n math_ops.equal(\n math_ops.to_int32(class_id), math_ops.to_int32(predictions)),\n array_ops.ones_like(predictions),\n array_ops.zeros_like(predictions)),\n weights=weights)\n\n\ndef _class_labels_streaming_mean(labels, weights, class_id):\n return metrics_lib.mean(\n array_ops.where(\n math_ops.equal(\n math_ops.to_int32(class_id), math_ops.to_int32(labels)),\n array_ops.ones_like(labels), array_ops.zeros_like(labels)),\n weights=weights)\n\n\ndef _streaming_auc(predictions, labels, weights=None, class_id=None,\n curve=\"ROC\"):\n # pylint: disable=missing-docstring\n predictions = math_ops.to_float(predictions)\n if labels.dtype.base_dtype != dtypes.bool:\n logging.warning(\"Casting %s labels to bool.\", labels.dtype)\n labels = math_ops.cast(labels, dtypes.bool)\n weights = _float_weights_or_none(weights)\n if weights is not None:\n weights = weights_broadcast_ops.broadcast_weights(weights, predictions)\n if class_id is not None:\n if weights is not None:\n weights = weights[:, class_id]\n predictions = predictions[:, class_id]\n labels = labels[:, class_id]\n return metrics_lib.auc(labels, predictions, weights, curve=curve)\n\n\ndef _assert_class_id(class_id, num_classes=None):\n \"\"\"Raises ValueError if `class_id` or `num_classes` is invalid.\"\"\"\n if (class_id is None) or (class_id < 0):\n raise ValueError(\"Invalid class_id %s.\" % class_id)\n if num_classes is not None:\n if num_classes < 2:\n raise ValueError(\"Invalid num_classes %s.\" % num_classes)\n if class_id >= num_classes:\n raise ValueError(\"Invalid class_id %s.\" % class_id)\n\n\ndef _streaming_accuracy_at_threshold(predictions, labels, weights, threshold):\n threshold_predictions = math_ops.to_float(\n math_ops.greater_equal(predictions, threshold))\n return metrics_lib.accuracy(labels, threshold_predictions, weights)\n\n\ndef _streaming_precision_at_threshold(predictions, labels, weights, threshold):\n precision_tensor, update_op = metrics_lib.precision_at_thresholds(\n labels, predictions, (threshold,), _float_weights_or_none(weights))\n return array_ops.squeeze(precision_tensor), array_ops.squeeze(update_op)\n\n\ndef _streaming_recall_at_threshold(predictions, labels, weights, threshold):\n recall_tensor, update_op = metrics_lib.recall_at_thresholds(\n labels, predictions, (threshold,), _float_weights_or_none(weights))\n return array_ops.squeeze(recall_tensor), array_ops.squeeze(update_op)\n\n\ndef _classification_output_alternatives(head_name, problem_type,\n label_keys=None):\n \"\"\"Creates a func to generate output alternatives for classification.\n\n Servo expects classes to be a string tensor, and have the same dimensions\n as the probabilities tensor. It should contain the labels of the corresponding\n entries in probabilities. 
This function creates a new classes tensor that\n satisfies these conditions and can be exported.\n\n Args:\n head_name: Name of the head.\n problem_type: `ProblemType`\n label_keys: Optional label keys\n\n Returns:\n A function to generate output alternatives.\n \"\"\"\n def _create_output_alternatives(predictions):\n \"\"\"Creates output alternative for the Head.\n\n Args:\n predictions: a dict of {tensor_name: Tensor}, where 'tensor_name' is a\n symbolic name for an output Tensor possibly but not necessarily taken\n from `PredictionKey`, and 'Tensor' is the corresponding output Tensor\n itself.\n\n Returns:\n `dict` of {submodel_name: (problem_type, {tensor_name: Tensor})}, where\n 'submodel_name' is a submodel identifier that should be consistent across\n the pipeline (here likely taken from the head_name),\n 'problem_type' is a `ProblemType`,\n 'tensor_name' is a symbolic name for an output Tensor possibly but not\n necessarily taken from `PredictionKey`, and\n 'Tensor' is the corresponding output Tensor itself.\n\n Raises:\n ValueError: if predictions does not have PredictionKey.PROBABILITIES key.\n \"\"\"\n probabilities = predictions.get(prediction_key.PredictionKey.PROBABILITIES)\n if probabilities is None:\n raise ValueError(\"%s missing in predictions\" %\n prediction_key.PredictionKey.PROBABILITIES)\n\n with ops.name_scope(None, \"_classification_output_alternatives\",\n (probabilities,)):\n batch_size = array_ops.shape(probabilities)[0]\n if label_keys:\n classes = array_ops.tile(\n input=array_ops.expand_dims(input=label_keys, axis=0),\n multiples=[batch_size, 1],\n name=\"classes_tensor\")\n else:\n n = array_ops.shape(probabilities)[1]\n classes = array_ops.tile(\n input=array_ops.expand_dims(input=math_ops.range(n), axis=0),\n multiples=[batch_size, 1])\n classes = string_ops.as_string(classes, name=\"classes_tensor\")\n\n exported_predictions = {\n prediction_key.PredictionKey.PROBABILITIES: probabilities,\n prediction_key.PredictionKey.CLASSES: classes}\n return {head_name: (problem_type, exported_predictions)}\n\n return _create_output_alternatives\n\n# Aliases\n# TODO (zakaria): Remove these aliases, See b/34751732 id:776 gh:777\n_regression_head = regression_head\n_poisson_regression_head = poisson_regression_head\n_multi_class_head = multi_class_head\n_binary_svm_head = binary_svm_head\n_multi_label_head = multi_label_head\n_multi_head = multi_head\n_Head = Head\n",
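A minimal sketch of how the heads defined above compose, assuming the `multi_class_head`, `regression_head` and `multi_head` factories aliased at the end of the file; the feature name, label names, layer sizes and optimizer choice are illustrative, not taken from the source:

```python
# Hypothetical sketch: two named heads sharing one concatenated logits tensor.
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib

class_head = head_lib.multi_class_head(
    n_classes=3, label_name="class", head_name="class")
reg_head = head_lib.regression_head(label_name="value", head_name="value")
combined = head_lib.multi_head([class_head, reg_head])

def model_fn(features, labels, mode):
  net = tf.contrib.layers.fully_connected(features["x"], 16)
  # Width 4 = 3 (multi-class logits) + 1 (regression output); _MultiHead
  # splits this tensor per head via _split_logits.
  logits = tf.contrib.layers.fully_connected(
      net, combined.logits_dimension, activation_fn=None)
  return combined.create_model_fn_ops(
      features=features,
      mode=mode,
      labels=labels,  # dict keyed by each head's label_name
      train_op_fn=lambda loss: tf.train.AdagradOptimizer(0.1).minimize(loss),
      logits=logits)
```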
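The formula in the `_compute_weighted_loss` docstring can be checked with plain numbers. This tiny sketch (made-up values) contrasts the training loss, which divides by the batch size B, with the weight-normalized reporting loss:

```python
# Illustrative numbers only. With l = [1.0, 3.0] and w = [1.0, 0.0]:
losses = [1.0, 3.0]   # per-example losses l_i
weights = [1.0, 0.0]  # per-example weights w_i
weighted = [w * l for w, l in zip(weights, losses)]
train_loss = sum(weighted) / len(losses)    # sum_i w_i*l_i / B      -> 0.5
report_loss = sum(weighted) / sum(weights)  # weighted_average_loss  -> 1.0
```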
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Batching dataset transformations.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import sparse\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import math_ops\n\n\ndef dense_to_sparse_batch(batch_size, row_shape):\n \"\"\"A transformation that batches ragged elements into `tf.SparseTensor`s.\n\n Like `Dataset.padded_batch()`, this transformation combines multiple\n consecutive elements of the dataset, which might have different\n shapes, into a single element. The resulting element has three\n components (`indices`, `values`, and `dense_shape`), which\n comprise a `tf.SparseTensor` that represents the same data. The\n `row_shape` represents the dense shape of each row in the\n resulting `tf.SparseTensor`, to which the effective batch size is\n prepended. For example:\n\n ```python\n # NOTE: The following examples use `{ ... }` to represent the id:515 gh:516\n # contents of a dataset.\n a = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }\n\n a.apply(tf.contrib.data.dense_to_sparse_batch(batch_size=2, row_shape=[6])) ==\n {\n ([[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]], # indices\n ['a', 'b', 'c', 'a', 'b'], # values\n [2, 6]), # dense_shape\n ([[0, 0], [0, 1], [0, 2], [0, 3]],\n ['a', 'b', 'c', 'd'],\n [1, 6])\n }\n ```\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the\n number of consecutive elements of this dataset to combine in a\n single batch.\n row_shape: A `tf.TensorShape` or `tf.int64` vector tensor-like\n object representing the equivalent dense shape of a row in the\n resulting `tf.SparseTensor`. 
Each element of this dataset must\n have the same rank as `row_shape`, and must have size less\n than or equal to `row_shape` in each dimension.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.data.Dataset.apply}.\n \"\"\"\n\n def _apply_fn(dataset):\n return DenseToSparseBatchDataset(dataset, batch_size, row_shape)\n\n return _apply_fn\n\n\ndef unbatch():\n \"\"\"A Transformation which splits the elements of a dataset.\n\n For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,\n where `B` may vary from element to element, then for each element in\n the dataset, the unbatched dataset will contain `B` consecutive elements\n of shape `[a0, a1, ...]`.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.data.Dataset.apply}.\n \"\"\"\n\n def _apply_fn(dataset):\n\n def unbatch_map(arg, *rest):\n if rest:\n return dataset_ops.Dataset.from_tensor_slices((arg,) + rest)\n else:\n return dataset_ops.Dataset.from_tensor_slices(arg)\n\n return dataset.flat_map(map_func=unbatch_map)\n\n return _apply_fn\n\n\ndef filter_irregular_batches(batch_size):\n \"\"\"Transformation that filters out batches that are not of size batch_size.\"\"\"\n\n def _apply_fn(dataset):\n \"\"\"Function from `Dataset` to `Dataset` that applies the transformation.\"\"\"\n tensor_batch_size = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n\n flattened = _RestructuredDataset(\n dataset,\n tuple(nest.flatten(dataset.output_types)),\n output_classes=tuple(nest.flatten(dataset.output_classes)))\n\n def _predicate(*xs):\n \"\"\"Return `True` if this element is a full batch.\"\"\"\n # Extract the dynamic batch size from the first component of the flattened\n # batched element.\n first_component = xs[0]\n first_component_batch_size = array_ops.shape(\n first_component, out_type=dtypes.int64)[0]\n\n return math_ops.equal(first_component_batch_size, tensor_batch_size)\n\n filtered = flattened.filter(_predicate)\n\n maybe_constant_batch_size = tensor_util.constant_value(tensor_batch_size)\n\n def _set_first_dimension(shape):\n return shape.merge_with(\n tensor_shape.vector(maybe_constant_batch_size).concatenate(shape[1:]))\n\n known_shapes = nest.map_structure(_set_first_dimension,\n dataset.output_shapes)\n return _RestructuredDataset(\n filtered,\n dataset.output_types,\n known_shapes,\n output_classes=dataset.output_classes)\n\n return _apply_fn\n\n\ndef batch_and_drop_remainder(batch_size):\n \"\"\"A batching transformation that omits the final small batch (if present).\n\n Like @{tf.data.Dataset.batch}, this transformation combines\n consecutive elements of this dataset into batches. 
However, if the batch\n size does not evenly divide the input dataset size, this transformation will\n drop the final smaller element.\n\n The following example illustrates the difference between this\n transformation and `Dataset.batch()`:\n\n ```python\n dataset = tf.data.Dataset.range(200)\n batched = dataset.apply(tf.contrib.data.batch_and_drop_remainder(128))\n print(batched.output_shapes) # ==> \"(128,)\" (the batch dimension is known)\n ```\n\n By contrast, `dataset.batch(128)` would yield a two-element dataset with\n shapes `(128,)` and `(72,)`, so the batch dimension would not be statically\n known.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.data.Dataset.apply}\n \"\"\"\n\n def _apply_fn(dataset):\n \"\"\"Function from `Dataset` to `Dataset` that applies the transformation.\"\"\"\n batched = dataset.batch(batch_size)\n return filter_irregular_batches(batch_size)(batched)\n\n return _apply_fn\n\n\ndef padded_batch_and_drop_remainder(batch_size,\n padded_shapes,\n padding_values=None):\n \"\"\"A batching and padding transformation that omits the final small batch.\n\n Like @{tf.data.Dataset.padded_batch}, this transformation combines\n consecutive elements of this dataset into batches. However, if the batch\n size does not evenly divide the input dataset size, this transformation will\n drop the final smaller element.\n\n See `@{tf.contrib.data.batch_and_drop_remainder}` for more details.\n\n Args:\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n padded_shapes: A nested structure of `tf.TensorShape` or\n `tf.int64` vector tensor-like objects. See\n @{tf.data.Dataset.padded_batch} for details.\n padding_values: (Optional.) A nested structure of scalar-shaped\n `tf.Tensor`. 
See @{tf.data.Dataset.padded_batch} for details.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.data.Dataset.apply}\n \"\"\"\n\n def _apply_fn(dataset):\n \"\"\"Function from `Dataset` to `Dataset` that applies the transformation.\"\"\"\n batched = dataset.padded_batch(\n batch_size, padded_shapes=padded_shapes, padding_values=padding_values)\n return filter_irregular_batches(batch_size)(batched)\n\n return _apply_fn\n\n\nclass DenseToSparseBatchDataset(dataset_ops.Dataset):\n \"\"\"A `Dataset` that batches ragged dense elements into `tf.SparseTensor`s.\"\"\"\n\n def __init__(self, input_dataset, batch_size, row_shape):\n \"\"\"See `Dataset.dense_to_sparse_batch()` for more details.\"\"\"\n super(DenseToSparseBatchDataset, self).__init__()\n if not isinstance(input_dataset.output_types, dtypes.DType):\n raise TypeError(\"DenseToSparseBatchDataset requires an input whose elements \"\n \"have a single component, whereas the input has %r.\" %\n input_dataset.output_types)\n self._input_dataset = input_dataset\n self._batch_size = batch_size\n self._row_shape = row_shape\n\n def _as_variant_tensor(self):\n return gen_dataset_ops.dense_to_sparse_batch_dataset(\n self._input_dataset._as_variant_tensor(), # pylint: disable=protected-access\n self._batch_size,\n row_shape=dataset_ops._partial_shape_to_tensor(self._row_shape), # pylint: disable=protected-access\n output_shapes=nest.flatten(\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)),\n output_types=nest.flatten(\n sparse.as_dense_types(self.output_types, self.output_classes)))\n\n @property\n def output_classes(self):\n return sparse_tensor.SparseTensor\n\n @property\n def output_shapes(self):\n return tensor_shape.vector(None).concatenate(self._row_shape)\n\n @property\n def output_types(self):\n return self._input_dataset.output_types\n\n\nclass _RestructuredDataset(dataset_ops.Dataset):\n \"\"\"An internal helper for changing the structure and shape of a dataset.\"\"\"\n\n def __init__(self,\n dataset,\n output_types,\n output_shapes=None,\n output_classes=None):\n \"\"\"Creates a new dataset with the given output types and shapes.\n\n The given `dataset` must have a structure that is convertible:\n * `dataset.output_types` must be the same as `output_types` modulo nesting.\n * Each shape in `dataset.output_shapes` must be compatible with each shape\n in `output_shapes` (if given).\n\n Note: This helper permits \"unsafe casts\" for shapes, equivalent to using\n `tf.Tensor.set_shape()` where domain-specific knowledge is available.\n\n Args:\n dataset: A `Dataset` object.\n output_types: A nested structure of `tf.DType` objects.\n output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects.\n If omitted, the shapes will be inherited from `dataset`.\n output_classes: (Optional.) 
A nested structure of class types.\n If omitted, the class types will be inherited from `dataset`.\n\n Raises:\n ValueError: If either `output_types` or `output_shapes` is not compatible\n with the structure of `dataset`.\n \"\"\"\n super(_RestructuredDataset, self).__init__()\n self._dataset = dataset\n\n # Validate that the types are compatible.\n output_types = nest.map_structure(dtypes.as_dtype, output_types)\n flat_original_types = nest.flatten(dataset.output_types)\n flat_new_types = nest.flatten(output_types)\n if flat_original_types != flat_new_types:\n raise ValueError(\n \"Dataset with output types %r cannot be restructured to have output \"\n \"types %r\" % (dataset.output_types, output_types))\n\n self._output_types = output_types\n\n if output_shapes is None:\n # Inherit shapes from the original `dataset`.\n self._output_shapes = nest.pack_sequence_as(output_types,\n nest.flatten(\n dataset.output_shapes))\n else:\n # Validate that the shapes are compatible.\n nest.assert_same_structure(output_types, output_shapes)\n flat_original_shapes = nest.flatten(dataset.output_shapes)\n flat_new_shapes = nest.flatten_up_to(output_types, output_shapes)\n\n for original_shape, new_shape in zip(flat_original_shapes,\n flat_new_shapes):\n if not original_shape.is_compatible_with(new_shape):\n raise ValueError(\n \"Dataset with output shapes %r cannot be restructured to have \"\n \"incompatible output shapes %r\" % (dataset.output_shapes,\n output_shapes))\n self._output_shapes = nest.map_structure_up_to(\n output_types, tensor_shape.as_shape, output_shapes)\n if output_classes is None:\n # Inherit class types from the original `dataset`.\n self._output_classes = nest.pack_sequence_as(output_types,\n nest.flatten(\n dataset.output_classes))\n else:\n self._output_classes = output_classes\n\n def _as_variant_tensor(self):\n return self._dataset._as_variant_tensor() # pylint: disable=protected-access\n\n @property\n def output_classes(self):\n return self._output_classes\n\n @property\n def output_types(self):\n return self._output_types\n\n @property\n def output_shapes(self):\n return self._output_shapes\n\n\nclass _MapAndBatchDataset(dataset_ops.MapDataset):\n \"\"\"A `Dataset` that maps a function over a batch of elements.\"\"\"\n\n def __init__(self, input_dataset, map_func, batch_size, num_parallel_batches):\n \"\"\"See `Dataset.map()` for details.\"\"\"\n super(_MapAndBatchDataset, self).__init__(input_dataset, map_func)\n self._batch_size = ops.convert_to_tensor(\n batch_size, dtype=dtypes.int64, name=\"batch_size\")\n self._num_parallel_batches = ops.convert_to_tensor(\n num_parallel_batches, dtype=dtypes.int64, name=\"num_parallel_batches\")\n\n def _as_variant_tensor(self):\n # pylint: disable=protected-access\n input_resource = self._input_dataset._as_variant_tensor()\n return gen_dataset_ops.map_and_batch_dataset(\n input_resource,\n self._map_func.captured_inputs,\n f=self._map_func,\n batch_size=self._batch_size,\n num_parallel_batches=self._num_parallel_batches,\n output_types=nest.flatten(\n sparse.as_dense_types(self.output_types, self.output_classes)),\n output_shapes=nest.flatten(\n sparse.as_dense_shapes(self.output_shapes, self.output_classes)))\n # pylint: enable=protected-access\n\n @property\n def output_shapes(self):\n return nest.pack_sequence_as(self._output_shapes, [\n tensor_shape.vector(tensor_util.constant_value(\n self._batch_size)).concatenate(s)\n for s in nest.flatten(self._output_shapes)\n ])\n\n @property\n def output_types(self):\n return 
self._output_types\n\n\ndef map_and_batch(map_func, batch_size, num_parallel_batches=1):\n \"\"\"Fused implementation of `map` and `batch`.\n\n Maps `map_func` across `batch_size` consecutive elements of this dataset\n and then combines them into a batch. Functionally, it is equivalent to `map`\n followed by `batch`. However, by fusing the two transformations together, the\n implementation can be more efficient. Surfacing this transformation in the API\n is temporary. Once automatic input pipeline optimization is implemented,\n the fusing of `map` and `batch` will happen automatically and this API will be\n deprecated.\n\n Args:\n map_func: A function mapping a nested structure of tensors to another\n nested structure of tensors.\n batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of\n consecutive elements of this dataset to combine in a single batch.\n num_parallel_batches: A `tf.int64` scalar `tf.Tensor`, representing the\n number of batches to create in parallel. On one hand, higher values can\n help mitigate the effect of stragglers. On the other hand, higher values\n can increase contention if CPU is scarce.\n\n Returns:\n A `Dataset` transformation function, which can be passed to\n @{tf.contrib.data.Dataset.apply}.\n \"\"\"\n\n def _apply_fn(dataset):\n return _MapAndBatchDataset(dataset, map_func, batch_size,\n num_parallel_batches)\n\n return _apply_fn\n",
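Since `map_and_batch` above ships without a usage example, here is a minimal sketch, assuming the function is reachable through the usual `tf.contrib.data` export implied by its docstring; the dataset contents and parallelism values are illustrative:

```python
# Hypothetical pipeline using the fused map_and_batch defined above.
import tensorflow as tf

dataset = tf.data.Dataset.range(10)
dataset = dataset.apply(
    tf.contrib.data.map_and_batch(
        map_func=lambda x: x * 2, batch_size=4, num_parallel_batches=2))
# Functionally equivalent, unfused form:
#   dataset = dataset.map(lambda x: x * 2).batch(4)
```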
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrapper for prefetching_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.data.python.ops import gen_prefetching_ops\nfrom tensorflow.contrib.util import loader\nfrom tensorflow.python.platform import resource_loader\n\n_prefetching_ops = loader.load_op_library(\n resource_loader.get_path_to_datafile(\"../../_prefetching_ops.so\"))\n\n\n# TODO (rohanj): Add a python class that constructs resource in the __init__ id:535 gh:536\n# method and provides a get_next() that calls the prefetch op.\ndef function_buffering_resource(string_arg,\n target_device,\n shared_name,\n f,\n buffer_size,\n thread_pool_size=1,\n container=\"\",\n name=None):\n return gen_prefetching_ops.function_buffering_resource(\n string_arg=string_arg,\n target_device=target_device,\n shared_name=shared_name,\n f=f,\n buffer_size=buffer_size,\n thread_pool_size=thread_pool_size,\n container=container,\n name=name)\n\n\ndef function_buffering_resource_get_next(function_buffer_resource,\n output_types,\n name=None):\n return gen_prefetching_ops.function_buffering_resource_get_next(\n function_buffer_resource=function_buffer_resource,\n output_types=output_types,\n name=name)\n"
] | [
[
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.asarray",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.platform.test.main",
"numpy.random.rand",
"numpy.prod",
"tensorflow.python.framework.constant_op.constant",
"numpy.zeros",
"numpy.random.randint"
],
[
"tensorflow.python.ops.array_ops.constant",
"tensorflow.contrib.tpu.python.tpu.tpu_function.check_function_argument_count",
"tensorflow.python.ops.control_flow_ops.while_loop",
"tensorflow.contrib.tpu.python.tpu.tpu_function.get_tpu_context",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.control_flow_ops.tuple"
],
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.contrib.layers.python.layers.feature_column_ops.transform_features",
"tensorflow.contrib.boosted_trees.python.ops.training_ops.tree_ensemble_stats",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.summary.summary.scalar",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.framework.ops.device",
"tensorflow.contrib.boosted_trees.python.ops.training_ops.grow_tree_ensemble",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.ops.gradients_impl.gradients",
"tensorflow.python.ops.control_flow_ops.cond",
"tensorflow.python.ops.math_ops.to_int32",
"tensorflow.python.ops.array_ops.transpose",
"tensorflow.python.training.device_setter.replica_device_setter",
"tensorflow.contrib.boosted_trees.python.ops.prediction_ops.gradient_trees_prediction",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.contrib.boosted_trees.python.ops.gen_model_ops.decision_tree_ensemble_resource_handle_op",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.feature_column.feature_column.input_layer",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_deserialize",
"tensorflow.contrib.boosted_trees.python.ops.training_ops.center_tree_ensemble_bias",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.contrib.boosted_trees.lib.learner.batch.ordinal_split_handler.DenseSplitHandler",
"tensorflow.contrib.boosted_trees.python.ops.prediction_ops.gradient_trees_partition_examples",
"tensorflow.contrib.boosted_trees.python.ops.batch_ops_utils.run_handler_scheduled_ops",
"tensorflow.contrib.boosted_trees.python.ops.gen_model_ops.create_tree_ensemble_variable",
"tensorflow.contrib.boosted_trees.python.ops.stats_accumulator_ops.StatsAccumulator",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.math_ops.range",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_serialize",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.contrib.boosted_trees.python.ops.model_ops.tree_ensemble_stamp_token",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.math_ops.trace",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_tensor",
"tensorflow.python.ops.math_ops.greater_equal",
"tensorflow.contrib.learn.python.learn.estimators.model_fn.ModelFnOps",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.nn.softmax",
"tensorflow.python.summary.summary.scalar",
"tensorflow.python.training.training.AdagradOptimizer",
"tensorflow.python.ops.math_ops.greater",
"tensorflow.python.ops.array_ops.squeeze",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.sparse_ops.sparse_to_indicator",
"tensorflow.python.ops.math_ops.to_float",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.math_ops.to_int32",
"tensorflow.python.ops.losses.losses.hinge_loss",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.contrib.layers.linear",
"tensorflow.python.ops.math_ops.argmax",
"tensorflow.python.ops.metrics.mean",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.weights_broadcast_ops.broadcast_weights",
"tensorflow.python.util.tf_decorator.unwrap",
"tensorflow.python.ops.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.ops.metrics.auc",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.sparse_ops.sparse_reshape",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.ops.lookup_ops.index_table_from_tensor",
"tensorflow.python.ops.array_ops.tile",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.python.ops.array_ops.zeros_like",
"tensorflow.python.ops.array_ops.one_hot",
"tensorflow.contrib.framework.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.ops.math_ops.reduce_mean",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.nn.bias_add",
"tensorflow.python.ops.metrics.accuracy",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.nn.log_poisson_loss",
"tensorflow.python.ops.math_ops.sigmoid",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.math_ops.add_n",
"tensorflow.python.ops.string_ops.as_string",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.ops.math_ops.multiply",
"tensorflow.python.util.tf_inspect.getargspec",
"tensorflow.python.ops.array_ops.expand_dims",
"tensorflow.python.ops.math_ops.reduce_sum"
],
[
"tensorflow.python.data.util.nest.map_structure_up_to",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.data.util.sparse.as_dense_types",
"tensorflow.python.data.util.nest.assert_same_structure",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.data.util.nest.flatten_up_to",
"tensorflow.python.framework.tensor_shape.vector",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.data.ops.dataset_ops._partial_shape_to_tensor",
"tensorflow.python.data.util.nest.map_structure",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.data.util.sparse.as_dense_shapes",
"tensorflow.python.data.util.nest.flatten"
],
[
"tensorflow.python.platform.resource_loader.get_path_to_datafile",
"tensorflow.contrib.data.python.ops.gen_prefetching_ops.function_buffering_resource_get_next",
"tensorflow.contrib.data.python.ops.gen_prefetching_ops.function_buffering_resource"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"1.4",
"2.7",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.6",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5"
]
}
] |
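The `map_and_batch` helper in the record above returns a transformation intended for `Dataset.apply`, not a dataset itself. A minimal usage sketch, assuming a TensorFlow 1.x build that exports `map_and_batch` from `tf.contrib.data`; the range dataset, map function, and batch sizes are illustrative:

    import tensorflow as tf
    from tensorflow.contrib.data import map_and_batch  # assumes a TF 1.x contrib build

    # Fused equivalent of .map(lambda x: x * 2).batch(16).
    dataset = tf.data.Dataset.range(100)
    dataset = dataset.apply(
        map_and_batch(lambda x: x * 2, batch_size=16, num_parallel_batches=2))

    next_batch = dataset.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        print(sess.run(next_batch))  # first batch: [0 2 4 ... 30]

Fusing the two steps lets the runtime build batches while map results are still being produced, which is the efficiency argument the docstring makes.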
EloyRD/ThesisExp | [
"dfb890708e95d23cc68ff79b0858630c12aa940d"
] | [
"scripts/EA_A_03_2LFact_Data.py"
] | [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,scripts//py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.6\n# kernelspec:\n# display_name: Python [conda env:thesis] *\n# language: python\n# name: conda-env-thesis-py\n# ---\n\n# %% [raw]\n# \\author{Eloy Ruiz-Donayre}\n# \\title{TESTCASE A - 2-Level 6-Factor Full Factorial (With 30 replicates) - Data Generation}\n# \\date{\\today}\n# \\maketitle\n\n# %% [raw]\n# \\tableofcontents\n\n# %% [markdown]\n# # Preliminaries\n\n# %% [markdown]\n# Importing python packages and setting display parameters\n\n# %%\nimport numpy as np\nimport pandas as pd\nimport itertools as it\nimport scipy.stats as stats\n\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport thesis_EAfunc as EAf\nimport thesis_visfunc as EAv\n\n# %%\nplt.style.use(\"bmh\")\n# %matplotlib inline\n# %config InlineBackend.figure_format = 'retina'\n\npd.set_option(\"display.latex.repr\", True)\npd.set_option(\"display.latex.longtable\", True)\n\n# %% [markdown] {\"toc-hr-collapsed\": false}\n# # Fitness Landscape Definition\n\n# %%\n# Problem domain\nx_min = -15\nx_max = 15\ny_min = -15\ny_max = 15\n\n# Known minimum\nx_point = -1\ny_point = -1\n\ndomain = (x_min, x_max, y_min, y_max)\npoint = (x_point, y_point)\nimg_size = (8.5, 4.25)\n\n# Problem definition\n\n\ndef f(x, y):\n D = 2\n alpha = 1 / 8\n\n x = (x - 5) / 6\n y = (y - 5) / 6\n\n a = np.abs(x ** 2 + y ** 2 - D) ** (alpha * D)\n b = (0.5 * (x ** 2 + y ** 2) + (x + y)) / D\n\n return a + b + 0.5\n\n\n# %%\n# Testing the minimum\nprint(f(-1, -1))\n\n# %%\n# Testing the function\nprint(f(-1.0, -1.0), f(-11.0, -9.0), f(11.0, 3.0), f(-6.0, 9.0))\n\n# %% [markdown] {\"toc-hr-collapsed\": false}\n# # Setting up the experiment\n# 64 Experiments\n# >L-> In each experiment, one set of parameters is used.\n# >>L-> 40 Replicates per experiment.\n# >>>L-> Each replicate is different due to randomness effects.\n\n# %%\n# starting seed\nnp.random.seed(42)\n\n# %% [markdown]\n# ## Initializing data storage\n\n# %%\nmult_fit_cols = (\n [\"exp\"]\n + [\"pop_s\"]\n + [\"b\"]\n + [\"mut_p\"]\n + [\"mut_s\"]\n + [\"p_sel\"]\n + [\"s_sel\"]\n + [\"run\", \"generation\", \"fitness_min\", \"fitness_max\", \"fitness_mean\", \"fitness_std\"]\n)\nmulti_fit = pd.DataFrame(columns=mult_fit_cols)\nmulti_fit = multi_fit.infer_objects()\n\n# %% [markdown] {\"toc-hr-collapsed\": false}\n# ## Parameter space for the experiment\n\n# %% [markdown]\n# ### Initializing\n\n# %%\n# Algorithm parameters\n# Number of replicates, and generations per experiment\nrep_n = 30\ngen_f = 200\n\n# Population size\npop_s = [10, 160]\n\n# Parent subpopulation's selection method and size\npar_selection = [\"uniform\", \"tournament_k3\"]\nb = [0.5, 5]\npar_s = [z * y for z in pop_s for y in b]\n\n# Progeny subpopulation's size\nprog_s = par_s\n\n# Crossover Method\ncrossover = \"uniform\"\n# Mutation method, probability and size\nmutation = \"random_all_gau_dis\"\nmut_p = [0.1, 0.9]\nmut_s = [0.5, 5]\n\n# New population selection method\nsur_selection = [\"fitness_proportional_selection\", \"uniform\"]\n\n# %% [markdown]\n# ### 2-Level Factors encoded values\n\n# %%\ninputs_labels = {\n \"pop_s\": \"Population size\",\n \"b\": \"Progeny-to-population ratio\",\n \"mut_p\": \"Mutation Probability\",\n \"mut_s\": \"Mutation size\",\n \"p_sel\": \"Parent selection\",\n \"s_sel\": \"Survivor selection method\",\n}\n\ndat = [\n (\"pop_s\", 10, 
160, -1, 1, \"Numerical\"),\n (\"b\", 0.5, 5, -1, 1, \"Numerical\"),\n (\"mut_p\", 0.1, 0.9, -1, 1, \"Numerical (<1)\"),\n (\"mut_s\", 0.5, 5, -1, 1, \"Numerical\"),\n (\"p_sel\", \"uniform\", \"tournament k3\", -1, 1, \"Categorical\"),\n (\"s_sel\", \"fitness proportional\", \"uniform\", -1, 1, \"Categorical\"),\n]\n\ninputs_df = pd.DataFrame(\n dat,\n columns=[\n \"Factor\",\n \"Value_low\",\n \"Value_high\",\n \"encoded_low\",\n \"encoded_high\",\n \"Variable type\",\n ],\n)\ninputs_df = inputs_df.set_index([\"Factor\"])\ninputs_df[\"Label\"] = inputs_df.index.map(lambda z: inputs_labels[z])\ninputs_df = inputs_df[\n [\"Label\", \"Variable type\", \"Value_low\", \"Value_high\", \"encoded_low\", \"encoded_high\"]\n]\n\ninputs_df\n\n# %% [markdown]\n# ### Combining the 2-level Factors\n\n# %% [markdown]\n# We create a list with all the possible combinations of the 2-level factors\n\n# %%\nexp_par = list(it.product(pop_s, b, mut_p, mut_s, par_selection, sur_selection))\nprint('Number of parameter combinations in \"exp_par\": ' + str(len(exp_par)))\nprint()\nprint('First and last parameter combinations in \"exp_par\":')\nprint(\"Sequence (pop_s, b, mut_p, mut_s, p_sel, s_sel)\")\nprint(exp_par[0])\nprint(exp_par[63])\n\n# %% [markdown]\n# # Experiment execution\n\n# %%\n# %%time\nexp_n = 1\nfor (zz, yy, xx, vv, uu, tt) in exp_par:\n sur_selection = tt\n par_selection = uu\n mut_s = vv\n mut_p = xx\n b = yy\n pop_s = zz\n prog_s = int(b * pop_s)\n par_s = prog_s\n\n fitness_res = EAf.EA_exp_only_fitness(\n rep_n,\n gen_f,\n f,\n domain,\n pop_s,\n par_s,\n prog_s,\n mut_p,\n mut_s,\n par_selection,\n crossover,\n mutation,\n sur_selection,\n )\n\n fitness_res.insert(0, \"s_sel\", tt)\n fitness_res.insert(0, \"p_sel\", uu)\n fitness_res.insert(0, \"mut_s\", vv)\n fitness_res.insert(0, \"mut_p\", xx)\n fitness_res.insert(0, \"b\", yy)\n fitness_res.insert(0, \"pop_s\", zz)\n fitness_res.insert(0, \"exp\", exp_n)\n multi_fit = multi_fit.append(fitness_res, ignore_index=True, sort=False)\n multi_fit = multi_fit.infer_objects()\n\n exp_n += 1\n\n# %% [markdown]\n# ## Data storage\n\n# %% [markdown]\n# Writing the Data Frame to a pickle file\n\n# %%\nmulti_fit.to_pickle(\"./Data/TEST_A_2L_FitData.gz\", compression=\"gzip\")\n\n# %% [markdown]\n# Reading the Data Frame from a pickle file\n\n# %%\nmulti_fit = pd.read_pickle(\"./Data/TEST_A_2L_FitData.gz\", compression=\"gzip\")\n\n# %%\nmulti_fit.tail()\n\n# %% [markdown]\n# # Processing data for DOE Analysis\n\n# %% [markdown]\n# Storing the latest generation's population of each replicate\n\n# %%\nquery = multi_fit[\"generation\"] == gen_f\nmulti_final_fitness_res = multi_fit[query]\n\n# %% [markdown]\n# Reordering columns\n\n# %%\nmulti_final_fitness_res = multi_final_fitness_res.drop(\n [\"exp\", \"generation\", \"run\", \"seed\"], axis=1\n)\nmulti_final_fitness_res.columns = [\n \"pop_s\",\n \"b\",\n \"mut_p\",\n \"mut_s\",\n \"p_sel\",\n \"s_sel\",\n \"f_min\",\n \"f_max\",\n \"f_mean\",\n \"f_std\",\n]\nmulti_final_fitness_res = multi_final_fitness_res[\n [\n \"pop_s\",\n \"b\",\n \"mut_p\",\n \"mut_s\",\n \"p_sel\",\n \"s_sel\",\n \"f_min\",\n \"f_max\",\n \"f_mean\",\n \"f_std\",\n ]\n]\nmulti_final_fitness_res = multi_final_fitness_res.reset_index(drop=True)\n\n# %% [markdown]\n# Encoding values for DOE's Factors\n\n# %%\nmulti_final_fitness_res[\"pop_s\"] = (\n multi_final_fitness_res[\"pop_s\"].replace([10, 160], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"b\"] = (\n multi_final_fitness_res[\"b\"].replace([0.5, 
5], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"mut_p\"] = (\n multi_final_fitness_res[\"mut_p\"].replace([0.1, 0.9], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"mut_s\"] = (\n multi_final_fitness_res[\"mut_s\"].replace([0.5, 5], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"p_sel\"] = (\n multi_final_fitness_res[\"p_sel\"]\n .replace([\"uniform\", \"tournament_k3\"], [-1, 1])\n .infer_objects()\n)\nmulti_final_fitness_res[\"s_sel\"] = (\n multi_final_fitness_res[\"s_sel\"]\n .replace([\"fitness_proportional_selection\", \"uniform\"], [-1, 1])\n .infer_objects()\n)\n\n# %% [markdown]\n# Exploring the Data Frame\n\n# %%\nmulti_final_fitness_res.head()\n\n# %%\nmulti_final_fitness_res.tail()\n\n# %% [markdown]\n# Storing the Factor Coding and DOE results Data Frames\n\n# %%\ninputs_df.to_pickle(\"./Data/TEST_A_DOE_code.gz\", compression=\"gzip\")\nmulti_final_fitness_res.to_pickle(\"./Data/TEST_A_DOE_data.gz\", compression=\"gzip\")\n\n# %%\n"
] | [
[
"numpy.abs",
"numpy.random.seed",
"pandas.DataFrame",
"pandas.set_option",
"pandas.read_pickle",
"matplotlib.pyplot.style.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
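The `EA_A_03_2LFact_Data.py` script above codes each 2-level factor to -1/+1 before the DOE analysis. A self-contained sketch of that encoding step, assuming only pandas; the toy frame stands in for `multi_final_fitness_res`:

    import pandas as pd

    df = pd.DataFrame({
        "pop_s": [10, 160, 160],
        "p_sel": ["uniform", "tournament_k3", "uniform"],
    })
    # Low level -> -1, high level -> +1, as in the factor-coding table.
    df["pop_s"] = df["pop_s"].replace([10, 160], [-1, 1]).infer_objects()
    df["p_sel"] = df["p_sel"].replace(["uniform", "tournament_k3"], [-1, 1]).infer_objects()
    print(df)

Coding both numeric and categorical factors onto the same -1/+1 scale is what lets a single regression recover main effects and interactions in the 2-level factorial design.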
monofo/fairseq | [
"335a4cbd403543ece43e24b41abbe53fc54b5f36"
] | [
"fairseq_cli/train.py"
] | [
"#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport argparse\nimport logging\nimport math\nimport os\nimport sys\nfrom typing import Dict, Optional, Any, List, Tuple, Callable\n\nimport numpy as np\nimport torch\nfrom fairseq import (\n checkpoint_utils,\n options,\n quantization_utils,\n tasks,\n utils,\n)\nfrom fairseq.data import iterators\nfrom fairseq.data.plasma_utils import PlasmaStore\nfrom fairseq.dataclass.configs import FairseqConfig\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils\nfrom fairseq.file_io import PathManager\nfrom fairseq.logging import meters, metrics, progress_bar\nfrom fairseq.model_parallel.megatron_trainer import MegatronTrainer\nfrom fairseq.trainer import Trainer\nfrom omegaconf import DictConfig, OmegaConf\n\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=os.environ.get(\"LOGLEVEL\", \"INFO\").upper(),\n stream=sys.stdout,\n)\nlogger = logging.getLogger(\"fairseq_cli.train\")\n\n\ndef main(cfg: FairseqConfig) -> None:\n if isinstance(cfg, argparse.Namespace):\n cfg = convert_namespace_to_omegaconf(cfg)\n\n utils.import_user_module(cfg.common)\n\n if distributed_utils.is_master(cfg.distributed_training) and \"job_logging_cfg\" in cfg:\n # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126)\n logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))\n\n assert (\n cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None\n ), \"Must specify batch size either with --max-tokens or --batch-size\"\n metrics.reset()\n\n np.random.seed(cfg.common.seed)\n utils.set_torch_seed(cfg.common.seed)\n\n if distributed_utils.is_master(cfg.distributed_training):\n checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)\n\n # Print args\n logger.info(cfg)\n\n if cfg.checkpoint.write_checkpoints_asynchronously:\n try:\n import iopath # noqa: F401\n except ImportError:\n logging.exception(\n \"Asynchronous checkpoint writing is specified but iopath is \"\n \"not installed: `pip install iopath`\"\n )\n return\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(cfg.task)\n\n assert cfg.criterion, \"Please specify criterion to train a model\"\n\n # Build model and criterion\n if cfg.distributed_training.ddp_backend == \"fully_sharded\":\n with fsdp_enable_wrap(cfg.distributed_training):\n model = fsdp_wrap(task.build_model(cfg.model))\n else:\n model = task.build_model(cfg.model)\n criterion = task.build_criterion(cfg.criterion)\n logger.info(model)\n logger.info(\"task: {}\".format(task.__class__.__name__))\n logger.info(\"model: {}\".format(model.__class__.__name__))\n logger.info(\"criterion: {}\".format(criterion.__class__.__name__))\n logger.info(\n \"num. shared model params: {:,} (num. trained: {:,})\".format(\n sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False)),\n sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False) and p.requires_grad)\n )\n )\n\n logger.info(\n \"num. expert model params: {} (num. 
trained: {})\".format(\n sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False)),\n sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False) and p.requires_grad),\n )\n )\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n # We load the valid dataset AFTER building the model\n for valid_sub_split in cfg.dataset.valid_subset.split(\",\"):\n task.load_dataset(valid_sub_split, combine=False, epoch=1)\n\n # (optionally) Configure quantization\n if cfg.common.quantization_config_path is not None:\n quantizer = quantization_utils.Quantizer(\n config_path=cfg.common.quantization_config_path,\n max_epoch=cfg.optimization.max_epoch,\n max_update=cfg.optimization.max_update,\n )\n else:\n quantizer = None\n\n # Build trainer\n if cfg.common.model_parallel_size == 1:\n trainer = Trainer(cfg, task, model, criterion, quantizer)\n else:\n trainer = MegatronTrainer(cfg, task, model, criterion)\n logger.info(\n \"training on {} devices (GPUs/TPUs)\".format(\n cfg.distributed_training.distributed_world_size\n )\n )\n logger.info(\n \"max tokens per device = {} and max sentences per device = {}\".format(\n cfg.dataset.max_tokens,\n cfg.dataset.batch_size,\n )\n )\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(\n cfg.checkpoint,\n trainer,\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n if cfg.common.tpu:\n import torch_xla.core.xla_model as xm\n xm.rendezvous(\"load_checkpoint\") # wait for all workers\n\n max_epoch = cfg.optimization.max_epoch or math.inf\n lr = trainer.get_lr()\n train_meter = meters.StopwatchMeter()\n train_meter.start()\n while epoch_itr.next_epoch_idx <= max_epoch:\n if lr <= cfg.optimization.stop_min_lr:\n logger.info(\n f\"stopping training because current learning rate ({lr}) is smaller \"\n \"than or equal to minimum learning rate \"\n f\"(--stop-min-lr={cfg.optimization.stop_min_lr})\"\n )\n break\n\n # train for one epoch\n valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)\n if should_stop:\n break\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n epoch_itr = trainer.get_train_iterator(\n epoch_itr.next_epoch_idx,\n # sharded data: get train iterator for next epoch\n load_dataset=task.has_sharded_data(\"train\"),\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n train_meter.stop()\n logger.info(\"done training in {:.1f} seconds\".format(train_meter.sum))\n\n # ioPath implementation to wait for all asynchronous file writes to complete.\n if cfg.checkpoint.write_checkpoints_asynchronously:\n logger.info(\n \"ioPath PathManager waiting for all asynchronous checkpoint \"\n \"writes to finish.\"\n )\n PathManager.async_close()\n logger.info(\"ioPath PathManager finished waiting.\")\n\n\ndef should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:\n # skip check if no validation was done in the current epoch\n if valid_loss is None:\n return False\n if cfg.checkpoint.patience <= 0:\n return False\n\n def is_better(a, b):\n return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b\n\n prev_best = getattr(should_stop_early, \"best\", None)\n if prev_best is None or is_better(valid_loss, prev_best):\n should_stop_early.best = valid_loss\n 
should_stop_early.num_runs = 0\n return False\n else:\n should_stop_early.num_runs += 1\n if should_stop_early.num_runs >= cfg.checkpoint.patience:\n logger.info(\n \"early stop since valid performance hasn't improved for last {} runs\".format(\n cfg.checkpoint.patience\n )\n )\n return True\n else:\n return False\n\n\[email protected](\"train\")\ndef train(\n cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr\n) -> Tuple[List[Optional[float]], bool]:\n \"\"\"Train the model for one epoch and return validation losses.\"\"\"\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,\n shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),\n )\n update_freq = (\n cfg.optimization.update_freq[epoch_itr.epoch - 1]\n if epoch_itr.epoch <= len(cfg.optimization.update_freq)\n else cfg.optimization.update_freq[-1]\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n if cfg.common.tpu:\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=cfg.common.log_format,\n log_interval=cfg.common.log_interval,\n epoch=epoch_itr.epoch,\n tensorboard_logdir=(\n cfg.common.tensorboard_logdir\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n default_log_format=(\"tqdm\" if not cfg.common.no_progress_bar else \"simple\"),\n wandb_project=(\n cfg.common.wandb_project\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n wandb_run_name=os.environ.get(\n \"WANDB_NAME\", os.path.basename(cfg.checkpoint.save_dir)\n ),\n azureml_logging=(\n cfg.common.azureml_logging\n if distributed_utils.is_master(cfg.distributed_training)\n else False\n ),\n )\n progress.update_config(_flatten_config(cfg))\n\n trainer.begin_epoch(epoch_itr.epoch)\n\n valid_subsets = cfg.dataset.valid_subset.split(\",\")\n should_stop = False\n num_updates = trainer.get_num_updates()\n logger.info(\"Start iterating over samples\")\n for i, samples in enumerate(progress):\n with metrics.aggregate(\"train_inner\"), torch.autograd.profiler.record_function(\n \"train_step-%d\" % i\n ):\n log_output = trainer.train_step(samples)\n\n if log_output is not None: # not OOM, overflow, ...\n # log mid-epoch stats\n num_updates = trainer.get_num_updates()\n if num_updates % cfg.common.log_interval == 0:\n stats = get_training_stats(metrics.get_smoothed_values(\"train_inner\"))\n progress.log(stats, tag=\"train_inner\", step=num_updates)\n\n # reset mid-epoch stats after each log interval\n # the end-of-epoch stats will still be preserved\n metrics.reset_meters(\"train_inner\")\n\n end_of_epoch = not itr.has_next()\n valid_losses, should_stop = validate_and_save(\n cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch\n )\n\n if should_stop:\n break\n\n # log end-of-epoch stats\n logger.info(\"end of epoch {} (average epoch stats below)\".format(epoch_itr.epoch))\n stats = get_training_stats(metrics.get_smoothed_values(\"train\"))\n progress.print(stats, tag=\"train\", step=num_updates)\n\n # reset epoch-level meters\n metrics.reset_meters(\"train\")\n return valid_losses, should_stop\n\n\ndef _flatten_config(cfg: DictConfig):\n config = OmegaConf.to_container(cfg)\n # remove any legacy Namespaces and replace with a single \"args\"\n namespace = None\n for k, v in list(config.items()):\n if isinstance(v, argparse.Namespace):\n namespace = v\n del config[k]\n if namespace is not None:\n config[\"args\"] = vars(namespace)\n return config\n\n\ndef validate_and_save(\n 
cfg: DictConfig,\n trainer: Trainer,\n task: tasks.FairseqTask,\n epoch_itr,\n valid_subsets: List[str],\n end_of_epoch: bool,\n) -> Tuple[List[Optional[float]], bool]:\n num_updates = trainer.get_num_updates()\n max_update = cfg.optimization.max_update or math.inf\n\n # Stopping conditions (and an additional one based on validation loss later\n # on)\n should_stop = False\n if num_updates >= max_update:\n should_stop = True\n logger.info(\n f\"Stopping training due to \"\n f\"num_updates: {num_updates} >= max_update: {max_update}\"\n )\n\n training_time_hours = trainer.cumulative_training_time() / (60 * 60)\n if (\n cfg.optimization.stop_time_hours > 0\n and training_time_hours > cfg.optimization.stop_time_hours\n ):\n should_stop = True\n logger.info(\n f\"Stopping training due to \"\n f\"cumulative_training_time: {training_time_hours} > \"\n f\"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)\"\n )\n\n do_save = (\n (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)\n or should_stop\n or (\n cfg.checkpoint.save_interval_updates > 0\n and num_updates > 0\n and num_updates % cfg.checkpoint.save_interval_updates == 0\n and num_updates >= cfg.dataset.validate_after_updates\n )\n )\n do_validate = (\n (not end_of_epoch and do_save) # validate during mid-epoch saves\n or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)\n or should_stop\n or (\n cfg.dataset.validate_interval_updates > 0\n and num_updates > 0\n and num_updates % cfg.dataset.validate_interval_updates == 0\n )\n ) and not cfg.dataset.disable_validation\n\n # Validate\n valid_losses = [None]\n if do_validate:\n valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)\n\n should_stop |= should_stop_early(cfg, valid_losses[0])\n\n # Save checkpoint\n if do_save or should_stop:\n checkpoint_utils.save_checkpoint(\n cfg.checkpoint, trainer, epoch_itr, valid_losses[0]\n )\n\n return valid_losses, should_stop\n\n\ndef get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:\n stats[\"wall\"] = round(metrics.get_meter(\"default\", \"wall\").elapsed_time, 0)\n return stats\n\n\ndef validate(\n cfg: DictConfig,\n trainer: Trainer,\n task: tasks.FairseqTask,\n epoch_itr,\n subsets: List[str],\n) -> List[Optional[float]]:\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n\n if cfg.dataset.fixed_validation_seed is not None:\n # set fixed seed for every validation\n utils.set_torch_seed(cfg.dataset.fixed_validation_seed)\n\n trainer.begin_valid_epoch(epoch_itr.epoch)\n valid_losses = []\n for subset in subsets:\n logger.info('begin validation on \"{}\" subset'.format(subset))\n\n # Initialize data iterator\n itr = trainer.get_valid_iterator(subset).next_epoch_itr(\n shuffle=False, set_dataset_epoch=False # use a fixed valid set\n )\n if cfg.common.tpu:\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=cfg.common.log_format,\n log_interval=cfg.common.log_interval,\n epoch=epoch_itr.epoch,\n prefix=f\"valid on '{subset}' subset\",\n tensorboard_logdir=(\n cfg.common.tensorboard_logdir\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n default_log_format=(\"tqdm\" if not cfg.common.no_progress_bar else \"simple\"),\n wandb_project=(\n cfg.common.wandb_project\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n wandb_run_name=os.environ.get(\n \"WANDB_NAME\", os.path.basename(cfg.checkpoint.save_dir)\n ),\n )\n\n # create a new root metrics 
aggregator so validation metrics\n # don't pollute other aggregators (e.g., train meters)\n with metrics.aggregate(new_root=True) as agg:\n for i, sample in enumerate(progress):\n if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps:\n break\n trainer.valid_step(sample)\n\n # log validation stats\n stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])\n return valid_losses\n\n\ndef get_valid_stats(\n cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]\n) -> Dict[str, Any]:\n stats[\"num_updates\"] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, \"best\"):\n key = \"best_{0}\".format(cfg.checkpoint.best_checkpoint_metric)\n best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min\n stats[key] = best_function(\n checkpoint_utils.save_checkpoint.best,\n stats[cfg.checkpoint.best_checkpoint_metric],\n )\n return stats\n\n\ndef cli_main(\n modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None\n) -> None:\n parser = options.get_training_parser()\n args = options.parse_args_and_arch(parser, modify_parser=modify_parser)\n\n cfg = convert_namespace_to_omegaconf(args)\n\n if cfg.common.use_plasma_view:\n server = PlasmaStore(path=cfg.common.plasma_path)\n logger.info(f\"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}\")\n\n if args.profile:\n with torch.cuda.profiler.profile():\n with torch.autograd.profiler.emit_nvtx():\n distributed_utils.call_main(cfg, main)\n else:\n distributed_utils.call_main(cfg, main)\n\n # if cfg.common.use_plasma_view:\n # server.server.kill()\n\n\nif __name__ == \"__main__\":\n cli_main()\n"
] | [
[
"torch.autograd.profiler.record_function",
"torch.autograd.profiler.emit_nvtx",
"torch.cuda.profiler.profile",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
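`should_stop_early` in `fairseq_cli/train.py` above stores its best validation loss and patience counter as attributes on the function object itself, so no trainer state is needed. A framework-free sketch of the same patience logic, with a made-up loss sequence:

    def should_stop_early(valid_loss, patience=3, maximize=False):
        if valid_loss is None or patience <= 0:
            return False
        is_better = (lambda a, b: a > b) if maximize else (lambda a, b: a < b)
        prev_best = getattr(should_stop_early, "best", None)
        if prev_best is None or is_better(valid_loss, prev_best):
            # New best: remember it and reset the patience counter.
            should_stop_early.best = valid_loss
            should_stop_early.num_runs = 0
            return False
        should_stop_early.num_runs += 1
        return should_stop_early.num_runs >= patience

    for loss in [1.0, 0.9, 0.95, 0.93, 0.92, 0.94]:
        print(loss, should_stop_early(loss))  # becomes True after 3 non-improving checks

Keeping state on the function works here because validation runs are strictly sequential within one training process; a resumable trainer would persist the same two values in its checkpoint instead.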
Joshuaalbert/neural_deprojection | [
"5f7859bfd514efe1707a61e2a5e7fc6d949f85ce",
"5f7859bfd514efe1707a61e2a5e7fc6d949f85ce"
] | [
"neural_deprojection/models/TwoD_to_2d_dVAE_GCD/graph_networks.py",
"neural_deprojection/models/graph_vae_GCD/mayavi_vis.py"
] | [
"import sys\n\nsys.path.insert(1, '/data/s2675544/git/neural_deprojection/')\nsys.path.insert(1, '/home/matthijs/git/neural_deprojection/')\n\nfrom graph_nets import blocks\nfrom graph_nets.utils_tf import concat\n\nimport tensorflow as tf\nimport sonnet as snt\nfrom graph_nets.graphs import GraphsTuple\nfrom graph_nets.utils_tf import fully_connect_graph_dynamic, fully_connect_graph_static\nfrom neural_deprojection.graph_net_utils import AbstractModule, histogramdd, get_shape\nimport tensorflow_probability as tfp\nfrom neural_deprojection.models.openai_dvae_modules.modules import Encoder, Decoder\n\nclass DiscreteImageVAE(AbstractModule):\n def __init__(self,\n hidden_size: int = 64,\n embedding_dim: int = 64,\n num_embedding: int = 1024,\n num_token_samples: int = 32,\n num_channels=1,\n name=None):\n super(DiscreteImageVAE, self).__init__(name=name)\n # (num_embedding, embedding_dim)\n self.num_channels=num_channels\n self.embeddings = tf.Variable(initial_value=tf.random.truncated_normal((num_embedding, embedding_dim)),\n name='embeddings')\n self.num_token_samples = num_token_samples\n self.num_embedding = num_embedding\n self.embedding_dim = embedding_dim\n self.temperature = tf.Variable(initial_value=tf.constant(1.), name='temperature', trainable=False)\n self.beta = tf.Variable(initial_value=tf.constant(6.6), name='beta', trainable=False)\n\n self.encoder = Encoder(hidden_size=hidden_size, num_embeddings=num_embedding, name='EncoderImage')\n self.decoder = Decoder(hidden_size=hidden_size, num_channels=num_channels, name='DecoderImage')\n\n def set_beta(self, beta):\n self.beta.assign(beta)\n\n def set_temperature(self, temperature):\n self.temperature.assign(temperature)\n\n @tf.function(input_signature=[tf.TensorSpec([None, None, None, None], dtype=tf.float32)])\n def sample_encoder(self, img):\n return self.encoder(img)\n\n @tf.function(input_signature=[tf.TensorSpec([None, None, None, None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.float32)])\n def sample_decoder(self, img_logits, temperature, num_samples):\n [batch, H, W, _] = get_shape(img_logits)\n\n logits = tf.reshape(img_logits, [batch * H * W, self.num_embedding]) # [batch*H*W, num_embeddings]\n reduce_logsumexp = tf.math.reduce_logsumexp(logits, axis=-1) # [batch*H*W]\n reduce_logsumexp = tf.tile(reduce_logsumexp[:, None], [1, self.num_embedding]) # [batch*H*W, num_embedding]\n logits -= reduce_logsumexp # [batch*H*W, num_embeddings]\n token_distribution = tfp.distributions.RelaxedOneHotCategorical(temperature, logits=logits)\n token_samples_onehot = token_distribution.sample((num_samples,),\n name='token_samples') # [S, batch*H*W, num_embeddings]\n def _single_decode(token_sample_onehot):\n # [batch*H*W, num_embeddings] @ [num_embeddings, embedding_dim]\n token_sample = tf.matmul(token_sample_onehot, self.embeddings) # [batch*H*W, embedding_dim] # = z ~ q(z|x)\n latent_img = tf.reshape(token_sample, [batch, H, W, self.embedding_dim]) # [batch, H, W, embedding_dim]\n decoded_img = self.decoder(latent_img) # [batch, H', W', C*2]\n return decoded_img\n\n decoded_ims = tf.vectorized_map(_single_decode, token_samples_onehot) # [S, batch, H', W', C*2]\n decoded_im = tf.reduce_mean(decoded_ims, axis=0) # [batch, H', W', C*2]\n return decoded_im\n\n\n def log_likelihood(self, img, mu, logb):\n \"\"\"\n Log-Laplace distribution.\n\n Args:\n img: [...,c] assumes of the form log(maximum(1e-5, img))\n mu: [...,c]\n logb: [...,c]\n\n Returns:\n log_prob [...]\n \"\"\"\n log_prob = - 
tf.math.abs(img - mu) / tf.math.exp(logb) \\\n - tf.math.log(2.) - img - logb\n return tf.reduce_sum(log_prob, axis=-1)\n\n\n def _build(self, img, **kwargs) -> dict:\n \"\"\"\n\n Args:\n img: [batch, H', W', num_channel]\n **kwargs:\n\n Returns:\n\n \"\"\"\n encoded_img_logits = self.encoder(img) # [batch, H, W, num_embedding]\n [batch, H, W, _] = get_shape(encoded_img_logits)\n\n logits = tf.reshape(encoded_img_logits, [batch*H*W, self.num_embedding]) # [batch*H*W, num_embeddings]\n reduce_logsumexp = tf.math.reduce_logsumexp(logits, axis=-1) # [batch*H*W]\n reduce_logsumexp = tf.tile(reduce_logsumexp[:, None], [1, self.num_embedding]) # [batch*H*W, num_embedding]\n logits -= reduce_logsumexp # [batch*H*W, num_embeddings]\n\n temperature = tf.maximum(0.1, tf.cast(1. - 0.1/(self.step/1000), tf.float32))\n token_distribution = tfp.distributions.RelaxedOneHotCategorical(temperature, logits=logits)\n token_samples_onehot = token_distribution.sample((self.num_token_samples,), name='token_samples') # [S, batch*H*W, num_embeddings]\n\n def _single_decode(token_sample_onehot):\n #[batch*H*W, num_embeddings] @ [num_embeddings, embedding_dim]\n token_sample = tf.matmul(token_sample_onehot, self.embeddings) # [batch*H*W, embedding_dim] # = z ~ q(z|x)\n latent_img = tf.reshape(token_sample, [batch, H, W, self.embedding_dim]) # [batch, H, W, embedding_dim]\n decoded_img = self.decoder(latent_img) # [batch, H', W', C*2]\n # print('decod shape', decoded_img)\n img_mu = decoded_img[..., :self.num_channels] #[batch, H', W', C]\n # print('mu shape', img_mu)\n img_logb = decoded_img[..., self.num_channels:]\n # print('logb shape', img_logb)\n log_likelihood = self.log_likelihood(img, img_mu, img_logb)#[batch, H', W', C]\n log_likelihood = tf.reduce_sum(log_likelihood, axis=[-3,-2,-1]) # [batch]\n sum_selected_logits = tf.math.reduce_sum(token_sample_onehot * logits, axis=-1) # [batch*H*W]\n sum_selected_logits = tf.reshape(sum_selected_logits, [batch, H, W])\n kl_term = tf.reduce_sum(sum_selected_logits, axis=[-2,-1])#[batch]\n return log_likelihood, kl_term, decoded_img\n\n #num_samples, batch\n log_likelihood_samples, kl_term_samples, decoded_ims = tf.vectorized_map(_single_decode, token_samples_onehot) # [S, batch], [S, batch]\n\n if self.step % 50 == 0:\n img_mu_0 = tf.reduce_mean(decoded_ims, axis=0)[..., :self.num_channels]\n img_mu_0 -= tf.reduce_min(img_mu_0)\n img_mu_0 /= tf.reduce_max(img_mu_0)\n tf.summary.image('mu', img_mu_0, step=self.step)\n\n smoothed_img = img[..., self.num_channels:]\n smoothed_img = (smoothed_img - tf.reduce_min(smoothed_img)) / (\n tf.reduce_max(smoothed_img) - tf.reduce_min(smoothed_img))\n tf.summary.image(f'img_before_autoencoder', smoothed_img, step=self.step)\n\n var_exp = tf.reduce_mean(log_likelihood_samples, axis=0) # [batch]\n kl_div = tf.reduce_mean(kl_term_samples, axis=0) # [batch]\n elbo = var_exp - kl_div # batch\n loss = - tf.reduce_mean(elbo) # scalar\n\n entropy = -tf.reduce_sum(logits * tf.math.exp(logits), axis=-1) # [batch*H*W]\n perplexity = 2. 
** (-entropy / tf.math.log(2.)) # [batch*H*W]\n mean_perplexity = tf.reduce_mean(perplexity) # scalar\n\n if self.step % 2 == 0:\n logits = tf.nn.softmax(logits, axis=-1) # [batch*H*W, num_embedding]\n logits -= tf.reduce_min(logits)\n logits /= tf.reduce_max(logits)\n logits = tf.reshape(logits, [batch, H*W, self.num_embedding])[0] # [H*W, num_embedding]\n # tf.repeat(tf.repeat(logits, 16*[4], axis=0), 512*[4], axis=1)\n tf.summary.image('logits', logits[None, :, :, None], step=self.step)\n tf.summary.scalar('perplexity', mean_perplexity, step=self.step)\n tf.summary.scalar('var_exp', tf.reduce_mean(var_exp), step=self.step)\n tf.summary.scalar('kl_div', tf.reduce_mean(kl_div), step=self.step)\n\n\n return dict(loss=loss,\n metrics=dict(var_exp=var_exp,\n kl_div=kl_div,\n mean_perplexity=mean_perplexity))\n",
"from mayavi import mlab\nimport numpy as np\nimport glob, os\n\n\n\ndef main(eval_dir, plot_property):\n data_files = glob.glob(os.path.join(eval_dir, '*.npz'))\n\n for data_file in data_files:\n data = np.load(data_file)\n positions = data['positions']\n input_prop = data[f'prop_{plot_property}_input']\n decoded_prop = data[f'prop_{plot_property}_decoded']\n\n for prop, source in zip([input_prop, decoded_prop],['input','decoded']):\n mlab.figure(1, bgcolor=(0, 0, 0))\n mlab.clf()\n colors = prop\n pts = mlab.points3d(positions[:,0], positions[:,1], positions[:,2], colors,\n scale_factor=0.015, resolution=10, scale_mode='none')\n mlab.savefig(f'{data_file.replace(\".npz\",f\"{source}.png\")}')\n mlab.show()\n # exit(0)\n\nif __name__ == '__main__':\n main('output_evaluations', 'rho')"
] | [
[
"tensorflow.math.abs",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.summary.scalar",
"tensorflow.summary.image",
"tensorflow.math.reduce_sum",
"tensorflow.tile",
"tensorflow.matmul",
"tensorflow.vectorized_map",
"tensorflow.random.truncated_normal",
"tensorflow.math.reduce_logsumexp",
"tensorflow.math.exp",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.math.log",
"tensorflow.reduce_min",
"tensorflow.TensorSpec"
],
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
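The `log_likelihood` method of `DiscreteImageVAE` above evaluates a log-Laplace density; the extra `- img` term is the Jacobian of the log transform mentioned in its docstring. A NumPy restatement of the same per-channel formula, with illustrative inputs:

    import numpy as np

    def log_laplace_logpdf(log_img, mu, logb):
        # log p(x) for x = exp(log_img) under a Laplace(mu, b=exp(logb)) model
        # in log space, matching: -|img - mu| / exp(logb) - log(2) - img - logb
        return -np.abs(log_img - mu) / np.exp(logb) - np.log(2.0) - log_img - logb

    x = np.log(np.maximum(1e-5, np.array([0.3, 1.2])))  # img is pre-log-transformed
    print(log_laplace_logpdf(x, mu=x, logb=np.zeros_like(x)).sum())

At `mu == log_img` the absolute-error term vanishes and only the normalization and Jacobian terms remain, which is a quick sanity check on the formula.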
bfxavier/GamestonkTerminal | [
"b0a685cacaca1f06fc41d8041bcae5492216dc52"
] | [
"gamestonk_terminal/prediction_techniques/neural_networks.py"
] | [
"import argparse\nimport os\nfrom warnings import simplefilter\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nfrom TimeSeriesCrossValidation import splitTrain\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n\nfrom gamestonk_terminal.helper_funcs import (\n check_positive,\n get_next_stock_market_days,\n parse_known_args_and_warn,\n print_pretty_prediction,\n)\n\nfrom gamestonk_terminal import config_neural_network_models as cfg_nn_models\n\n\nregister_matplotlib_converters()\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nsimplefilter(action=\"ignore\", category=FutureWarning)\n\n\n# ----------------------------------------------------------------------------------------------------\ndef build_neural_network_model(Recurrent_Neural_Network, n_inputs, n_days):\n model = Sequential()\n\n for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):\n # Recurrent Neural Network\n if str(*d_layer) == \"SimpleRNN\":\n # Is this the input layer? If so, define input_shape\n if idx_layer == 0:\n model.add(SimpleRNN(**d_layer[\"SimpleRNN\"], input_shape=(n_inputs, 1)))\n # Is this the last output layer? If so, set units to prediction days\n elif idx_layer == (len(Recurrent_Neural_Network) - 1):\n model.add(SimpleRNN(**d_layer[\"SimpleRNN\"], units=n_days))\n else:\n model.add(SimpleRNN(**d_layer[\"SimpleRNN\"]))\n\n # Long-Short Term-Memory\n elif str(*d_layer) == \"LSTM\":\n # Is this the input layer? If so, define input_shape\n if idx_layer == 0:\n model.add(LSTM(**d_layer[\"LSTM\"], input_shape=(n_inputs, 1)))\n # Is this the last output layer? If so, set units to prediction days\n elif idx_layer == (len(Recurrent_Neural_Network) - 1):\n model.add(LSTM(**d_layer[\"LSTM\"], units=n_days))\n else:\n model.add(LSTM(**d_layer[\"LSTM\"]))\n\n # Dense (Simple Neuron)\n elif str(*d_layer) == \"Dense\":\n # Is this the input layer? If so, define input_shape\n if idx_layer == 0:\n model.add(Dense(**d_layer[\"Dense\"], input_dim=n_inputs))\n # Is this the last output layer? If so, set units to prediction days\n elif idx_layer == (len(Recurrent_Neural_Network) - 1):\n model.add(Dense(**d_layer[\"Dense\"], units=n_days))\n else:\n model.add(Dense(**d_layer[\"Dense\"]))\n\n # Dropout (Regularization)\n elif str(*d_layer) == \"Dropout\":\n model.add(Dropout(**d_layer[\"Dropout\"]))\n\n else:\n print(f\"Incorrect neuron type: {str(*d_layer)}\")\n\n return model\n\n\ndef mlp(l_args, s_ticker, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False, prog=\"mlp\", description=\"\"\"Multilayer Perceptron. 
\"\"\"\n )\n\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"prediction days.\",\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n dest=\"n_inputs\",\n type=check_positive,\n default=40,\n help=\"number of days to use for prediction.\",\n )\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n action=\"store\",\n dest=\"n_epochs\",\n type=check_positive,\n default=200,\n help=\"number of training epochs.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jumps\",\n action=\"store\",\n dest=\"n_jumps\",\n type=check_positive,\n default=1,\n help=\"number of jumps in training data.\",\n )\n parser.add_argument(\n \"-p\",\n \"--pp\",\n action=\"store\",\n dest=\"s_preprocessing\",\n default=\"normalization\",\n choices=[\"normalization\", \"standardization\", \"none\"],\n help=\"pre-processing data.\",\n )\n parser.add_argument(\n \"-o\",\n \"--optimizer\",\n action=\"store\",\n dest=\"s_optimizer\",\n default=\"adam\",\n choices=[\n \"adam\",\n \"adagrad\",\n \"adadelta\",\n \"adamax\",\n \"ftrl\",\n \"nadam\",\n \"optimizer\",\n \"rmsprop\",\n \"sgd\",\n ],\n help=\"optimization technique.\",\n )\n parser.add_argument(\n \"-l\",\n \"--loss\",\n action=\"store\",\n dest=\"s_loss\",\n default=\"mae\",\n choices=[\"mae\", \"mape\", \"mse\", \"msle\"],\n help=\"loss function.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Pre-process data\n if ns_parser.s_preprocessing == \"standardization\":\n scaler = StandardScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n elif ns_parser.s_preprocessing == \"normalization\":\n scaler = MinMaxScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n else: # No pre-processing\n stock_train_data = np.array(\n df_stock[\"5. adjusted close\"].values.reshape(-1, 1)\n )\n\n # Split training data for the neural network\n stock_x, stock_y = splitTrain.split_train(\n stock_train_data,\n ns_parser.n_inputs,\n ns_parser.n_days,\n numJumps=ns_parser.n_jumps,\n )\n stock_x = np.array(stock_x)\n stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1]))\n stock_y = np.array(stock_y)\n stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1]))\n\n # Build Neural Network model\n model = build_neural_network_model(\n cfg_nn_models.MultiLayer_Perceptron, ns_parser.n_inputs, ns_parser.n_days\n )\n model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)\n\n # Train our model\n model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)\n print(\"\")\n\n print(model.summary())\n print(\"\")\n\n # Prediction\n yhat = model.predict(\n stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs),\n verbose=0,\n )\n\n # Re-scale the data back\n if (ns_parser.s_preprocessing == \"standardization\") or (\n ns_parser.s_preprocessing == \"normalization\"\n ):\n y_pred_test_t = scaler.inverse_transform(yhat.tolist())\n else:\n y_pred_test_t = yhat\n\n l_pred_days = get_next_stock_market_days(\n last_stock_day=df_stock[\"5. adjusted close\"].index[-1],\n n_next_days=ns_parser.n_days,\n )\n df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name=\"Price\")\n\n # Plotting\n plt.figure()\n plt.plot(df_stock.index, df_stock[\"5. 
adjusted close\"], lw=3)\n plt.title(f\"MLP on {s_ticker} - {ns_parser.n_days} days prediction\")\n plt.xlim(\n df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]\n )\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.plot(\n [df_stock.index[-1], df_pred.index[0]],\n [df_stock[\"5. adjusted close\"].values[-1], df_pred.values[0]],\n lw=1,\n c=\"tab:green\",\n linestyle=\"--\",\n )\n plt.plot(df_pred.index, df_pred, lw=2, c=\"tab:green\")\n plt.axvspan(\n df_stock.index[-1], df_pred.index[-1], facecolor=\"tab:orange\", alpha=0.2\n )\n _, _, ymin, ymax = plt.axis()\n plt.vlines(\n df_stock.index[-1],\n ymin,\n ymax,\n colors=\"k\",\n linewidth=3,\n linestyle=\"--\",\n color=\"k\",\n )\n plt.ion()\n plt.show()\n\n # Print prediction data\n print_pretty_prediction(df_pred, df_stock[\"5. adjusted close\"].values[-1])\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef rnn(l_args, s_ticker, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False, prog=\"rnn\", description=\"\"\"Recurrent Neural Network. \"\"\"\n )\n\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"prediction days.\",\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n dest=\"n_inputs\",\n type=check_positive,\n default=40,\n help=\"number of days to use for prediction.\",\n )\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n action=\"store\",\n dest=\"n_epochs\",\n type=check_positive,\n default=200,\n help=\"number of training epochs.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jumps\",\n action=\"store\",\n dest=\"n_jumps\",\n type=check_positive,\n default=1,\n help=\"number of jumps in training data.\",\n )\n parser.add_argument(\n \"-p\",\n \"--pp\",\n action=\"store\",\n dest=\"s_preprocessing\",\n default=\"normalization\",\n choices=[\"normalization\", \"standardization\", \"none\"],\n help=\"pre-processing data.\",\n )\n parser.add_argument(\n \"-o\",\n \"--optimizer\",\n action=\"store\",\n dest=\"s_optimizer\",\n default=\"adam\",\n help=\"optimizer technique\",\n choices=[\n \"adam\",\n \"adagrad\",\n \"adadelta\",\n \"adamax\",\n \"ftrl\",\n \"nadam\",\n \"optimizer\",\n \"rmsprop\",\n \"sgd\",\n ],\n )\n parser.add_argument(\n \"-l\",\n \"--loss\",\n action=\"store\",\n dest=\"s_loss\",\n default=\"mae\",\n choices=[\"mae\", \"mape\", \"mse\", \"msle\"],\n help=\"loss function.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Pre-process data\n if ns_parser.s_preprocessing == \"standardization\":\n scaler = StandardScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n elif ns_parser.s_preprocessing == \"normalization\":\n scaler = MinMaxScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n else: # No pre-processing\n stock_train_data = np.array(\n df_stock[\"5. 
adjusted close\"].values.reshape(-1, 1)\n )\n\n # Split training data for the neural network\n stock_x, stock_y = splitTrain.split_train(\n stock_train_data,\n ns_parser.n_inputs,\n ns_parser.n_days,\n numJumps=ns_parser.n_jumps,\n )\n stock_x = np.array(stock_x)\n stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))\n stock_y = np.array(stock_y)\n stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))\n\n # Build Neural Network model\n model = build_neural_network_model(\n cfg_nn_models.Recurrent_Neural_Network, ns_parser.n_inputs, ns_parser.n_days\n )\n model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)\n\n # Train our model\n model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)\n print(\"\")\n\n print(model.summary())\n print(\"\")\n\n # Prediction\n yhat = model.predict(\n stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs, 1),\n verbose=0,\n )\n\n # Re-scale the data back\n if (ns_parser.s_preprocessing == \"standardization\") or (\n ns_parser.s_preprocessing == \"normalization\"\n ):\n y_pred_test_t = scaler.inverse_transform(yhat.tolist())\n else:\n y_pred_test_t = yhat\n\n l_pred_days = get_next_stock_market_days(\n last_stock_day=df_stock[\"5. adjusted close\"].index[-1],\n n_next_days=ns_parser.n_days,\n )\n df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name=\"Price\")\n\n # Plotting\n plt.figure()\n plt.plot(df_stock.index, df_stock[\"5. adjusted close\"], lw=3)\n plt.title(f\"RNN on {s_ticker} - {ns_parser.n_days} days prediction\")\n plt.xlim(\n df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]\n )\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.plot(\n [df_stock.index[-1], df_pred.index[0]],\n [df_stock[\"5. adjusted close\"].values[-1], df_pred.values[0]],\n lw=1,\n c=\"tab:green\",\n linestyle=\"--\",\n )\n plt.plot(df_pred.index, df_pred, lw=2, c=\"tab:green\")\n plt.axvspan(\n df_stock.index[-1], df_pred.index[-1], facecolor=\"tab:orange\", alpha=0.2\n )\n _, _, ymin, ymax = plt.axis()\n plt.vlines(\n df_stock.index[-1],\n ymin,\n ymax,\n colors=\"k\",\n linewidth=3,\n linestyle=\"--\",\n color=\"k\",\n )\n plt.ion()\n plt.show()\n\n # Print prediction data\n print_pretty_prediction(df_pred, df_stock[\"5. adjusted close\"].values[-1])\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef lstm(l_args, s_ticker, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False, prog=\"lstm\", description=\"\"\"Long-Short Term Memory. 
\"\"\"\n )\n\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"prediction days\",\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n dest=\"n_inputs\",\n type=check_positive,\n default=40,\n help=\"number of days to use for prediction.\",\n )\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n action=\"store\",\n dest=\"n_epochs\",\n type=check_positive,\n default=200,\n help=\"number of training epochs.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jumps\",\n action=\"store\",\n dest=\"n_jumps\",\n type=check_positive,\n default=1,\n help=\"number of jumps in training data.\",\n )\n parser.add_argument(\n \"-p\",\n \"--pp\",\n action=\"store\",\n dest=\"s_preprocessing\",\n default=\"normalization\",\n choices=[\"normalization\", \"standardization\", \"none\"],\n help=\"pre-processing data.\",\n )\n parser.add_argument(\n \"-o\",\n \"--optimizer\",\n action=\"store\",\n dest=\"s_optimizer\",\n default=\"adam\",\n help=\"optimization technique.\",\n choices=[\n \"adam\",\n \"adagrad\",\n \"adadelta\",\n \"adamax\",\n \"ftrl\",\n \"nadam\",\n \"optimizer\",\n \"rmsprop\",\n \"sgd\",\n ],\n )\n parser.add_argument(\n \"-l\",\n \"--loss\",\n action=\"store\",\n dest=\"s_loss\",\n default=\"mae\",\n choices=[\"mae\", \"mape\", \"mse\", \"msle\"],\n help=\"loss function.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Pre-process data\n if ns_parser.s_preprocessing == \"standardization\":\n scaler = StandardScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n elif ns_parser.s_preprocessing == \"normalization\":\n scaler = MinMaxScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n else: # No pre-processing\n stock_train_data = np.array(\n df_stock[\"5. adjusted close\"].values.reshape(-1, 1)\n )\n\n # Split training data for the neural network\n stock_x, stock_y = splitTrain.split_train(\n stock_train_data,\n ns_parser.n_inputs,\n ns_parser.n_days,\n numJumps=ns_parser.n_jumps,\n )\n stock_x = np.array(stock_x)\n stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))\n stock_y = np.array(stock_y)\n stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))\n\n # Build Neural Network model\n model = build_neural_network_model(\n cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days\n )\n model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)\n\n # Train our model\n model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)\n print(\"\")\n\n print(model.summary())\n print(\"\")\n\n # Prediction\n yhat = model.predict(\n stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs, 1),\n verbose=0,\n )\n\n # Re-scale the data back\n if (ns_parser.s_preprocessing == \"standardization\") or (\n ns_parser.s_preprocessing == \"normalization\"\n ):\n y_pred_test_t = scaler.inverse_transform(yhat.tolist())\n else:\n y_pred_test_t = yhat\n\n l_pred_days = get_next_stock_market_days(\n last_stock_day=df_stock[\"5. adjusted close\"].index[-1],\n n_next_days=ns_parser.n_days,\n )\n df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name=\"Price\")\n\n # Plotting\n plt.figure()\n plt.plot(df_stock.index, df_stock[\"5. 
adjusted close\"], lw=3)\n plt.title(f\"LSTM on {s_ticker} - {ns_parser.n_days} days prediction\")\n plt.xlim(\n df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]\n )\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.plot(\n [df_stock.index[-1], df_pred.index[0]],\n [df_stock[\"5. adjusted close\"].values[-1], df_pred.values[0]],\n lw=1,\n c=\"tab:green\",\n linestyle=\"--\",\n )\n plt.plot(df_pred.index, df_pred, lw=2, c=\"tab:green\")\n plt.axvspan(\n df_stock.index[-1], df_pred.index[-1], facecolor=\"tab:orange\", alpha=0.2\n )\n _, _, ymin, ymax = plt.axis()\n plt.vlines(\n df_stock.index[-1],\n ymin,\n ymax,\n colors=\"k\",\n linewidth=3,\n linestyle=\"--\",\n color=\"k\",\n )\n plt.ion()\n plt.show()\n\n # Print prediction data\n print_pretty_prediction(df_pred, df_stock[\"5. adjusted close\"].values[-1])\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n"
] | [
[
"matplotlib.pyplot.minorticks_on",
"matplotlib.pyplot.plot",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.SimpleRNN",
"numpy.reshape",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.vlines",
"tensorflow.keras.models.Sequential",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ion",
"numpy.array",
"matplotlib.pyplot.axvspan",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.layers.LSTM",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
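The two records above call build_neural_network_model with a layer configuration (cfg_nn_models.Recurrent_Neural_Network / Long_Short_Term_Memory) that is not part of this dump. A minimal self-contained sketch of the LSTM case under stated assumptions: the single 25-unit LSTM, the builder name build_lstm_model, and the synthetic data are illustrative, not the repo's actual cfg_nn_models contents.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

def build_lstm_model(n_inputs, n_days):
    # one LSTM over a window of n_inputs prices (1 feature per step),
    # then a linear head producing one value per forecast day
    model = Sequential()
    model.add(LSTM(25, input_shape=(n_inputs, 1)))
    model.add(Dense(n_days))
    return model

x = np.random.rand(32, 40, 1)  # 32 training windows of 40 prices each
y = np.random.rand(32, 5)      # the next 5 prices for each window
model = build_lstm_model(n_inputs=40, n_days=5)
model.compile(optimizer="adam", loss="mae")
model.fit(x, y, epochs=1, verbose=0)
print(model.predict(x[-1:], verbose=0).shape)  # (1, 5)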
markson14/RRPN_pytorch | [
"f30c6180c44c2d6cc65ce4521a3cf839b5215089",
"f30c6180c44c2d6cc65ce4521a3cf839b5215089"
] | [
"maskrcnn_benchmark/modeling/rrpn/inference.py",
"maskrcnn_benchmark/modeling/rbox_coder.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\n\nfrom maskrcnn_benchmark.modeling.box_coder import BoxCoder\nfrom maskrcnn_benchmark.modeling.rbox_coder import RBoxCoder\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList, RBoxList\nfrom maskrcnn_benchmark.structures.rboxlist_ops import cat_boxlist\nfrom maskrcnn_benchmark.structures.rboxlist_ops import boxlist_nms #\nfrom maskrcnn_benchmark.structures.rboxlist_ops import remove_small_boxes\n\nfrom ..utils import cat\n\n\nclass RPNPostProcessor(torch.nn.Module):\n \"\"\"\n Performs post-processing on the outputs of the RPN boxes, before feeding the\n proposals to the heads\n \"\"\"\n\n def __init__(\n self,\n pre_nms_top_n,\n post_nms_top_n,\n nms_thresh,\n min_size,\n box_coder=None,\n fpn_post_nms_top_n=None,\n ):\n \"\"\"\n Arguments:\n pre_nms_top_n (int)\n post_nms_top_n (int)\n nms_thresh (float)\n min_size (int)\n box_coder (BoxCoder)\n fpn_post_nms_top_n (int)\n \"\"\"\n super(RPNPostProcessor, self).__init__()\n self.pre_nms_top_n = pre_nms_top_n\n self.post_nms_top_n = post_nms_top_n\n self.nms_thresh = nms_thresh\n self.min_size = min_size\n\n if box_coder is None:\n box_coder = RBoxCoder(weights=(1.0, 1.0, 1.0, 1.0, 1.0))\n self.box_coder = box_coder\n\n if fpn_post_nms_top_n is None:\n fpn_post_nms_top_n = post_nms_top_n\n self.fpn_post_nms_top_n = fpn_post_nms_top_n\n\n def add_gt_proposals(self, proposals, targets):\n \"\"\"\n Arguments:\n proposals: list[BoxList]\n targets: list[BoxList]\n \"\"\"\n # Get the device we're operating on\n device = proposals[0].bbox.device\n\n gt_boxes = [target.copy_with_fields([]) for target in targets]\n\n # later cat of bbox requires all fields to be present for all bbox\n # so we need to add a dummy for objectness that's missing\n for gt_box in gt_boxes:\n gt_box.add_field(\"objectness\", torch.ones(len(gt_box), device=device))\n\n proposals = [\n cat_boxlist((proposal, gt_box))\n for proposal, gt_box in zip(proposals, gt_boxes)\n ]\n # print('rrpn_proposal:', proposals[0].bbox.size(), proposals[0].bbox[:, 2:4])\n return proposals\n\n # proposal_target_layer\n def forward_for_single_feature_map(self, anchors, objectness, box_regression):\n \"\"\"\n Arguments:\n anchors: list[BoxList]\n objectness: tensor of size N, A, H, W\n box_regression: tensor of size N, A * 5, H, W\n \"\"\"\n device = objectness.device\n N, A, H, W = objectness.shape\n\n # put in the same format as anchors\n objectness = objectness.permute(0, 2, 3, 1).reshape(N, -1)\n objectness = objectness.sigmoid()\n box_regression = box_regression.view(N, -1, 5, H, W).permute(0, 3, 4, 1, 2)\n box_regression = box_regression.reshape(N, -1, 5)\n\n num_anchors = A * H * W\n\n pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)\n objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)\n\n batch_idx = torch.arange(N, device=device)[:, None]\n box_regression = box_regression[batch_idx, topk_idx]\n\n image_shapes = [box.size for box in anchors]\n concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)\n concat_anchors = concat_anchors.reshape(N, -1, 5)[batch_idx, topk_idx]\n\n # print('concat_anchors:', concat_anchors.size(), concat_anchors[:, 2:4])\n\n proposals = self.box_coder.decode(\n box_regression.view(-1, 5), concat_anchors.view(-1, 5)\n )\n\n proposals = proposals.view(N, -1, 5)\n # print('outsider:', proposals.size(), proposals[:, 2:4], 'box_regression:', box_regression)\n\n #-------\n result = []\n for proposal, score, im_shape in zip(proposals, 
objectness, image_shapes):\n boxlist = RBoxList(proposal, im_shape, mode=\"xywha\")\n\n # print('before nms:', boxlist.bbox.size(), boxlist.bbox[:, 2:4])\n\n boxlist.add_field(\"objectness\", score)\n # boxlist = boxlist.clip_to_image(remove_empty=False)\n boxlist = remove_small_boxes(boxlist, self.min_size)\n boxlist = boxlist_nms(\n boxlist,\n self.nms_thresh,\n max_proposals=self.post_nms_top_n,\n score_field=\"objectness\",\n )\n\n # print('after nms:', boxlist.bbox.size(), boxlist.bbox[:, 2:4])\n\n result.append(boxlist)\n return result\n\n def forward(self, anchors, objectness, box_regression, targets=None):\n \"\"\"\n Arguments:\n anchors: list[list[BoxList]]\n objectness: list[tensor]\n box_regression: list[tensor]\n\n Returns:\n boxlists (list[BoxList]): the post-processed anchors, after\n applying box decoding and NMS\n \"\"\"\n sampled_boxes = []\n num_levels = len(objectness)\n anchors = list(zip(*anchors))\n for a, o, b in zip(anchors, objectness, box_regression):\n sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))\n\n boxlists = list(zip(*sampled_boxes))\n boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]\n\n if num_levels > 1:\n boxlists = self.select_over_all_levels(boxlists)\n\n # append ground-truth bboxes to proposals\n if self.training and targets is not None:\n boxlists = self.add_gt_proposals(boxlists, targets)\n\n return boxlists\n\n def select_over_all_levels(self, boxlists):\n num_images = len(boxlists)\n # different behavior during training and during testing:\n # during training, post_nms_top_n is over *all* the proposals combined, while\n # during testing, it is over the proposals for each image\n # TODO resolve this difference and make it consistent. It should be per image,\n # and not per batch\n if self.training:\n objectness = torch.cat(\n [boxlist.get_field(\"objectness\") for boxlist in boxlists], dim=0\n )\n box_sizes = [len(boxlist) for boxlist in boxlists]\n post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))\n _, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True)\n inds_mask = torch.zeros_like(objectness, dtype=torch.uint8)\n inds_mask[inds_sorted] = 1\n inds_mask = inds_mask.split(box_sizes)\n for i in range(num_images):\n boxlists[i] = boxlists[i][inds_mask[i]]\n else:\n for i in range(num_images):\n objectness = boxlists[i].get_field(\"objectness\")\n post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))\n _, inds_sorted = torch.topk(\n objectness, post_nms_top_n, dim=0, sorted=True\n )\n boxlists[i] = boxlists[i][inds_sorted]\n return boxlists\n\n\ndef make_rpn_postprocessor(config, rpn_box_coder, is_train):\n fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN\n if not is_train:\n fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST\n\n pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TRAIN\n post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TRAIN\n if not is_train:\n pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST\n post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST\n nms_thresh = config.MODEL.RPN.NMS_THRESH\n min_size = config.MODEL.RPN.MIN_SIZE\n box_selector = RPNPostProcessor(\n pre_nms_top_n=pre_nms_top_n,\n post_nms_top_n=post_nms_top_n,\n nms_thresh=nms_thresh,\n min_size=min_size,\n box_coder=rpn_box_coder,\n fpn_post_nms_top_n=fpn_post_nms_top_n,\n )\n return box_selector\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport math\n\nimport torch\n\n\nclass RBoxCoder(object):\n \"\"\"\n This class encodes and decodes a set of bounding boxes into\n the representation used for training the regressors.\n \"\"\"\n\n def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):\n \"\"\"\n Arguments:\n weights (4-element tuple)\n bbox_xform_clip (float)\n \"\"\"\n self.weights = weights\n self.bbox_xform_clip = bbox_xform_clip\n\n def encode(self, reference_boxes, proposals):\n \"\"\"\n Encode a set of proposals with respect to some\n reference boxes\n\n Arguments:\n reference_boxes (Tensor): reference boxes\n proposals (Tensor): boxes to be encoded\n \"\"\"\n\n TO_REMOVE = 1 # TODO remove\n ex_widths = proposals[:, 2]# - proposals[:, 0] + TO_REMOVE\n ex_heights = proposals[:, 3]# - proposals[:, 1] + TO_REMOVE\n ex_ctr_x = proposals[:, 0]# + 0.5 * ex_widths\n ex_ctr_y = proposals[:, 1]# + 0.5 * ex_heights\n ex_angle = proposals[:, 4]\n\n gt_widths = reference_boxes[:, 2]# - reference_boxes[:, 0] + TO_REMOVE\n gt_heights = reference_boxes[:, 3]# - reference_boxes[:, 1] + TO_REMOVE\n gt_ctr_x = reference_boxes[:, 0]# + 0.5 * gt_widths\n gt_ctr_y = reference_boxes[:, 1]# + 0.5 * gt_heights\n gt_angle = reference_boxes[:, 4]\n\n wx, wy, ww, wh, wa = self.weights\n targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths\n targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights\n targets_dw = ww * torch.log(gt_widths / ex_widths)\n targets_dh = wh * torch.log(gt_heights / ex_heights)\n\n targets_da = wa * (gt_angle - ex_angle)\n #targets_da[np.where((gt_angle <= -30) & (ex_angle >= 120))] += 180\n #targets_da[np.where((gt_angle >= 120) & (ex_angle <= -30))] -= 180\n\n gtle30 = gt_angle.le(-30)\n exge120 = ex_angle.ge(120)\n gtge120 = gt_angle.ge(120)\n exle30 = ex_angle.le(-30)\n\n incre180 = gtle30 * exge120 * 180\n decre180 = gtge120 * exle30 * (-180)\n\n targets_da = targets_da + incre180.float()\n targets_da = targets_da + decre180.float()\n\n targets_da = 3.14159265358979323846264338327950288 / 180 * targets_da\n\n targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh, targets_da), dim=1)\n return targets\n\n def decode(self, rel_codes, boxes):\n \"\"\"\n From a set of original boxes and encoded relative box offsets,\n get the decoded boxes.\n\n Arguments:\n rel_codes (Tensor): encoded boxes\n boxes (Tensor): reference boxes.\n \"\"\"\n\n boxes = boxes.to(rel_codes.dtype)\n\n TO_REMOVE = 1 # TODO remove\n widths = boxes[:, 2]# - boxes[:, 0] + TO_REMOVE\n heights = boxes[:, 3]# - boxes[:, 1] + TO_REMOVE\n ctr_x = boxes[:, 0]# + 0.5 * widths\n ctr_y = boxes[:, 1]# + 0.5 * heights\n angle = boxes[:, 4]\n\n wx, wy, ww, wh, wa = self.weights\n dx = rel_codes[:, 0::5] / wx\n dy = rel_codes[:, 1::5] / wy\n dw = rel_codes[:, 2::5] / ww\n dh = rel_codes[:, 3::5] / wh\n da = rel_codes[:, 4::5] / wa\n\n pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\n pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\n pred_w = torch.exp(dw) * widths[:, None]\n pred_h = torch.exp(dh) * heights[:, None]\n da = da * 1.0 / 3.141592653 * 180 # arc to angle\n pred_angle = da + angle[:, None]\n\n # print('pred_angle:', pred_angle.size())\n pred_boxes = torch.zeros_like(rel_codes)\n # ctr_x1\n pred_boxes[:, 0::5] = pred_ctr_x\n # ctr_y1\n pred_boxes[:, 1::5] = pred_ctr_y\n # width\n pred_boxes[:, 2::5] = pred_w\n # height\n pred_boxes[:, 3::5] = pred_h\n # angle\n pred_boxes[:, 4::5] = pred_angle\n\n return pred_boxes\n"
] | [
[
"torch.topk",
"torch.zeros_like",
"torch.arange",
"torch.cat"
],
[
"torch.stack",
"torch.exp",
"torch.zeros_like",
"torch.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
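RBoxCoder above encodes rotated boxes (xywha: center x/y, width, height, angle in degrees) as five deltas and decodes them back. A standalone round-trip check of that arithmetic, assuming unit weights and an anchor/ground-truth angle pair that needs none of the ±180° wrapping the class also handles:

import math
import torch

anchors = torch.tensor([[50.0, 60.0, 20.0, 10.0, 0.0]])
gt      = torch.tensor([[55.0, 58.0, 24.0, 12.0, 15.0]])

# encode: normalized center offsets, log size ratios, angle delta in radians
dx = (gt[:, 0] - anchors[:, 0]) / anchors[:, 2]
dy = (gt[:, 1] - anchors[:, 1]) / anchors[:, 3]
dw = torch.log(gt[:, 2] / anchors[:, 2])
dh = torch.log(gt[:, 3] / anchors[:, 3])
da = (gt[:, 4] - anchors[:, 4]) * math.pi / 180.0

# decode reverses each step
x = dx * anchors[:, 2] + anchors[:, 0]
y = dy * anchors[:, 3] + anchors[:, 1]
w = torch.exp(dw) * anchors[:, 2]
h = torch.exp(dh) * anchors[:, 3]
a = da * 180.0 / math.pi + anchors[:, 4]
print(torch.stack([x, y, w, h, a], dim=1))  # recovers gt up to float error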
iwan933/mlmi-federated-learning | [
"e148664304dd7fbbc2cc2a6a34567533748c1720"
] | [
"mlmi/participant.py"
] | [
"import copy\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Union\n\nimport torch\nfrom pytorch_lightning.metrics import Accuracy\nfrom torch import Tensor, optim\nfrom torch.utils import data\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.callbacks.base import Callback\n\nfrom mlmi.structs import OptimizerArgs, TrainArgs, ModelArgs\nfrom mlmi.log import getLogger\nfrom mlmi.settings import CHECKPOINT_DIR\n\n\nlogger = getLogger(__name__)\n\n\ndef optimizer_state_dict_to_cpu(optimizer_state_dict):\n c = copy.deepcopy(optimizer_state_dict)\n o = {}\n state_dict = c.get('state')\n r = {}\n for key, state in state_dict.items():\n s = {}\n for k, v in state.items():\n if torch.is_tensor(v):\n s[k] = v.cpu()\n else:\n s[k] = v\n r[key] = s\n o['state'] = r\n o['param_groups'] = c.get('param_groups')\n return o\n\n\nclass BaseParticipant(object):\n\n def __init__(self, participant_name: str, model_args: ModelArgs, context):\n assert participant_name is not None, 'A participant name is required to load and save logs'\n assert model_args is not None, 'Model args are required to initialize a model for the participant'\n assert context is not None, 'Experiment context is required for participant'\n\n self._name = participant_name\n self._cluster_id = None\n self._experiment_context = context\n participant_model_kwargs = self.get_model_kwargs()\n if participant_model_kwargs is not None:\n self._model = model_args(participant_name=participant_name, **participant_model_kwargs)\n else:\n self._model = model_args(participant_name=participant_name)\n self._model_args = model_args\n\n def get_model_kwargs(self) -> Optional[Dict]:\n return None\n\n @property\n def model(self) -> Union[pl.LightningModule, 'BaseParticipantModel']:\n \"\"\"\n The model to train\n :return: The model\n \"\"\"\n return self._model\n\n @property\n def cluster_id(self) -> str:\n return self._cluster_id\n\n @cluster_id.setter\n def cluster_id(self, value: str):\n self._cluster_id = value\n\n def overwrite_model_state(self, model_state: Dict[str, Tensor]):\n \"\"\"\n Loads the model state into the current model instance\n :param model_state: The model state to load\n \"\"\"\n self._model.load_state_dict(model_state, strict=False)\n\n def load_model_state_from_checkpoint(self):\n \"\"\"\n Load the model state from an existing saved checkpoint\n \"\"\"\n self._model = self._model_args.model_class.load_from_checkpoint(\n checkpoint_path=str(self.get_checkpoint_path().absolute()))\n\n def get_checkpoint_path(self, suffix: Union[str, None] = None) -> Path:\n \"\"\"\n Constructs a checkpoint path based on\n :return:\n \"\"\"\n str_suffix = '' if suffix is None else '_' + suffix\n filename = (self._name + str_suffix + '.ckpt')\n return CHECKPOINT_DIR / self._experiment_context.name / filename\n\n def save_model_state(self):\n \"\"\"\n Saves the model state of the aggregated model\n :param target_path: The path to save the model at\n :return:\n \"\"\"\n path = self.get_checkpoint_path()\n path.parent.mkdir(parents=True, exist_ok=True)\n torch.save(self._model.state_dict(), path)\n\n\nclass BaseTrainingParticipant(BaseParticipant):\n def __init__(self, client_id: str, model_args: ModelArgs, context,\n train_dataloader: data.DataLoader, num_train_samples: int,\n test_dataloader: data.DataLoader, num_test_samples: int,\n lightning_logger: LightningLoggerBase, *args, **kwargs):\n self._train_dataloader = train_dataloader\n self._test_dataloader = 
test_dataloader\n self._num_train_samples = sum([len(y) for x, y in train_dataloader])\n self._num_test_samples = num_test_samples\n self._lightning_logger = lightning_logger\n self._callbacks = None\n self._model_state = None\n self._trainer = None\n super().__init__(client_id, model_args, context)\n\n def create_trainer(self, enable_logging=True, **kwargs) -> pl.Trainer:\n \"\"\"\n Creates a new trainer instance for each training round.\n :param kwargs: additional keyword arguments to send to the trainer for configuration\n :return: a pytorch lightning trainer instance\n \"\"\"\n _kwargs = kwargs.copy()\n _kwargs['logger'] = self.logger\n _kwargs['checkpoint_callback'] = False\n if torch.cuda.is_available():\n _kwargs['gpus'] = 1\n return pl.Trainer(callbacks=self._callbacks, limit_val_batches=0.0, **_kwargs)\n\n def set_trainer_callbacks(self, callbacks: List[Callback]):\n self._callbacks = callbacks\n\n @property\n def logger(self) -> LightningLoggerBase:\n \"\"\"\n Gets the logger to use for the training in later stage.\n :return: The lightning logger to use\n \"\"\"\n return self._lightning_logger\n\n @property\n def train_data_loader(self) -> data.DataLoader:\n return self._train_dataloader\n\n @property\n def test_data_loader(self) -> data.DataLoader:\n return self._test_dataloader\n\n @property\n def num_train_samples(self) -> int:\n return self._num_train_samples\n\n @property\n def num_test_samples(self) -> int:\n return self._num_test_samples\n\n def train(self, training_args: TrainArgs, *args, **kwargs):\n \"\"\"\n Implement the training routine.\n :param training_args:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n trainer = self.create_trainer(enable_logging=False, **training_args.kwargs)\n train_dataloader = self.train_data_loader\n trainer.fit(self.model, train_dataloader)\n del self.model.trainer\n\n def test(self, model: Optional[torch.nn.Module] = None, use_local_model: bool = False):\n \"\"\"\n Test the model state on this clients data.\n :param\n :param model_state: The model state to evaluate\n :return: The output loss\n \"\"\"\n assert use_local_model or model is not None\n\n trainer = self.create_trainer(enable_logging=False, progress_bar_refresh_rate=0)\n\n if use_local_model:\n result = trainer.test(model=self.model, test_dataloaders=self.test_data_loader, verbose=False)\n self._model = self._model.cpu()\n del self._model.trainer\n else:\n result = trainer.test(model=model, test_dataloaders=self.test_data_loader, verbose=False)\n return result\n\n\nclass BaseAggregatorParticipant(BaseParticipant):\n\n def __init__(self, participant_name: str, model_args: ModelArgs, context):\n super().__init__(participant_name, model_args, context)\n\n def aggregate(self, participants: List['BaseTrainingParticipant'], *args, **kwargs):\n \"\"\"\n Aggregate the models of other participants with their models.\n :param participants: Participants to apply the model changes from\n :return:\n \"\"\"\n raise NotImplementedError()\n\n\nclass BaseParticipantModel(object):\n\n def __init__(self, *args, participant_name=None, optimizer_args: Optional[OptimizerArgs]=None,\n model=None, **kwargs):\n assert participant_name is not None, 'Please provide a participant name parameter in model args to identify' \\\n 'your model in logging'\n assert optimizer_args is not None, 'Optimizer args not set!'\n assert model is not None, 'Model not passed!'\n self.participant_name = participant_name\n self.optimizer_args = optimizer_args\n super().__init__(*args, **kwargs)\n self.model = model\n 
self._optimizer_state = None\n\n @property\n def optimizer_state(self):\n return self._optimizer_state\n\n @optimizer_state.setter\n def optimizer_state(self, value):\n self._optimizer_state = value\n\n def configure_optimizers(self):\n return self.optimizer_args(self.model.parameters())\n \"\"\"\n Do not restore state\n if self.optimizer_state is not None:\n optimizer.load_state_dict(self.optimizer_state)\n return optimizer\n \"\"\"\n"
] | [
[
"torch.is_tensor",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
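A usage sketch for the optimizer_state_dict_to_cpu helper in the record above, assuming it is importable from mlmi.participant: one Adam step populates per-parameter state on the model's device, and the helper deep-copies every tensor in that state to CPU so it can be serialized or aggregated across clients.

import torch
from torch import optim

model = torch.nn.Linear(4, 2)
opt = optim.Adam(model.parameters())
model(torch.randn(1, 4)).sum().backward()  # populate gradients
opt.step()                                 # populate optimizer state
cpu_state = optimizer_state_dict_to_cpu(opt.state_dict())
print(cpu_state["state"][0]["exp_avg"].device)  # cpu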
Flsahkong/transferlearning | [
"0fe84de59dcb2871e2dca24130dc24e1ccce8506"
] | [
"code/distance/mmd_pytorch.py"
] | [
"# Compute MMD distance using pytorch\n\nimport torch\nimport torch.nn as nn\n\n\nclass MMD_loss(nn.Module):\n def __init__(self, kernel_type='rbf', kernel_mul=2.0, kernel_num=5):\n super(MMD_loss, self).__init__()\n self.kernel_num = kernel_num\n self.kernel_mul = kernel_mul\n self.fix_sigma = None\n self.kernel_type = kernel_type\n\n def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):\n n_samples = int(source.size()[0]) + int(target.size()[0])\n total = torch.cat([source, target], dim=0)\n total0 = total.unsqueeze(0).expand(\n int(total.size(0)), int(total.size(0)), int(total.size(1)))\n total1 = total.unsqueeze(1).expand(\n int(total.size(0)), int(total.size(0)), int(total.size(1)))\n L2_distance = ((total0-total1)**2).sum(2)\n if fix_sigma:\n bandwidth = fix_sigma\n else:\n bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)\n bandwidth /= kernel_mul ** (kernel_num // 2)\n bandwidth_list = [bandwidth * (kernel_mul**i)\n for i in range(kernel_num)]\n kernel_val = [torch.exp(-L2_distance / bandwidth_temp)\n for bandwidth_temp in bandwidth_list]\n return sum(kernel_val)\n\n def linear_mmd2(self, f_of_X, f_of_Y):\n loss = 0.0\n delta = f_of_X.float().mean(0) - f_of_Y.float().mean(0)\n loss = delta.dot(delta.T)\n return loss\n\n def forward(self, source, target):\n if self.kernel_type == 'linear':\n return self.linear_mmd2(source, target)\n elif self.kernel_type == 'rbf':\n batch_size = int(source.size()[0])\n kernels = self.guassian_kernel(\n source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma)\n with torch.no_grad():\n XX = torch.mean(kernels[:batch_size, :batch_size])\n YY = torch.mean(kernels[batch_size:, batch_size:])\n XY = torch.mean(kernels[:batch_size, batch_size:])\n YX = torch.mean(kernels[batch_size:, :batch_size])\n loss = torch.mean(XX + YY - XY - YX)\n torch.cuda.empty_cache()\n return loss\n"
] | [
[
"torch.mean",
"torch.cat",
"torch.sum",
"torch.cuda.empty_cache",
"torch.exp",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
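Usage sketch for the MMD_loss module above. Note that the 'rbf' branch computes the kernel block means under torch.no_grad(), so the returned value carries no gradient as written; the sketch therefore only evaluates the distance between two feature batches.

import torch

mmd = MMD_loss(kernel_type='rbf', kernel_num=5)  # assumes the class above is in scope
src = torch.randn(64, 128)        # source-domain features
tgt = torch.randn(64, 128) + 0.5  # target-domain features, mean-shifted
print(mmd(src, tgt).item())       # larger value => larger domain gap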
goudfroo/pysiaf | [
"ca8350ce814950344789a9674079b8d0168ac05e"
] | [
"pysiaf/iando/write.py"
] | [
"\"\"\"Functions to write Science Instrument Aperture Files (SIAF).\n\nSIAF content in an aperture_collection object can be written to an xml file that can be ingested in\nthe PRD. Format and order of the xml fields are defined in SIAF reference files.\nWriting to Microsoft Excel .xlsx format is supported.\nWriting to .csv and other formats supported by astropy.table.Table.write is enabled.\n\nAuthors\n-------\n Johannes Sahlmann\n\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport lxml.etree as ET\nfrom astropy.time import Time\nfrom astropy.table import Table, Column\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Font, Color\nfrom openpyxl.styles import Alignment\n\nfrom ..version import __version__\nfrom ..constants import _JWST_TEMPORARY_ROOT\nfrom ..aperture import PRD_REQUIRED_ATTRIBUTES_ORDERED, SIAF_XML_FIELD_FORMAT, FLOAT_ATTRIBUTES\n\n# dictionary used to set field precision in SIAF.XML\nxml_decimal_precision = {}\nfield_names = list(SIAF_XML_FIELD_FORMAT['field_name'])\nfor attr in PRD_REQUIRED_ATTRIBUTES_ORDERED:\n index = field_names.index(attr)\n xml_decimal_precision[attr] = SIAF_XML_FIELD_FORMAT['pyformat'][index]\n\n\ndef write_jwst_siaf(aperture_collection, filename=None, basepath=None, label=None,\n file_format='xml', verbose=True):\n \"\"\"Write the content of aperture_collection into xml and xlsx files that are PRD-compliant.\n\n Parameters\n ----------\n aperture_collection : ApertureCollection\n dictionary of apertures\n filename\n basepath\n label\n file_format : str list\n one of ['xml', 'xlsx', 'csv', and formats supported by astropy Table.write]\n verbose\n\n Returns\n -------\n\n TODO\n ----\n test support of astropy Table.write formats (FITS not working)\n\n\n \"\"\"\n if type(file_format) == str:\n file_format = [file_format]\n\n aperture_names = np.array([key for key in aperture_collection.apertures.keys()])\n instrument = aperture_collection.apertures[aperture_names[0]].InstrName\n\n if instrument == 'NIRCAM':\n instrument = 'NIRCam'\n elif instrument == 'NIRSPEC':\n instrument = 'NIRSpec'\n\n if (filename is not None) and (len(list(file_format)) != 1):\n raise RuntimeError('When filename is specified, only one output format is supported')\n\n if label is not None:\n name_seed = instrument + '_SIAF_{}'.format(label)\n else:\n name_seed = instrument + '_SIAF'\n\n filenames = []\n # hostname = os.uname()[1]\n username = os.getlogin()\n timestamp = Time.now()\n\n for file_format in list(file_format):\n if filename is None:\n if basepath is None:\n basepath = _JWST_TEMPORARY_ROOT\n if not os.path.isdir(basepath):\n raise RuntimeError(\"Could not write SIAF data \"\n \"to {}. 
Directory does not exist.\".format(basepath))\n if file_format == 'xml':\n out_filename = os.path.join(basepath, name_seed+'.xml')\n elif file_format == 'xlsx':\n out_filename = os.path.join(basepath, name_seed+'.xlsx')\n # elif file_format == 'csv':\n # out_filename = os.path.join(basepath, name_seed+'.csv')\n else:\n out_filename = os.path.join(basepath, name_seed+'.{}'.format(file_format))\n else:\n out_filename = filename\n\n if file_format == 'xml':\n root = ET.Element('SiafEntries')\n\n # add generation info as comment to SIAFXML\n root.append(ET.Comment('Generated {} {}'.format(timestamp.isot, timestamp.scale)))\n root.append(ET.Comment('by {}'.format(username)))\n # try:\n # repo = git.Repo(os.path.abspath(__file__), search_parent_directories=True)\n # git_version = git.Git(repo.working_dir).describe()\n # root.append(ET.Comment('pysiaf git-version {}'.format(git_version)))\n # except git.exc.InvalidGitRepositoryError:\n root.append(ET.Comment('pysiaf version {}'.format(__version__)))\n\n for aperture_name in aperture_names:\n\n aperture = aperture_collection.apertures[aperture_name]\n siaf_entry = ET.SubElement(root, 'SiafEntry')\n for attribute in PRD_REQUIRED_ATTRIBUTES_ORDERED:\n attribute_value = getattr(aperture_collection.apertures[aperture_name],\n attribute)\n if attribute_value is None:\n attribute_text = None\n\n # NIRSpec special case\n elif (aperture.AperType in ['TRANSFORM']) and \\\n (attribute in 'XSciRef YSciRef XSciScale YSciScale V2Ref V3Ref'.\n split()):\n attribute_text = '{:{prec}}'.format(attribute_value,\n prec='.15e').strip()\n elif attribute in FLOAT_ATTRIBUTES:\n attribute_text = '{:{prec}}'.format(\n attribute_value, prec=xml_decimal_precision[attribute]).strip()\n else:\n attribute_text = str(attribute_value)\n\n if (not isinstance(attribute_value, str)) and (attribute_text is not None):\n if np.isnan(attribute_value):\n attribute_text = None\n\n ET.SubElement(siaf_entry, attribute).text = attribute_text\n\n doc = ET.ElementTree(root)\n\n doc.write(out_filename, pretty_print=True, xml_declaration=False)\n if verbose:\n print('Wrote Siaf to xml file {}'.format(out_filename))\n\n elif file_format == 'xlsx':\n siaf_workbook = Workbook()\n\n ws1 = siaf_workbook.active\n ws1.title = 'SIAF'\n\n header_row_description = 1\n header_row_attributes = 2\n\n # write descriptive header\n for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):\n col = j + 1\n if attribute_name == 'InstrName':\n text = 'Aperture Basic Info'\n elif attribute_name == 'XDetSize':\n text = 'Detector Frame'\n elif attribute_name == 'XSciSize':\n text = 'Science Frame'\n elif attribute_name == 'V2Ref':\n text = 'V Frame'\n elif attribute_name == 'V2IdlYAngle':\n text = 'Frame Relationships'\n elif attribute_name == 'XIdlVert1':\n text = 'Vertices'\n elif attribute_name == 'Sci2IdlDeg':\n text = 'Science to Ideal Polynomial'\n else:\n text = ''\n\n cell = ws1.cell(column=col, row=header_row_description, value=\"{}\".format(text))\n cell.font = Font(name='Courier', b=True, i=True, family=3.0, sz=14.0)\n # cell.font.color = Color(rgb='FF0000FF', type='rgb')\n\n # write aperture attributes\n for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):\n col = j + 1\n cell = ws1.cell(column=col, row=header_row_attributes, value=\"{}\".\n format(attribute_name))\n cell.font = Font(name='Calibri', b=True, family=2.0, sz=15.0)\n cell.alignment = Alignment(horizontal='center')\n\n # write aperture values\n for i, aper_name in enumerate(aperture_names):\n aperture = 
aperture_collection.apertures[aper_name]\n # aperture = siaf[aper_name]\n\n row = i + 1 + header_row_attributes\n for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):\n col = j + 1\n cell = ws1.cell(column=col, row=row, value=\"{}\".\n format(getattr(aperture, attribute_name)))\n if attribute_name not in 'InstrName\tAperName DDCName AperType AperShape'.\\\n split():\n cell.alignment = Alignment(horizontal='right')\n\n # adjust column width\n for column_cells in ws1.columns:\n length = max(len(cell.value or '') for cell in column_cells[1:])\n ws1.column_dimensions[column_cells[0].column].width = length * 1.5\n siaf_workbook.save(filename=out_filename)\n if verbose:\n print('Wrote Siaf to xlsx file {}'.format(out_filename))\n\n else:\n table = Table()\n for attribute_name in PRD_REQUIRED_ATTRIBUTES_ORDERED:\n data = [getattr(aperture_collection.apertures[aperture_name], attribute_name) for\n aperture_name in aperture_names]\n table.add_column(Column(data=data, name=attribute_name))\n table.write(out_filename, format=file_format)\n if verbose:\n print('Wrote Siaf to {} file {}'.format(file_format, out_filename))\n\n filenames.append(out_filename)\n\n return filenames\n"
] | [
[
"numpy.isnan"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
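A standalone check of the per-field numeric formatting used when writing the SIAF XML above: the precision spec (e.g. '.15e' for the NIRSpec TRANSFORM special case, or a value from xml_decimal_precision) is injected into str.format through a nested replacement field.

value = 0.123456789
print('{:{prec}}'.format(value, prec='.15e').strip())  # 1.234567890000000e-01
print('{:{prec}}'.format(value, prec='.4f').strip())   # 0.1235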
vnarayan13/featuretools | [
"a86b6d8df246a13558d19915b15230c418ad27ab"
] | [
"featuretools/primitives/standard/aggregation_primitives.py"
] | [
"from __future__ import division\n\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..base.aggregation_primitive_base import (\n AggregationPrimitive,\n make_agg_primitive\n)\n\nfrom featuretools.variable_types import (\n Boolean,\n DatetimeTimeIndex,\n Discrete,\n Index,\n Numeric,\n Variable\n)\n\n\nclass Count(AggregationPrimitive):\n \"\"\"Counts the number of non null values.\"\"\"\n name = \"count\"\n input_types = [[Index]]\n return_type = Numeric\n stack_on_self = False\n default_value = 0\n\n def get_function(self):\n return 'count'\n\n def generate_name(self, base_feature_names, child_entity_id,\n parent_entity_id, where_str, use_prev_str):\n return u\"COUNT(%s%s%s)\" % (child_entity_id,\n where_str, use_prev_str)\n\n\nclass Sum(AggregationPrimitive):\n \"\"\"Sums elements of a numeric or boolean feature.\"\"\"\n name = \"sum\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on_self = False\n stack_on_exclude = [Count]\n default_value = 0\n\n def get_function(self):\n return np.sum\n\n\nclass Mean(AggregationPrimitive):\n \"\"\"Computes the average value of a numeric feature.\n Defaults to not ignoring NaNs when computing mean.\n\n \"\"\"\n name = \"mean\"\n input_types = [Numeric]\n return_type = Numeric\n\n def __init__(self, skipna=True):\n self.skipna = skipna\n\n def get_function(self):\n if self.skipna:\n # np.mean of series is functionally nanmean\n return np.mean\n\n def mean(series):\n return np.mean(series.values)\n return mean\n\n def generate_name(self, base_feature_names, child_entity_id,\n parent_entity_id, where_str, use_prev_str):\n skipna = \"\"\n if not self.skipna:\n skipna = \", skipna=False\"\n base_features_str = \", \".join(base_feature_names)\n return u\"%s(%s.%s%s%s%s)\" % (self.name.upper(),\n child_entity_id,\n base_features_str,\n where_str,\n use_prev_str,\n skipna)\n\n\nclass Mode(AggregationPrimitive):\n \"\"\"Finds the most common element in a categorical feature.\"\"\"\n name = \"mode\"\n input_types = [Discrete]\n return_type = None\n\n def get_function(self):\n def pd_mode(s):\n return s.mode().get(0, np.nan)\n return pd_mode\n\n\nMin = make_agg_primitive(\n np.min,\n [Numeric],\n Numeric,\n name=\"Min\",\n stack_on_self=False,\n description=\"Finds the minimum non-null value of a numeric feature.\")\n\n\nclass Max(AggregationPrimitive):\n \"\"\"Finds the maximum non-null value of a numeric feature.\"\"\"\n name = \"max\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on_self = False\n\n def get_function(self):\n return np.max\n\n\nclass NUnique(AggregationPrimitive):\n \"\"\"Returns the number of unique categorical variables.\"\"\"\n name = \"num_unique\"\n input_types = [Discrete]\n return_type = Numeric\n stack_on_self = False\n\n def get_function(self):\n # note: returning pd.Series.nunique errors for python2,\n # so using this branching code path while we support python2\n from sys import version_info\n if version_info.major < 3:\n def nunique(x):\n return pd.Series(x).nunique()\n return nunique\n else:\n return pd.Series.nunique\n\n\nclass NumTrue(AggregationPrimitive):\n \"\"\"Finds the number of 'True' values in a boolean.\"\"\"\n name = \"num_true\"\n input_types = [Boolean]\n return_type = Numeric\n default_value = 0\n stack_on = []\n stack_on_exclude = []\n\n def get_function(self):\n return np.sum\n\n\nclass PercentTrue(AggregationPrimitive):\n \"\"\"Finds the percent of 'True' values in a boolean feature.\"\"\"\n name = \"percent_true\"\n input_types = [Boolean]\n 
return_type = Numeric\n stack_on = []\n stack_on_exclude = []\n default_value = 0\n\n def get_function(self):\n def percent_true(s):\n return s.fillna(0).mean()\n return percent_true\n\n\nclass NMostCommon(AggregationPrimitive):\n \"\"\"Finds the N most common elements in a categorical feature.\"\"\"\n name = \"n_most_common\"\n input_types = [Discrete]\n return_type = Discrete\n\n def __init__(self, n=3):\n self.number_output_features = n\n\n def get_function(self):\n def n_most_common(x, n=self.number_output_features):\n array = np.array(x.value_counts()[:n].index)\n if len(array) < n:\n filler = np.full(n - len(array), np.nan)\n array = np.append(array, filler)\n return array\n return n_most_common\n\n\nclass AvgTimeBetween(AggregationPrimitive):\n \"\"\"Computes the average time between consecutive events.\n\n Note: equivalent to Mean(Diff(time_index)), but more performant\n \"\"\"\n\n # Potentially unnecessary if we add an trans_feat that\n # calculates the difference between events. DFS\n # should then calculate the average of that trans_feat\n # which amounts to AvgTimeBetween\n name = \"avg_time_between\"\n input_types = [DatetimeTimeIndex]\n return_type = Numeric\n\n def get_function(self):\n def pd_avg_time_between(x):\n \"\"\"Assumes time scales are closer to order\n of seconds than to nanoseconds\n if times are much closer to nanoseconds\n we could get some floating point errors\n\n this can be fixed with another function\n that calculates the mean before converting\n to seconds\n \"\"\"\n x = x.dropna()\n if x.shape[0] < 2:\n return np.nan\n if isinstance(x.iloc[0], (pd.Timestamp, datetime)):\n x = x.astype('int64')\n # use len(x)-1 because we care about difference\n # between values, len(x)-1 = len(diff(x))\n\n avg = (x.max() - x.min()) / (len(x) - 1)\n avg = avg * 1e-9\n\n # long form:\n # diff_in_ns = x.diff().iloc[1:].astype('int64')\n # diff_in_seconds = diff_in_ns * 1e-9\n # avg = diff_in_seconds.mean()\n return avg\n return pd_avg_time_between\n\n\nclass Median(AggregationPrimitive):\n \"\"\"Finds the median value of any feature with well-ordered values.\"\"\"\n name = \"median\"\n input_types = [Numeric]\n return_type = Numeric\n\n def get_function(self):\n return lambda x: x.median()\n\n\nclass Skew(AggregationPrimitive):\n \"\"\"Computes the skewness of a data set.\n\n For normally distributed data, the skewness should be about 0. 
A skewness\n value > 0 means that there is more weight in the left tail of the\n distribution.\n \"\"\"\n name = \"skew\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on = []\n stack_on_self = False\n\n def get_function(self):\n return 'skew'\n\n\nclass Std(AggregationPrimitive):\n \"\"\"Finds the standard deviation of a numeric feature ignoring null values.\n \"\"\"\n name = \"std\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on_self = False\n\n def get_function(self):\n return np.std\n\n\nclass Last(AggregationPrimitive):\n \"\"\"Returns the last value.\"\"\"\n name = \"last\"\n input_types = [Variable]\n return_type = None\n stack_on_self = False\n\n def get_function(self):\n def pd_last(x):\n return x.iloc[-1]\n return pd_last\n\n\nclass Any(AggregationPrimitive):\n \"\"\"Test if any value is 'True'.\"\"\"\n name = \"any\"\n input_types = [Boolean]\n return_type = Boolean\n stack_on_self = False\n\n def get_function(self):\n return np.any\n\n\nclass All(AggregationPrimitive):\n \"\"\"Test if all values are 'True'.\"\"\"\n name = \"all\"\n input_types = [Boolean]\n return_type = Boolean\n stack_on_self = False\n\n def get_function(self):\n return np.all\n\n\nclass TimeSinceLast(AggregationPrimitive):\n \"\"\"Time since last related instance.\"\"\"\n name = \"time_since_last\"\n input_types = [DatetimeTimeIndex]\n return_type = Numeric\n uses_calc_time = True\n\n def get_function(self):\n\n def time_since_last(values, time=None):\n time_since = time - values.iloc[-1]\n return time_since.total_seconds()\n\n return time_since_last\n\n\nclass TimeSinceFirst(AggregationPrimitive):\n \"\"\"Time since first related instance.\"\"\"\n name = \"time_since_first\"\n input_types = [DatetimeTimeIndex]\n return_type = Numeric\n uses_calc_time = True\n\n def get_function(self):\n\n def time_since_first(values, time=None):\n time_since = time - values.iloc[0]\n return time_since.total_seconds()\n\n return time_since_first\n\n\nclass Trend(AggregationPrimitive):\n \"\"\"Calculates the slope of the linear trend of variable overtime.\"\"\"\n name = \"trend\"\n input_types = [Numeric, DatetimeTimeIndex]\n return_type = Numeric\n\n def get_function(self):\n def pd_trend(y, x):\n df = pd.DataFrame({\"x\": x, \"y\": y}).dropna()\n if df.shape[0] <= 2:\n return np.nan\n if isinstance(df['x'].iloc[0], (datetime, pd.Timestamp)):\n x = convert_datetime_to_floats(df['x'])\n else:\n x = df['x'].values\n\n if isinstance(df['y'].iloc[0], (datetime, pd.Timestamp)):\n y = convert_datetime_to_floats(df['y'])\n elif isinstance(df['y'].iloc[0], (timedelta, pd.Timedelta)):\n y = convert_timedelta_to_floats(df['y'])\n else:\n y = df['y'].values\n\n x = x - x.mean()\n y = y - y.mean()\n\n # prevent divide by zero error\n if len(np.unique(x)) == 1:\n return 0\n\n # consider scipy.stats.linregress for large n cases\n coefficients = np.polyfit(x, y, 1)\n\n return coefficients[0]\n return pd_trend\n\n\ndef convert_datetime_to_floats(x):\n first = int(x.iloc[0].value * 1e-9)\n x = pd.to_numeric(x).astype(np.float64).values\n dividend = find_dividend_by_unit(first)\n x *= (1e-9 / dividend)\n return x\n\n\ndef convert_timedelta_to_floats(x):\n first = int(x.iloc[0].total_seconds())\n dividend = find_dividend_by_unit(first)\n x = pd.TimedeltaIndex(x).total_seconds().astype(np.float64) / dividend\n return x\n\n\ndef find_dividend_by_unit(time):\n \"\"\"Finds whether time best corresponds to a value in\n days, hours, minutes, or seconds.\n \"\"\"\n for dividend in [86400, 3600, 60]:\n div = time / dividend\n 
if round(div) == div:\n return dividend\n return 1\n"
] | [
[
"numpy.polyfit",
"pandas.Series",
"pandas.TimedeltaIndex",
"numpy.unique",
"pandas.DataFrame",
"numpy.append",
"numpy.mean",
"pandas.to_numeric"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
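The Trend primitive above centers x and y and takes the degree-1 np.polyfit coefficient as the slope. A standalone check with a hypothetical daily series that gains one unit per day, converting time to seconds the same way convert_datetime_to_floats does, so the expected slope is 1/86400:

import numpy as np
import pandas as pd

t = pd.Series(pd.date_range("2020-01-01", periods=5))
x = pd.to_numeric(t).astype(np.float64).values * 1e-9  # ns -> seconds
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
x = x - x.mean()
y = y - y.mean()
print(np.polyfit(x, y, 1)[0] * 86400)  # ~1.0 unit gained per day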
yardenas/meta-learning-tutorial | [
"c5154eae85f6255f58fe6028ab630e3499238b3a"
] | [
"omniglot_dataset.py"
] | [
"from typing import Iterator, List, Tuple\n\nimport os\nimport random\n\nimport numpy as np\n\nfrom tensorflow import data as tfd\nfrom tensorflow import image as tfi\nfrom tensorflow import io as tfio\nfrom tensorflow import dtypes\nimport tensorflow as tf\n\nfrom google_drive_downloader import GoogleDriveDownloader\n\n\nclass Omniglot:\n\n def __init__(self,\n meta_batch_size: int,\n num_classes: int,\n num_samples_per_class: int,\n seed: int = 666):\n self.meta_batch_size = meta_batch_size\n self.num_samples_per_class = num_samples_per_class\n self.num_classes = num_classes\n self.seed = seed\n if not os.path.isdir('./omniglot_resized'):\n GoogleDriveDownloader.download_file_from_google_drive(\n file_id='1iaSFXIYC3AB8q9K_M-oVMa4pmB7yKMtI',\n dest_path='./omniglot_resized.zip',\n unzip=True)\n\n data_folder = './omniglot_resized'\n self.img_size = 28, 28\n\n character_folders = [\n os.path.join(data_folder, family, character)\n for family in os.listdir(data_folder)\n if os.path.isdir(os.path.join(data_folder, family))\n for character in os.listdir(os.path.join(data_folder, family))\n if os.path.isdir(os.path.join(data_folder, family, character))\n ]\n\n random.seed(1)\n random.shuffle(character_folders)\n num_val = 100\n num_train = 1100\n self.metatrain = self._make_dataset(character_folders[:num_train])\n self.metaval = self._make_dataset(character_folders[num_train:num_train +\n num_val])\n self.metatest = self._make_dataset(character_folders[num_train + num_val:])\n\n @property\n def train_set(\n self\n ) -> Iterator[Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray,\n np.ndarray]]]:\n yield from self.metatrain.as_numpy_iterator()\n\n @property\n def eval_set(\n self\n ) -> Iterator[Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray,\n np.ndarray]]]:\n yield from self.metaval.as_numpy_iterator()\n\n @property\n def test_set(\n self\n ) -> Iterator[Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray,\n np.ndarray]]]:\n yield from self.metatest.as_numpy_iterator()\n\n def _make_dataset(self, folders: List[str]) -> tfd.Dataset:\n characters = tfd.Dataset.from_tensor_slices(folders).shuffle(\n 1100, seed=self.seed, reshuffle_each_iteration=True)\n\n def get_images_filenames(char):\n all_images = tfio.matching_files(char + '/*.png')\n return tfd.Dataset.from_tensor_slices(\n tf.random.shuffle(all_images,\n seed=self.seed)[:self.num_samples_per_class + 1])\n\n # Use interleave to read the relevant .png files as we iterate through the\n # 1100 different chars. Set block_length to num_samples_per_class so that\n # we can next batch images from same char together.\n image_filenames = characters.interleave(\n get_images_filenames,\n num_parallel_calls=tfd.AUTOTUNE,\n block_length=self.num_samples_per_class + 1).repeat()\n\n def load_image(image_filename):\n img = tfio.read_file(image_filename)\n img = tfio.decode_png(img, channels=1)\n img = tfi.resize(img, self.img_size)\n img = tf.cast(img, dtypes.float32) / 255.0\n img = 1.0 - img\n return img\n\n # Unbatch map and batch to allow tf to read images concurrently. 
Class\n # grouping is maintained.\n shots = image_filenames.map(\n load_image,\n num_parallel_calls=tfd.AUTOTUNE).batch(self.num_samples_per_class + 1)\n ways = shots.batch(self.num_classes)\n tasks = ways.batch(self.meta_batch_size)\n\n def to_support_and_query_sets(batch):\n support_x, query_x = tf.split(\n tf.transpose(batch, (0, 2, 1, 3, 4, 5)),\n (self.num_samples_per_class, 1),\n axis=1)\n support_y, query_y = tf.split(\n tf.eye(\n self.num_classes,\n batch_shape=(self.meta_batch_size,\n self.num_samples_per_class + 1)),\n (self.num_samples_per_class, 1),\n axis=1)\n ids = tf.range(0, self.num_classes, dtype=dtypes.int32)\n ids = tf.random.shuffle(ids, seed=self.seed)\n query_x = tf.gather(query_x, ids, axis=2)\n query_y = tf.gather(query_y, ids, axis=2)\n new_shape = lambda x: tf.concat([(self.meta_batch_size, -1),\n tf.shape(x)[3:]], 0)\n reshape = lambda x: tf.reshape(x, new_shape(x))\n return (reshape(support_x), reshape(support_y)), (reshape(query_x),\n reshape(query_y))\n\n return tasks.map(\n to_support_and_query_sets,\n num_parallel_calls=tfd.AUTOTUNE).prefetch(tfd.AUTOTUNE)\n"
] | [
[
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.shape",
"tensorflow.io.decode_png",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.cast",
"tensorflow.eye",
"tensorflow.gather",
"tensorflow.random.shuffle",
"tensorflow.io.matching_files",
"tensorflow.image.resize",
"tensorflow.io.read_file"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
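The interleave call above relies on block_length to keep the num_samples_per_class + 1 images of each character contiguous, so the following .batch() groups samples per class. A tiny demo of that grouping with two hypothetical "characters" of two samples each:

import tensorflow as tf

chars = tf.data.Dataset.from_tensor_slices([10, 20])
ds = chars.interleave(
    lambda c: tf.data.Dataset.from_tensor_slices([c + 1, c + 2]),
    cycle_length=2, block_length=2)
print(list(ds.as_numpy_iterator()))  # [11, 12, 21, 22]: per-"character" blocks stay together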
zyxwvu321/Classifer_SSL_Longtail | [
"e6c09414c49e695b0f4221a3c6245ae3929a1788",
"e6c09414c49e695b0f4221a3c6245ae3929a1788",
"e6c09414c49e695b0f4221a3c6245ae3929a1788"
] | [
"modeling/backbones/senet.py",
"utils/parse_meta.py",
"preproc_segloc.py"
] | [
"\"\"\"\nResNet code gently borrowed from\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nfrom collections import OrderedDict\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.utils import model_zoo\n\n__all__ = ['SENet', 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152',\n 'se_resnext50_32x4d', 'se_resnext101_32x4d']\n\npretrained_settings = {\n 'senet154': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet50': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet101': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet152': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnext50_32x4d': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnext101_32x4d': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n}\n\n\nclass SEModule(nn.Module):\n\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,\n padding=0)\n self.relu = nn.ReLU(inplace=True)\n self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,\n padding=0)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n return module_input * x\n\n\nclass Bottleneck(nn.Module):\n \"\"\"\n Base class for bottlenecks that implements `forward()` method.\n \"\"\"\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = self.se_module(out) + residual\n out = self.relu(out)\n\n return out\n\n\nclass SEBottleneck(Bottleneck):\n \"\"\"\n Bottleneck for SENet154.\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None):\n super(SEBottleneck, self).__init__()\n self.conv1 = 
nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes * 2)\n self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,\n stride=stride, padding=1, groups=groups,\n bias=False)\n self.bn2 = nn.BatchNorm2d(planes * 4)\n self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNetBottleneck(Bottleneck):\n \"\"\"\n ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe\n implementation and uses `stride=stride` in `conv1` and not in `conv2`\n (the latter is used in the torchvision implementation of ResNet).\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None):\n super(SEResNetBottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,\n stride=stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,\n groups=groups, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNeXtBottleneck(Bottleneck):\n \"\"\"\n ResNeXt bottleneck type C with a Squeeze-and-Excitation module.\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None, base_width=4):\n super(SEResNeXtBottleneck, self).__init__()\n width = math.floor(planes * (base_width / 64)) * groups\n self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,\n stride=1)\n self.bn1 = nn.BatchNorm2d(width)\n self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,\n padding=1, groups=groups, bias=False)\n self.bn2 = nn.BatchNorm2d(width)\n self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SENet(nn.Module):\n\n def __init__(self, block, layers, groups, reduction, dropout_p=0.2,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, last_stride=2, last2_stride = 2):\n \"\"\"\n Parameters\n ----------\n block (nn.Module): Bottleneck class.\n - For SENet154: SEBottleneck\n - For SE-ResNet models: SEResNetBottleneck\n - For SE-ResNeXt models: SEResNeXtBottleneck\n layers (list of ints): Number of residual blocks for 4 layers of the\n network (layer1...layer4).\n groups (int): Number of groups for the 3x3 convolution in each\n bottleneck block.\n - For SENet154: 64\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 32\n reduction (int): Reduction ratio for Squeeze-and-Excitation modules.\n - For all models: 16\n dropout_p (float or None): Drop probability for the Dropout layer.\n If `None` the Dropout layer is not used.\n - For SENet154: 0.2\n - For SE-ResNet models: None\n - For SE-ResNeXt models: None\n inplanes (int): Number of input channels for layer1.\n - For SENet154: 128\n - For SE-ResNet models: 64\n - For SE-ResNeXt models: 64\n input_3x3 (bool): If `True`, use three 3x3 convolutions instead of\n a single 7x7 convolution in 
layer0.\n - For SENet154: True\n - For SE-ResNet models: False\n - For SE-ResNeXt models: False\n downsample_kernel_size (int): Kernel size for downsampling convolutions\n in layer2, layer3 and layer4.\n - For SENet154: 3\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 1\n downsample_padding (int): Padding for downsampling convolutions in\n layer2, layer3 and layer4.\n - For SENet154: 1\n - For SE-ResNet models: 0\n - For SE-ResNeXt models: 0\n num_classes (int): Number of outputs in `last_linear` layer.\n - For all models: 1000\n \"\"\"\n super(SENet, self).__init__()\n self.inplanes = inplanes\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n # To preserve compatibility with Caffe weights `ceil_mode=True`\n # is used instead of `padding=1`.\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,\n ceil_mode=True)))\n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=last2_stride,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=last_stride,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None\n\n def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,\n downsample_kernel_size=1, downsample_padding=0):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=downsample_kernel_size, stride=stride,\n padding=downsample_padding, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, groups, reduction, stride,\n downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups, reduction))\n\n return nn.Sequential(*layers)\n \n def load_param(self, model_path):\n param_dict = torch.load(model_path)\n for i in param_dict:\n if 'last_linear' in i:\n continue\n self.state_dict()[i].copy_(param_dict[i])\n\n def forward(self, x):\n x = self.layer0(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 7 15:17:20 2020\n\n@author: minjie\n\"\"\"\nimport numpy as np\nimport math\ndef parse_age(age_list, th_list=np.arange(5,90,5)): \n #parse age to +1/-1/0 0(NaN)\n \n age_list = np.array(age_list)\n n_sample = len(age_list)\n n_dim = len(th_list)\n th_list = np.array(th_list).astype('float32')\n \n meta_age = np.zeros((n_sample,n_dim),dtype = 'float32')\n \n for idx,age in enumerate(age_list):\n age =float(age)\n if age ==0.0 or math.isnan(age) is True:\n pass\n else:\n idx_th = np.where(th_list<age)[0]\n if len(idx_th)>0:\n idx_th = idx_th[-1]+1\n meta_age[idx][:idx_th] = 1\n meta_age[idx][idx_th:] = -1\n else:\n meta_age[idx][:] = -1\n \n return meta_age\n \n \n \ndef parse_sex(sex_list): \n #parse age to +1(male)/-1(female)/0 0(NaN)\n \n sex_list = np.array(sex_list)\n n_sample = len(sex_list)\n \n\n meta_sex = np.zeros((n_sample,1),dtype = 'float32')\n \n for idx,sex in enumerate(sex_list):\n if isinstance(sex,str): \n if sex.lower() =='male':\n meta_sex[idx] = 1\n elif sex.lower() =='female':\n meta_sex[idx] = -1\n else: \n raise ValueError('wrong sex input')\n elif math.isnan(sex) is True:\n pass\n else:\n raise ValueError('wrong pos input')\n \n \n return meta_sex\n\n\n\n\ndef parse_pos(pos_list, all_pos): \n #parse pos to +1(is the pos)/ 0(not the position) \n \n pos_list = np.array(pos_list)\n n_sample = len(pos_list)\n \n n_dim = len(all_pos)\n meta_pos = np.zeros((n_sample,n_dim),dtype = 'float32')\n \n for idx,pos in enumerate(pos_list):\n \n if isinstance(pos,str):\n \n pos = pos.lower()\n if pos in all_pos:\n # in ISIC19, there are more position ,need fix\n pos_idx = all_pos.index(pos) \n meta_pos[idx][pos_idx] = 1\n else: # if meta info is a superset of given pos\n pos_out = []\n for idx,pp in enumerate(all_pos):\n if pp in pos:\n pos_out.append(idx)\n if len(pos_out)==1:\n meta_pos[idx][pos_out[0]] = 1\n elif len(pos_out)==0:\n pass\n else:\n raise ValueError('two match')\n \n elif math.isnan(pos) is True:\n pass\n else:\n raise ValueError('wrong pos input')\n \n return meta_pos\n\n\ndef parse_boxsz(hw, box_info):\n assert hw.shape[0]==box_info.shape[0], 'parse_boxsz, sample not match'\n \n hw = hw.astype('float32')\n box_info = box_info.astype('float32')\n boxsz = np.zeros((hw.shape[0], 2), dtype = 'float32')\n \n \n boxsz[:,0] = (box_info[:,3] - box_info[:,1])/hw[:,0]\n boxsz[:,1] = (box_info[:,2] - box_info[:,0])/hw[:,1]\n return boxsz\n \n \ndef parse_kpds(kp_aug,hw_in,hw_out):\n hin,win = hw_in\n hout,wout = hw_out\n #points = [[ww/2.0,hh/2.0,1.0],[0.0,0.0,1.0]]\n pt_center, pt_corner = kp_aug\n \n \n \n d_in = math.sqrt((hin/2.0)**2+(win/2.0)**2)\n d_out = math.sqrt((hout/2.0)**2+(wout/2.0)**2)\n \n d_in_t = math.sqrt((pt_center[0]-pt_corner[0])**2+(pt_center[1]-pt_corner[0])**2)\n \n ss = d_out/d_in_t - 0.7\n \n \n d_c = math.sqrt((pt_center[0]-hout/2.0)**2+(pt_center[1]-wout/2.0)**2)\n d_c_ori = d_c / pt_center[2] # scale adjustment\n \n dc_sft = d_c_ori/d_in\n \n\n \n\n return (dc_sft,ss)",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 28 22:11:41 2019\n\n@author: cmj\n\"\"\"\n\nfrom pathlib import Path\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\nimport pandas as pd\nfd = 'D:/dataset/ISIC/ISIC07_task1_train'\nfd_img = 'ISIC-2017_Training_Data'\nfd_anno = 'ISIC-2017_Training_Part1_GroundTruth'\nfd_box = 'ISIC-2017_Box'\nfd_imgt = 'ISIC-2017_Img'\n\nout_wh = (512,512)\n\nfd_img = Path(fd)/fd_img\nfd_anno = Path(fd)/fd_anno\nfd_box = Path(fd)/fd_box\nfd_imgt = Path(fd)/fd_imgt\n\n\nfd_box.mkdir(parents = True,exist_ok = True)\nfd_imgt.mkdir(parents = True,exist_ok = True)\n\n\nflist = sorted(list(fd_img.glob('*.jpg')))\n\n\n\npos_infos = list()\n\nfor fn in tqdm(flist):\n img = cv2.imread(str(fn))\n \n fn_mask = str(Path(fd_anno)/fn.name).replace('.jpg','_segmentation.png')\n img_mask = cv2.imread(str(fn_mask))[...,0]\n \n maskpos = np.where(img_mask)\n \n ymin,ymax = maskpos[0].min(),maskpos[0].max()\n xmin,xmax = maskpos[1].min(),maskpos[1].max()\n \n im_w,im_h = img.shape[1],img.shape[0]\n \n img_t = cv2.resize(img,out_wh)\n \n \n xx = (xmin+xmax)/2.0\n yy = (ymin+ymax)/2.0\n ww = xmax - xmin + 1\n hh = ymax - ymin + 1\n \n pos_infos.append(np.array([fn.name,img_mask.shape[1],img_mask.shape[0],round(xx),round(yy),ww,hh]))\n \n xx = round(xx * out_wh[0]/float(im_w))\n ww = round(ww * out_wh[0]/float(im_w))\n yy = round(yy * out_wh[1]/float(im_h))\n hh = round(hh * out_wh[1]/float(im_h))\n \n fn_imgt = str(fd_imgt/fn.name)\n fn_txt = str(fd_box/fn.name).replace('.jpg','.txt')\n \n cv2.imwrite(fn_imgt,img_t)\n \n np.savetxt(fn_txt,np.array([xx,yy,ww,hh])[None,...], fmt = '%d')\n \n \n\npos_infos = np.array(pos_infos)\n\ndf = pd.DataFrame(data = pos_infos,columns=['fname','x','y','boxx','boxy','boxw','boxh'])\ndf.to_csv(str(Path(fd)/'info.csv'))\n\n\n\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"numpy.arange",
"numpy.array",
"numpy.zeros",
"numpy.where"
],
[
"numpy.array",
"numpy.where",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
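For context, the parse_age helper embedded in the code field of the record above builds a thermometer-style encoding: every 5-year threshold below the given age maps to +1, the remaining thresholds to -1, and an unknown age (0 or NaN) is left as an all-zero row. A minimal, vectorized sketch of the same idea (the threshold grid np.arange(5, 90, 5) comes from the source; the function name thermometer_encode is illustrative, not the repo's):

import math

import numpy as np

def thermometer_encode(ages, thresholds=np.arange(5, 90, 5)):
    # +1 for every threshold strictly below the age, -1 otherwise;
    # a missing age (0 or NaN) stays as an all-zero row.
    out = np.zeros((len(ages), len(thresholds)), dtype='float32')
    for i, age in enumerate(ages):
        age = float(age)
        if age == 0.0 or math.isnan(age):
            continue
        out[i] = np.where(thresholds < age, 1.0, -1.0)
    return out

# thermometer_encode([42, float('nan'), 7]) yields +1 for the 5..40 thresholds
# and -1 for 45..85 in the first row, all zeros in the second row, and +1 only
# at the 5 threshold in the third.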
emirhanai/Machine-Learning-Prediction-Software-Based-on-Classification-and-Regression-Based-on-Processor-CPU- | [
"051be998eb9195dccf28c2e7607ead0812c79cf1"
] | [
"Machine Learning Prediction Software Based on Classification and Regression Based on Processor [CPU] Specifications.py"
] | [
"import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.tree import *\r\nfrom sklearn.ensemble import *\r\nfrom sklearn.preprocessing import *\r\nfrom sklearn.model_selection import *\r\nfrom sklearn.metrics import *\r\n\r\n\r\ndata = pd.read_csv('data.csv')\r\n\r\nX = data.drop(['Company','Processor Name'],axis='columns')\r\ny = data.drop(['Turbo Speed (GHz)','Processor Name','Processor Cores','Processor Threads','Typical TDP (W)','Average CPU Mark'],axis='columns')\r\n\r\n#load of change function for columns changing.\r\ny_data = LabelEncoder()\r\n\r\n#print(y)\r\n\r\ny['Company_Change'] = y_data.fit_transform(y['Company'])\r\n\r\ny_update_data = y.drop(['Company'],axis='columns')\r\n\r\nfloat_y_update_data = np.float64(y_update_data)\r\n\r\n#print(float_y_update_data)\r\n\r\n#for i in np.arange(0,1,1):\r\n\r\n#X_train,X_test,y_train and y_test files of creating (with suitable parameters).\r\nX_train, X_test, y_train, y_test = train_test_split(X, y_update_data, test_size=0.2, random_state=15, shuffle=True,\r\n stratify=None)\r\n# model - processor classifier\r\nmodel_processor = ExtraTreeClassifier(criterion=\"gini\", splitter=\"random\")\r\n\r\n# model - processor regression\r\nmodel_processor_regression = ExtraTreesRegressor(n_estimators=1)\r\n\r\n# model - processor fit\r\nmodel_processor_regression.fit(X_train, y_train)\r\n\r\n# model - processor classifier fit\r\nmodel_processor.fit(X_train, y_train)\r\n\r\n# \"\"CLASSIFIER OF SCORE AND RESULT\"\"\r\n\r\n# model - processor classifier y_pred\r\n\r\ny_pred_of_model = model_processor.predict(X_test)\r\n\r\n# model classifier score of result\r\n# print(\"Select of X {} \".format(i))\r\nprint(\"Classifier Accuracy Score: {} \".format(accuracy_score(y_test,y_pred_of_model)))\r\nprint(\"Classifier Precision Score: {} \".format(precision_score(y_test,y_pred_of_model)))\r\nprint(\"Classifier Recall Score: {} \".format(recall_score(y_test,y_pred_of_model)))\r\nprint(\"Classifier F1 Score: {} \".format(f1_score(y_test,y_pred_of_model)))\r\na,b,_ = roc_curve(y_test,y_pred_of_model)\r\nprint(\"Classifier AUC Score: {} \".format(auc(a,b)))\r\nprint(\"Classifier Confision Matrix: {} \".format(confusion_matrix(y_test,y_pred_of_model)))\r\n\r\n# \"\"REGRESSION OF SCORE AND RESULT\"\"\r\n\r\ny_pred_of_regression_in_model = model_processor_regression.predict(X_test)\r\n\r\n# print(\"Select of X {} \".format(i))\r\nprint(\"Regression Accuracy Score: {} \".format(accuracy_score(y_test, y_pred_of_regression_in_model)))\r\nprint(\"Regression Precision Score: {} \".format(precision_score(y_test, y_pred_of_regression_in_model)))\r\nprint(\"Regression Recall Score: {} \".format(recall_score(y_test, y_pred_of_regression_in_model)))\r\nprint(\"Regression F1 Score: {} \".format(f1_score(y_test, y_pred_of_regression_in_model)))\r\na, b, _ = roc_curve(y_test, y_pred_of_regression_in_model)\r\nprint(\"Regression AUC Score: {} \".format(auc(a, b)))\r\nprint(\"Regression Confision Matrix: {} \".format(confusion_matrix(y_test, y_pred_of_regression_in_model)))\r\n\r\n# Enter you random value for Features :)\r\nProcessor_Cores = int(input(\"Enter, Processor Cores: \"))\r\nProcessor_Threads = int(input(\"Enter, Processor Threads: \"))\r\nTurbo_Speed_GHz = float(input(\"Enter, Turbo Speed (GHz): \"))\r\nTypical_TDP_W = int(input(\"Enter, Typical TDP (W): \"))\r\nAverage_CPU_Mark = int(input(\"Enter, Average CPU Mark: \"))\r\n\r\n# prediction, random value of Company!\r\nprediction_of_company_random_value = model_processor_regression.predict(\r\n [[Processor_Cores, 
Processor_Threads, Turbo_Speed_GHz, Typical_TDP_W, Average_CPU_Mark]])\r\n\r\n# I create of algorithm :)\r\ndata_class = pd.read_csv('class.csv', index_col=None, na_values=None)\r\nclass_value_detect = data_class.columns.values[int(prediction_of_company_random_value)]\r\nprint('Prediction company: {} '.format(class_value_detect))\r\n\r\n# model classifier save of format to .dot file :)\r\nfrom graphviz import Source\r\ndotfile = open(\"emirhan_project.dot\",'w')\r\n\r\ngraph_of_data_dot = Source(export_graphviz(model_processor,\r\n filled=True,\r\n rounded=True,\r\n out_file=dotfile,\r\n feature_names=X.columns,\r\n class_names=['AMD = 0','INTEL = 1']))\r\ndotfile.close()\r\n\r\n#CLASSIFICATION RESULT\r\n\r\n#Classifier Accuracy Score: 1.0\r\n#Classifier Precision Score: 1.0\r\n#Classifier Recall Score: 1.0\r\n#Classifier F1 Score: 1.0\r\n#Classifier AUC Score: 1.0\r\n#Classifier Confision Matrix: [[5 0]\r\n #[0 2]]\r\n\r\n#REGRESSION RESULT\r\n\r\n#Regression Accuracy Score: 1.0\r\n#Regression Precision Score: 1.0\r\n#Regression Recall Score: 1.0\r\n#Regression F1 Score: 1.0\r\n#Regression AUC Score: 1.0\r\n#Regression Confision Matrix: [[5 0]\r\n #[0 2]]\r\n"
] | [
[
"pandas.read_csv",
"numpy.float64"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
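In the CPU-prediction script above, the predicted company is recovered by indexing the column names of a separate class.csv file. Since the script already fits a LabelEncoder on the Company column, the same integer-to-name mapping can also be read back from the encoder itself with inverse_transform; a small sketch under that assumption (the label strings here are illustrative):

from sklearn.preprocessing import LabelEncoder

le = LabelEncoder()
codes = le.fit_transform(['AMD', 'INTEL', 'AMD'])   # classes sorted -> AMD=0, INTEL=1
prediction = 1                                      # integer class produced by a model
print(le.inverse_transform([int(prediction)])[0])   # prints 'INTEL'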
ytorzuk-altran/openvino | [
"031e998a15ec738c64cc2379d7f30fb73087c272",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0",
"031e998a15ec738c64cc2379d7f30fb73087c272",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0",
"031e998a15ec738c64cc2379d7f30fb73087c272",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0",
"68d460a3bb578a738ba0e4d0e1f2e321afa73ab0"
] | [
"src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py",
"tools/mo/openvino/tools/mo/front/onnx/lstm_ext.py",
"src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_softmax.py",
"tools/mo/openvino/tools/mo/back/PackBinaryWeights.py",
"tools/pot/openvino/tools/pot/api/samples/3d_segmentation/3d_segmentation_sample.py",
"tools/mo/unit_tests/mo/back/moc_preprocessing_test_actual.py",
"tools/mo/openvino/tools/mo/ops/elementwise.py",
"tools/mo/openvino/tools/mo/front/FillToBroadcast.py",
"tools/mo/openvino/tools/mo/front/binary_quantize_normalization.py",
"tools/mo/openvino/tools/mo/middle/StridedSliceNormalizer.py",
"tools/mo/openvino/tools/mo/front/mxnet/conv_ext.py"
] | [
"#\n# pool2d paddle model generator\n#\nimport numpy as np\nfrom save_model import saveModel\nimport sys\n\ndef yolo_box(name : str, x, img_size, attrs : dict):\n import paddle as pdpd\n pdpd.enable_static()\n \n with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):\n node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)\n node_img_size = pdpd.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype)\n boxes, scores = pdpd.vision.ops.yolo_box(node_x,\n node_img_size,\n anchors=attrs['anchors'],\n class_num=attrs['class_num'],\n conf_thresh=attrs['conf_thresh'],\n downsample_ratio=attrs['downsample_ratio'],\n clip_bbox=attrs['clip_bbox'],\n name=None, \n scale_x_y=attrs['scale_x_y'])\n\n cpu = pdpd.static.cpu_places(1)\n exe = pdpd.static.Executor(cpu[0])\n # startup program will call initializer to initialize the parameters.\n exe.run(pdpd.static.default_startup_program())\n\n outs = exe.run(\n feed={'x': x, 'img_size': img_size},\n fetch_list=[boxes, scores])\n \n # Save inputs in order of ngraph function, to facilite Fuzzy test, \n # which accepts inputs and outputs in this order as well. \n saveModel(name, exe, feedkeys=['x', 'img_size'], fetchlist=[boxes, scores],\n inputs=[x, img_size], outputs=outs, target_dir=sys.argv[1])\n\n return outs\n\n\ndef TEST1():\n # yolo_box\n pdpd_attrs = {\n 'name': \"yolo_box_default\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': False,\n 'scale_x_y': 1.0\n }\n\n pdpd_attrs_clip_box = {\n 'name': \"yolo_box_clip_box\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': True,\n 'scale_x_y': 1.0\n }\n\n pdpd_attrs_scale_xy = {\n 'name': \"yolo_box_scale_xy\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': True,\n 'scale_x_y': 1.2\n }\n\n pdpd_attrs_list = [pdpd_attrs, pdpd_attrs_clip_box, pdpd_attrs_scale_xy]\n \n N = 32\n num_anchors = int(len(pdpd_attrs['anchors'])//2)\n x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), 13, 13)\n imgsize_shape = (N, 2)\n\n data = np.random.random(x_shape).astype('float32')\n data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32') \n\n for item in pdpd_attrs_list:\n pred_pdpd = yolo_box(item['name'], data, data_ImSize, item)\n\n\ndef TEST2():\n # yolo_box uneven spatial width and height\n pdpd_attrs = {\n 'name': \"yolo_box_uneven_wh\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': False,\n 'scale_x_y': 1.0\n }\n\n N = 16\n SPATIAL_WIDTH = 13\n SPATIAL_HEIGHT = 9\n num_anchors = int(len(pdpd_attrs['anchors'])//2)\n x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH)\n imgsize_shape = (N, 2)\n\n data = np.random.random(x_shape).astype('float32')\n data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32')\n \n pred_pdpd = yolo_box(pdpd_attrs['name'], data, data_ImSize, pdpd_attrs)\n\nif __name__ == \"__main__\":\n TEST1()\n TEST2()",
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.ops.LSTM import LSTM\nfrom openvino.tools.mo.front.extractor import FrontExtractorOp\nfrom openvino.tools.mo.front.onnx.extractors.utils import onnx_attr\n\n\nclass LSTMFrontExtractor(FrontExtractorOp):\n op = 'LSTM'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n activation_alpha = onnx_attr(node, 'activation_alpha', 'floats',\n default=None, dst_type=lambda x: np.array(x, dtype=np.float32))\n activation_beta = onnx_attr(node, 'activation_beta', 'floats',\n default=None, dst_type=lambda x: np.array(x, dtype=np.float32))\n activations = onnx_attr(node, 'activations', 'strings', default=None,\n dst_type=lambda x: list(map(lambda s: s.decode(encoding=\"utf-8\").lower(), list(x))))\n clip = onnx_attr(node, 'clip', 'f', default=None)\n input_forget = onnx_attr(node, 'input_forget', 'i', default=0)\n\n attrs = {\n 'batch_dim': 1,\n 'sequence_dim': 0,\n 'blobs_wrb': True,\n 'has_num_directions': True,\n 'num_layers': 1,\n 'format': 'onnx',\n 'multilayers': False,\n 'gate_order': [2, 0, 3, 1], # iofc --> fico\n\n # ONNX attrs\n 'activation_alpha': activation_alpha,\n 'activation_beta': activation_beta,\n 'activations': activations,\n 'clip': clip,\n 'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(),\n 'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),\n 'input_forget': input_forget,\n }\n\n LSTM.update_node_stat(node, attrs)\n return cls.enabled\n",
"#\n# softmax paddle model generator\n#\nimport numpy as np\nimport sys\nfrom save_model import saveModel\n\n\ndef softmax(name: str, x, axis):\n import paddle as pdpd\n pdpd.enable_static()\n\n node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')\n out = pdpd.nn.functional.softmax(x=node_x, axis=axis)\n\n cpu = pdpd.static.cpu_places(1)\n exe = pdpd.static.Executor(cpu[0])\n # startup program will call initializer to initialize the parameters.\n exe.run(pdpd.static.default_startup_program())\n\n outs = exe.run(\n feed={'x': x},\n fetch_list=[out])\n\n saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])\n\n return outs[0]\n\n\ndef main():\n data = np.array(\n [[[2.0, 3.0, 4.0, 5.0],\n [3.0, 4.0, 5.0, 6.0],\n [7.0, 8.0, 8.0, 9.0]],\n [[1.0, 2.0, 3.0, 4.0],\n [5.0, 6.0, 7.0, 8.0],\n [6.0, 7.0, 8.0, 9.0]]]\n ).astype(np.float32)\n\n softmax(\"softmax\", data, axis=1)\n softmax(\"softmax_minus\", data, axis=-1)\n\n\nif __name__ == \"__main__\":\n main()\n",
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.back.replacement import BackReplacementPattern\nfrom openvino.tools.mo.graph.graph import Graph\n\n\nclass PackBinaryWeights(BackReplacementPattern):\n enabled = True\n\n @staticmethod\n def pattern():\n return dict(\n nodes=[\n ('op', dict(kind='op', type='BinaryConvolution'))],\n edges=[]\n )\n\n @staticmethod\n def replace_pattern(graph: Graph, match: dict):\n conv = match['op']\n assert len(conv.in_nodes()) == 2\n initial_shape = conv.in_port(1).data.get_shape()\n assert initial_shape is not None\n weights = conv.in_port(1).data.get_value().flatten()\n weights_rounded = np.round(weights)\n assert np.all(np.isclose(weights, weights_rounded))\n assert len(conv.in_node(1).out_nodes()) == 1\n weights_rounded = np.array(weights_rounded, dtype=np.int32) + 1 # -1 --> 0\n # Reversing element in chunks by 8 elements to pack bits correctly\n # First need to pad data with necessary number of element to make the length dividable by 8\n pad = (-len(weights_rounded)) % 8\n weights_rounded = np.array(np.concatenate((weights_rounded, np.zeros([pad]))), dtype=np.int32)\n assert len(weights_rounded) % 8 == 0\n weights_rounded = weights_rounded.reshape([len(weights_rounded) // 8, 8])\n weights_rounded = np.flip(weights_rounded, axis=1)\n weights_rounded = weights_rounded.flatten()\n packed = np.packbits(weights_rounded)\n conv.in_port(1).data.set_value(packed)\n conv['packed_weights'] = 1\n\n conv.in_node(1)['force_shape'] = initial_shape.copy()\n conv.in_node(1)['shape'] = initial_shape.copy()\n conv.in_node(1)['force_type'] = 'U1'\n",
"# Copyright (C) 2020-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\n\nimport nibabel as nib\nimport numpy as np\nfrom scipy.ndimage import interpolation\nfrom addict import Dict\n\nfrom openvino.tools.pot.api import Metric, DataLoader\nfrom openvino.tools.pot.engines.ie_engine import IEEngine\nfrom openvino.tools.pot.graph import load_model, save_model\nfrom openvino.tools.pot.graph.model_utils import compress_model_weights\nfrom openvino.tools.pot.pipeline.initializer import create_pipeline\nfrom openvino.tools.pot.utils.logger import init_logger\nfrom openvino.tools.pot.api.samples.utils.argument_parser import get_common_argparser\n\n# Initialize the logger to print the quantization process in the console.\ninit_logger(level='INFO')\n\n\n# Custom DataLoader class implementation that is required for\n# the proper reading of BRATS 3D Segmentation images and annotations.\nclass BRATSDataLoader(DataLoader):\n\n # Required methods:\n def __init__(self, config):\n if not isinstance(config, Dict):\n config = Dict(config)\n super().__init__(config)\n self._img_ids = sorted(os.listdir(self.config.data_source))\n\n def __getitem__(self, index):\n \"\"\"\n Returns annotation, image and image metadata at the specified index.\n Possible formats:\n (img_id, img_annotation), image\n (img_id, img_annotation), image, image_metadata\n \"\"\"\n\n if index >= len(self):\n raise IndexError\n mask_path = os.path.join(self.config.mask_dir, self._img_ids[index])\n image_path = os.path.join(self.config.data_source, self._img_ids[index])\n\n annotation = (index, self._read_image(mask_path))\n image, image_meta = self._preprocess_image(self._read_image(image_path))\n return annotation, image, image_meta\n\n def __len__(self):\n \"\"\" Returns size of the dataset \"\"\"\n return len(self._img_ids)\n\n # Methods specific to the current implementation\n def _read_image(self, data_id):\n nib_image = nib.load(str(os.path.join(self.config.data_source, data_id)))\n image = np.array(nib_image.dataobj)\n if len(image.shape) != 4: # Make sure 4D\n image = np.expand_dims(image, -1)\n image = np.transpose(image, (3, 0, 1, 2))\n\n return image\n\n def _preprocess_image(self, image):\n image_meta = {'image_shape': image.shape}\n\n # Swap modalities (mri_sequence)\n image = image[self.config.modality_order, :, :, :]\n # Crop\n image, bbox = self.crop(image)\n # Normalize\n image = self.normalize_img(image)\n # Resize\n shape = (image.shape[0],) + self.config.size\n image = resize3d(image, shape)\n\n image_meta['bbox'] = bbox\n\n return image, image_meta\n\n @staticmethod\n def crop(image):\n def bbox3d(img):\n nonzero_rows = np.any(img, axis=(1, 2)).nonzero()[0]\n nonzero_cols = np.any(img, axis=(0, 2)).nonzero()[0]\n nonzero_slices = np.any(img, axis=(0, 1)).nonzero()[0]\n\n bbox_ = np.array([[-1, -1, -1], [0, 0, 0]])\n if nonzero_rows.size > 0:\n bbox_[:, 0] = nonzero_rows[[0, -1]]\n bbox_[:, 1] = nonzero_cols[[0, -1]]\n bbox_[:, 2] = nonzero_slices[[0, -1]]\n\n return bbox_\n\n bboxes = np.stack([bbox3d(i) for i in image])\n bbox = np.stack([np.min(bboxes[:, 0, :], axis=0), np.max(bboxes[:, 1, :], axis=0)])\n\n image = image[:, bbox[0, 0]:bbox[1, 0], bbox[0, 1]:bbox[1, 1], bbox[0, 2]:bbox[1, 2]]\n\n return image, bbox\n\n @staticmethod\n def normalize_img(image):\n for channel in range(image.shape[0]):\n img = image[channel, :, :, :].copy()\n mask = img > 0\n image_masked = np.ma.masked_array(img, ~mask)\n mean, std = np.mean(image_masked), np.std(image_masked)\n\n img -= mean\n img /= std\n 
image[channel, :, :, :] = img\n\n return image\n\n\n# Custom implementation of Dice Index metric.\nclass DiceIndex(Metric):\n\n # Required methods\n def __init__(self, num_classes):\n self._classes_num = num_classes\n super().__init__()\n self._name = 'dice_index'\n self._overall_metric = []\n\n @property\n def value(self):\n \"\"\" Returns accuracy metric value for the last model output.\n Possible format: {metric_name: [metric_values_per_image]}\n \"\"\"\n return {self._name: [np.mean(self._overall_metric[-1])]}\n\n @property\n def avg_value(self):\n \"\"\" Returns accuracy metric value for all model outputs.\n Possible format: {metric_name: metric_value}\n \"\"\"\n return {self._name: np.mean(self._overall_metric)}\n\n def update(self, output, target):\n \"\"\" Calculates and updates metric value\n :param output: model output\n :param target: annotations\n \"\"\"\n if len(output) != 1 or len(target) != 1:\n raise Exception('The Dice Index metric cannot be calculated '\n 'for a model with multiple outputs')\n\n output = output[0]\n target = target[0]\n result = np.zeros(shape=self._classes_num)\n for i in range(1, self._classes_num):\n annotation_data_ = (target == i)\n prediction_data_ = (output == i)\n\n intersection_count = np.logical_and(annotation_data_, prediction_data_).sum()\n union_count = annotation_data_.sum() + prediction_data_.sum()\n if union_count > 0:\n result[i] += 2.0*intersection_count / union_count\n\n annotation = (target > 0)\n prediction = (output > 0)\n\n intersection_count = np.logical_and(annotation, prediction).sum()\n union_count = annotation.sum() + prediction.sum()\n if union_count > 0:\n result[0] += 2.0 * intersection_count / union_count\n\n self._overall_metric.append(result)\n\n def reset(self):\n \"\"\" Resets metric \"\"\"\n self._overall_metric = []\n\n def get_attributes(self):\n \"\"\"\n Returns a dictionary of metric attributes {metric_name: {attribute_name: value}}.\n Required attributes: 'direction': 'higher-better' or 'higher-worse'\n 'type': metric type\n \"\"\"\n return {self._name: {'direction': 'higher-better',\n 'type': 'dice_index'}}\n\n\n# Custom wrapper over IEEngine that implements postrocessor function, which can process the\n# raw output of the model using metadata obtained during the image reading and preprocessing.\nclass SegmentationEngine(IEEngine):\n @staticmethod\n def postprocess_output(outputs, metadata):\n \"\"\"\n Processes model raw output for future metric and loss calculation.\n Uses image metadata that can be passed using dataloader.\n :param outputs: network infer result in format of numpy ndarray (batch x image shape)\n :param metadata: dictionary of image metadata\n :return: processed numpy ndarray with the same shape as the original output\n \"\"\"\n processed_outputs = []\n for output, meta in zip(outputs, metadata):\n # Resize to bounding box size and extend to mask size\n low = meta['bbox'][0]\n high = meta['bbox'][1]\n box_shape = tuple((high - low).astype(np.int32))\n\n image_shape = meta['image_shape'][-3:]\n processed_output = np.zeros(shape=(output.shape[0],) + image_shape)\n\n processed_output[:, low[0]:high[0], low[1]:high[1], low[2]:high[2]] = \\\n resize3d(output, shape=(output.shape[0],) + box_shape)\n\n # Transforms prediction from WT-TC-ET format to NCR/NET-ED-ET.\n # Elements passing the threshold of 0.5 fill with specified values\n result = np.zeros(shape=processed_output.shape[1:], dtype=np.int8) # pylint: disable=E1136\n\n label = processed_output > 0.5\n wt = label[0]\n tc = label[1]\n et = 
label[2]\n\n result[wt] = 1\n result[tc] = 2\n result[et] = 3\n\n processed_outputs.append(result)\n\n return np.stack(processed_outputs, axis=0)\n\n\ndef resize3d(image, shape):\n image = np.asarray(image)\n\n factor = [float(o) / i for i, o in zip(image.shape, shape)]\n image = interpolation.zoom(image, zoom=factor, order=1)\n\n return image\n\n\ndef main():\n parser = get_common_argparser()\n parser.add_argument(\n '--mask-dir',\n help='Path to the directory with segmentation masks',\n required=True\n )\n\n args = parser.parse_args()\n if not args.weights:\n args.weights = '{}.bin'.format(os.path.splitext(args.model)[0])\n\n model_config = Dict({\n 'model_name': 'brain-tumor-segmentation-0002',\n 'model': os.path.expanduser(args.model),\n 'weights': os.path.expanduser(args.weights)\n })\n\n engine_config = Dict({\n 'device': 'CPU',\n 'stat_requests_number': 4,\n 'eval_requests_number': 4\n })\n\n dataset_config = Dict({\n 'data_source': os.path.expanduser(args.dataset),\n 'mask_dir': os.path.expanduser(args.mask_dir),\n 'modality_order': [1, 2, 3, 0],\n 'size': (128, 128, 128)\n })\n\n algorithms = [\n {\n 'name': 'DefaultQuantization',\n 'params': {\n 'target_device': 'ANY',\n 'preset': 'performance',\n 'stat_subset_size': 200\n }\n }\n ]\n\n # Step 1: Load the model.\n model = load_model(model_config)\n\n # Step 2: Initialize the data loader.\n data_loader = BRATSDataLoader(dataset_config)\n\n # Step 3 (Optional. Required for AccuracyAwareQuantization): Initialize the metric.\n metric = DiceIndex(num_classes=4)\n\n # Step 4: Initialize the engine for metric calculation and statistics collection.\n engine = SegmentationEngine(config=engine_config,\n data_loader=data_loader,\n metric=metric)\n\n # Step 5: Create a pipeline of compression algorithms.\n pipeline = create_pipeline(algorithms, engine)\n\n # Step 6: Execute the pipeline.\n compressed_model = pipeline.run(model)\n\n # Step 7 (Optional): Compress model weights to quantized precision\n # in order to reduce the size of final .bin file.\n compress_model_weights(compressed_model)\n\n # Step 8: Save the compressed model to the desired path.\n save_model(compressed_model, os.path.join(os.path.curdir, 'optimized'))\n\n # Step 9 (Optional): Evaluate the compressed model. Print the results.\n metric_results = pipeline.evaluate(compressed_model)\n if metric_results:\n for name, value in metric_results.items():\n print('{: <27s}: {}'.format(name, value))\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport unittest\nfrom argparse import Namespace\n\nfrom openvino.tools.mo.utils.error import Error\n\nimport numpy as np\n\n\ntry:\n # pylint: disable=no-name-in-module,import-error\n from openvino.tools.mo.back.preprocessing import apply_preprocessing\n\n # pylint: disable=no-name-in-module,import-error\n import openvino.runtime.opset8 as ops\n from openvino.runtime import Function, Layout, PartialShape\n\nexcept Exception:\n print(\"No OpenVINO API available,\"\n \"ensure to set correct PYTHONPATH when running these tests\")\n raise\n\n\ndef create_function2(shape1=[2, 2], shape2=[2, 2], dtype1=np.float32, dtype2=np.float32):\n input1 = ops.parameter(shape1, dtype=dtype1, name=\"input1\")\n input1.get_output_tensor(0).set_names({'input1', 'input1a'})\n relu1 = ops.relu(input1)\n res1 = ops.result(relu1, \"res1\")\n res1.get_output_tensor(0).set_names({'res1', 'res1a'})\n input2 = ops.parameter(shape2, dtype=dtype2, name=\"input2\")\n input2.get_output_tensor(0).set_names({'input2', 'input2a'})\n relu2 = ops.relu(input2)\n res2 = ops.result(relu2, \"res2\")\n res2.get_output_tensor(0).set_names({'res2', 'res2a'})\n function = Function(results=[res1, res2], parameters=[input1, input2], name=\"TestFunction\")\n return function\n\n\ndef create_function1(shape1=[2, 2]):\n input1 = ops.parameter(shape1, dtype=np.float32, name=\"input1\")\n input1.get_output_tensor(0).set_names({'input1', 'input1a'})\n relu1 = ops.relu(input1)\n res1 = ops.result(relu1, \"res1\")\n res1.get_output_tensor(0).set_names({'res1', 'res1a'})\n function = Function(results=[res1], parameters=[input1], name=\"TestFunction\")\n return function\n\n\ndef process_function(ov_function: Function, argv: Namespace):\n apply_preprocessing(ov_function=ov_function, argv=argv)\n\n\nclass TestPreprocessingMOC(unittest.TestCase):\n def setUp(self):\n pass\n\n def check_scale_constant(self, node, expected, shape=None):\n const_node = node.input(1).get_source_output().get_node()\n self.assertEqual(const_node.get_type_name(), 'Constant')\n if node.get_type_name() == 'Divide':\n self.assertTrue(np.allclose(const_node.get_vector(), expected))\n else:\n self.assertTrue(np.allclose(const_node.get_vector(), 1. 
/ expected))\n if shape:\n assert const_node.shape == PartialShape(shape)\n\n def check_mean_constant(self, node, expected, shape=None):\n const_node = node.input(1).get_source_output().get_node()\n self.assertEqual(const_node.get_type_name(), 'Constant')\n if node.get_type_name() == 'Subtract':\n self.assertTrue(np.allclose(const_node.get_vector(), expected))\n else:\n self.assertTrue(np.allclose(const_node.get_vector(), -expected.toList()))\n if shape:\n self.assertEqual(const_node.shape, PartialShape(shape))\n\n def test_scale_single_value(self):\n argv = Namespace(mean_scale_values=None, scale=2.0)\n function = create_function2()\n process_function(ov_function=function, argv=argv)\n\n for param in function.get_parameters():\n op_node = list(param.output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, [2.0])\n\n def test_scale_single_value_fp64(self):\n argv = Namespace(mean_scale_values=None, scale=2.0)\n function = create_function2(dtype1=np.float64)\n process_function(ov_function=function, argv=argv)\n\n for ov_input in function.inputs:\n op_node = list(ov_input.get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, [2.0])\n\n def test_scale_single_value_fp16(self):\n argv = Namespace(mean_scale_values=None, scale=2.0)\n function = create_function2(dtype1=np.float16)\n process_function(ov_function=function, argv=argv)\n\n for ov_input in function.inputs:\n op_node = list(ov_input.get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n\n def test_scale_vector(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([4.]), 'mean': None}}, scale=None)\n function = create_function2()\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, [4.0], shape=None)\n # Verify that input2 is not affected\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n def test_scale_vector3(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2., 4., 8.]), 'mean': None}}, scale=None)\n function = create_function2(shape1=[1, 3, 224, 224])\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])\n\n # Verify that input2 is not affected\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n # Verify that guessed layout (?C??) 
is not appeared in input1\n self.assertEqual(function.get_parameters()[0].layout, Layout())\n\n def test_scale_vector4_layout(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2., 4., 8., 9.]), 'mean': None}},\n layout_values={'input1': {'source_layout': 'nhwc'}},\n scale=None)\n function = create_function2(shape1=[1, 3, 3, 4]) # Use layout to determine channels dim\n\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4])\n\n # Verify that input2 is not affected\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n # Verify that layout (NHWC) is appeared in input1\n self.assertEqual(function.get_parameters()[0].layout, Layout('nhwc'))\n\n def test_mean_single(self):\n argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None)\n function = create_function2()\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, [4.0], shape=None)\n # Verify that input2 is not affected\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n def test_mean_single_fp64(self):\n argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None)\n function = create_function2(dtype1=np.float64)\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, [4.0], shape=None)\n # Verify that input2 is not affected\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n def test_mean_single_fp16(self):\n argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None)\n function = create_function2(dtype1=np.float16)\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n # Verify that input2 is not affected\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n def test_mean_vector3(self):\n argv = Namespace(mean_scale_values={'input2': {'mean': np.array([2., 4., 8.]), 'scale': None}}, scale=None)\n function = create_function2(shape2=[1, 3, 224, 224])\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])\n\n # Verify that input1 is not affected\n op_node = 
list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n # Verify that guessed layout (?C??) is not appeared in input2\n self.assertEqual(function.get_parameters()[1].layout, Layout())\n\n def test_mean_scale(self):\n argv = Namespace(mean_scale_values={'input2a': {'mean': np.array([1., 2., 3.]),\n 'scale': np.array([2., 4., 8.])}},\n scale=None)\n function = create_function2(shape2=[1, 3, 224, 224])\n process_function(ov_function=function, argv=argv)\n # Verify that first is 'subtract mean', then 'scale'\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[1., 2., 3.], shape=[1, 3, 1, 1])\n\n op_node = list(op_node.output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])\n\n # Verify that input1 is not affected\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n # Verify that guessed layout (?C??) is not appeared in input2\n self.assertEqual(function.get_parameters()[1].layout, Layout())\n\n def test_mean_scale_with_layout(self):\n argv = Namespace(mean_scale_values={'input2a': {'mean': np.array([1., 2., 3., 4.]),\n 'scale': np.array([2., 4., 8., 9.])}},\n scale=None)\n function = create_function2(shape2=[1, 3, 3, 4])\n function.get_parameters()[1].layout = Layout(\"NHWC\")\n process_function(ov_function=function, argv=argv)\n # Verify that first is 'subtract mean', then 'scale'\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[1., 2., 3., 4.], shape=[1, 1, 1, 4])\n\n op_node = list(op_node.output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4])\n\n # Verify that input1 is not affected\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n # Verify that layout presents in function after preprocessing\n self.assertEqual(function.get_parameters()[1].layout, Layout(\"NHWC\"))\n\n def test_mean_scale_with_layout_dynamic(self):\n argv = Namespace(mean_scale_values={'input2a': {'mean': np.array([1., 2., 3., 4.]),\n 'scale': np.array([2., 4., 8., 9.])}},\n scale=None)\n function = create_function2(shape2=[-1, -1, -1, -1])\n function.get_parameters()[1].layout = Layout(\"NHWC\")\n process_function(ov_function=function, argv=argv)\n # Verify that first is 'subtract mean', then 'scale'\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[1., 2., 3., 4.], shape=[1, 1, 1, 4])\n\n op_node = list(op_node.output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[2., 
4., 8., 9.], shape=[1, 1, 1, 4])\n\n # Verify that input1 is not affected\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertEqual(op_node.get_type_name(), 'Relu')\n\n # Verify that layout presents in function after preprocessing\n self.assertEqual(function.get_parameters()[1].layout, Layout(\"NHWC\"))\n\n def test_no_param_name(self):\n argv = Namespace(mean_scale_values=list(np.array([(np.array([1., 2., 3.]), np.array([2., 4., 6.])),\n (np.array([7., 8., 9.]), None)],\n dtype='object')), scale=None)\n function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 224, 224, 3])\n process_function(ov_function=function, argv=argv)\n\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[1., 2., 3.], shape=[1, 3, 1, 1])\n\n op_node = list(op_node.output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[2., 4., 6.], shape=[1, 3, 1, 1])\n\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[7., 8., 9.], shape=[1, 1, 1, 3])\n\n # Verify that guessed layouts are not appeared in inputs\n self.assertEqual(function.get_parameters()[0].layout, Layout())\n self.assertEqual(function.get_parameters()[1].layout, Layout())\n\n def test_no_param_name_single_value(self):\n argv = Namespace(mean_scale_values=list(np.array([(np.array([1.]), None),\n (np.array([2., 3., 4.]), np.array([5.]))],\n dtype='object')), scale=None)\n function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 224, 224, 3])\n process_function(ov_function=function, argv=argv)\n\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[1.], shape=None)\n\n op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add')\n self.check_mean_constant(op_node, expected=[2., 3., 4.], shape=[1, 1, 1, 3])\n\n op_node = list(op_node.output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[5.], shape=None)\n\n # Two inputs, but 'mean_scale_value' has only one array\n def test_error_no_param_name_number_not_match(self):\n argv = Namespace(mean_scale_values=[(np.array([2., 3.]), np.array([4.]))], scale=None)\n function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224])\n with self.assertRaisesRegex(Error, '.*question.*61.*'):\n process_function(ov_function=function, argv=argv)\n\n def test_mean_scale_error_no_node_name_found(self):\n argv = Namespace(mean_scale_values={'not_found': {'scale': np.array([1.]), 'mean': np.array([1.])}},\n scale=None)\n function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224])\n with self.assertRaisesRegex(Error, '.*question.*83.*'):\n process_function(ov_function=function, argv=argv)\n\n def test_layout_error_no_node_name_found(self):\n 
argv = Namespace(layout_values={'not_found': {'source_layout': 'nhwc'}},\n scale=None)\n function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224])\n with self.assertRaisesRegex(Error, '.*question.*83.*'):\n process_function(ov_function=function, argv=argv)\n\n def test_error_dimension_mismatch(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3., 4.]), 'mean': None}},\n scale=None)\n function = create_function2(shape1=[1, 3, 224, 224])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_dimension_not_clear(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]), 'mean': None}},\n scale=None)\n function = create_function2(shape1=[1, 3, 3, 3]) # Not clear to which 3 should scale be applied\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_dimension_mismatch_with_scale(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3., 4.]),\n 'mean': np.array([1., 2., 3.])}},\n scale=None)\n function = create_function2(shape1=[1, 3, 4, 224])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_guess_c_wrong_position_3d(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]),\n 'mean': np.array([1., 2., 3.])}},\n scale=None)\n function = create_function2(shape1=[2, 3, 4])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_guess_c_wrong_position_4d(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]),\n 'mean': np.array([1., 2., 3.])}},\n scale=None)\n function = create_function2(shape1=[1, 2, 3, 4])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_guess_c_wrong_position_5d(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]),\n 'mean': np.array([1., 2., 3.])}},\n scale=None)\n function = create_function2(shape1=[1, 2, 3, 4, 5])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_guess_c_wrong_position_6d(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]),\n 'mean': np.array([1., 2., 3.])}},\n scale=None)\n function = create_function2(shape1=[1, 2, 4, 5, 6, 3])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_2_names_to_same_input(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.])},\n 'input1a': {'scale': np.array([1., 2., 3.])}},\n scale=None)\n function = create_function2(shape1=[1, 3, 224, 224])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_error_2_names_to_same_input_single_value(self):\n argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2.])},\n 'input1a': {'scale': np.array([3.])}},\n scale=None)\n function = create_function2(shape1=[1, 3, 224, 224])\n with self.assertRaises(Exception):\n process_function(ov_function=function, argv=argv)\n\n def test_reverse_input_channels(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 3, 224, 224])\n process_function(ov_function=function,\n argv=argv)\n # Verify that some operations are inserted.\n # In 
future, consider using mock PrePostProcessor to verify that 'reverse_channels' was called\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() != 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() != 'Relu')\n\n # Verify that guessed layouts are not appeared in input1,input2\n self.assertEqual(function.get_parameters()[0].layout, Layout())\n self.assertEqual(function.get_parameters()[1].layout, Layout())\n\n def test_reverse_input_channels_func_layout(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[1, 3, 3, 3], shape2=[1, 3, 3, 3])\n function.get_parameters()[0].layout = Layout(\"NCHW\")\n function.get_parameters()[1].layout = Layout(\"NHWC\")\n process_function(ov_function=function,\n argv=argv)\n # Verify that some operations are inserted.\n # In future, consider using mock PrePostProcessor to verify that 'reverse_channels' was called\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() != 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() != 'Relu')\n\n # Verify that guessed layouts are not appeared in input1,input2\n self.assertEqual(function.get_parameters()[0].layout, Layout(\"NCHW\"))\n self.assertEqual(function.get_parameters()[1].layout, Layout(\"NHWC\"))\n\n def test_reverse_input_channels_layout(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None,\n layout_values={'input1a': { 'source_layout': 'nhwc' },\n 'input2a': { 'source_layout': 'nchw' }\n })\n function = create_function2(shape1=[1, 224, 224, 4], shape2=[1, 4, 224, 224])\n process_function(ov_function=function, argv=argv)\n # In future, consider using mock PrePostProcessor to verify that 'reverse_channels' was not called\n # Verify that reverse_channels are not applied.\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() == 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n\n def test_reverse_input_channels_3d(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None,\n layout_values=None)\n function = create_function2(shape1=[224, 224, 3], shape2=[3, 224, 224])\n process_function(ov_function=function, argv=argv)\n # Verify that reverse_channels are applied.\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() != 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() != 'Relu')\n\n def test_reverse_input_channels_6d(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None,\n layout_values=None)\n function = create_function2(shape1=[4, 4, 4, 4, 4, 3], shape2=[4, 3, 4, 4, 4, 4])\n process_function(ov_function=function, argv=argv)\n # Verify that reverse_channels are NOT applied.\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() == 'Relu')\n op_node1 = 
list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n\n def test_reverse_input_channels_dynamic(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None,\n layout_values=None)\n function = create_function2(shape1=[1, -1, 5, 5], shape2=[-1, -1, -1, -1])\n process_function(ov_function=function, argv=argv)\n # Verify that reverse_channels are NOT applied.\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() == 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n\n def test_reverse_input_channels_dynamic_layout(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None,\n layout_values={'input1a': { 'source_layout': 'nchw' },\n 'input2a': { 'source_layout': 'nhwc' }\n })\n function = create_function2(shape1=[1, -1, 5, 5], shape2=[-1, -1, -1, -1])\n process_function(ov_function=function, argv=argv)\n # Verify that reverse_channels are applied.\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() != 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() != 'Relu')\n\n def test_reverse_input_channels_2_channels(self):\n argv = Namespace(reverse_input_channels=True,\n mean_scale_values=None,\n scale=None)\n function = create_function2(shape1=[1, 224, 224, 2], shape2=[1, 3, 224, 224])\n process_function(ov_function=function, argv=argv)\n # Verify that some operations are inserted to input2.\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() == 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() != 'Relu')\n\n # Verify that guessed layouts are not appeared in input1,input2\n self.assertEqual(function.get_parameters()[0].layout, Layout())\n self.assertEqual(function.get_parameters()[1].layout, Layout())\n\n # When input name for layout is empty for model with one input - it is applied to this input\n def test_scale_vector3_layout_empty_input_name(self):\n argv = Namespace(mean_scale_values=list(np.array([(None, np.array([2., 4., 8.]))],\n dtype='object')),\n layout_values={'': {'source_layout': 'nchw'}},\n scale=None)\n function = create_function1(shape1=[1, 3, 3, 3]) # Use layout to determine channels dim\n\n process_function(ov_function=function, argv=argv)\n op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply')\n self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])\n\n # Verify that layout (nchw) is appeared in input1\n self.assertEqual(function.get_parameters()[0].layout, Layout('nchw'))\n\n def test_layout_output(self):\n argv = Namespace(mean_scale_values=None,\n layout_values={\n 'res1': {\n 'source_layout': 'nchw',\n 'target_layout': 'nhwc'\n },\n 'res2a': {\n 'source_layout': 'ncdhw'\n }\n },\n scale=None)\n function = create_function2(shape1=[1, 3, 3, 3], shape2=[1, 3, 3, 3, 3])\n\n process_function(ov_function=function, argv=argv)\n op_node = 
function.get_results()[0].input(0).get_source_output().get_node()\n self.assertEqual(op_node.get_type_name(), 'Transpose')\n\n self.assertEqual(function.get_results()[0].layout, Layout('nhwc'))\n self.assertEqual(function.get_results()[1].layout, Layout('ncdhw'))\n\n def test_error_layout_empty_input_name_2_inputs(self):\n argv = Namespace(mean_scale_values=None,\n layout_values={'': {'source_layout': 'nchw'}},\n scale=None)\n function = create_function2(shape1=[1, 3, 3, 3])\n\n # Verify user friendly error message contains number of inputs and their names\n with self.assertRaisesRegex(Error, '.*2.*inputs.*input1.*input2.*'):\n process_function(ov_function=function, argv=argv)\n\n def test_reverse_channels_bad_layout(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 4, 224, 224])\n function.get_parameters()[0].layout = Layout(\"NDHWC\")\n with self.assertRaisesRegex(Error, '.*input1.*'):\n process_function(ov_function=function, argv=argv)\n\n def test_guess_layout_reverse_channels_dont_apply_to_4(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 4, 224, 224])\n process_function(ov_function=function, argv=argv)\n\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() != 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n\n def test_error_guess_layout_reverse_channels_multi_3(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 3, 3, 224])\n process_function(ov_function=function, argv=argv)\n # Applied to only input1\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() != 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n\n\n def test_no_guess_layout_reverse_channels_has_layout_no_c(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 3, 224, 224])\n function.get_parameters()[0].layout = Layout(\"NHW?\")\n function.get_parameters()[1].layout = Layout(\"N?HW\")\n process_function(ov_function=function, argv=argv)\n # Nothing has applied\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() == 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n\n def test_guess_layout_reverse_channels_incorrect_pos(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[1, 4, 224, 224], shape2=[1, 224, 224, 2])\n function.get_parameters()[0].layout = Layout(\"NCHW\")\n function.get_parameters()[1].layout = Layout(\"NHWC\")\n process_function(ov_function=function, argv=argv)\n # Nothing has applied\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() == 'Relu')\n op_node1 = 
list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n\n def test_no_reverse_channels_even_with_layout(self):\n argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None)\n function = create_function2(shape1=[3, 4, 224, 224], shape2=[1, 224, 3, 224])\n process_function(ov_function=function, argv=argv)\n # Nothing has applied\n op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node0.get_type_name() == 'Relu')\n op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node()\n self.assertTrue(op_node1.get_type_name() == 'Relu')\n",
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport logging as log\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer, bias_add_infer\nfrom openvino.tools.mo.graph.graph import Graph, Node\nfrom openvino.tools.mo.middle.passes.infer import copy_type_infer\nfrom openvino.tools.mo.ops.op import Op\nfrom openvino.tools.mo.pipeline.common import convert_const_node_value_type\nfrom openvino.tools.mo.utils.error import Error\n\n\ndef override_data_type_of_constant(node: Node):\n in_type_0 = node.in_port(0).get_data_type()\n in_type_1 = node.in_port(1).get_data_type()\n if in_type_0 != in_type_1:\n # in case of input values data type mismatch we try to change the type of the constant to match the type of\n # another input. The input values data type mismatch occur when the MO performs replacement of some\n # operations like SquaredDifference of inputs with floating point data type to Power layer with the integer\n # power value 2, or when replacing Neg operation with Mul with -1 as second input.\n in_node_0 = node.in_port(0).get_source().node\n in_node_1 = node.in_port(1).get_source().node\n\n if in_node_0.op != 'Const' and in_node_1.op != 'Const':\n raise Error(\"Elementwise operation '{}' has inputs of different data types: '{}' and '{}' \"\n \"that cannot be aligned\".format(node.soft_get('name'), in_type_0, in_type_1))\n\n if in_node_0.op == 'Const':\n node_to_convert, src_type, dst_type = in_node_0, in_type_0, in_type_1\n else:\n node_to_convert, src_type, dst_type = in_node_1, in_type_1, in_type_0\n log.error(\"Changing Const node '{}' data type from {} to {} for Elementwise operation\".format(\n node_to_convert.soft_get('name', node_to_convert.id), src_type, dst_type),\n extra={'is_warning': True})\n convert_const_node_value_type(node_to_convert, dst_type)\n\n\nclass Elementwise(Op):\n enabled = False\n operation = None\n op = None\n op_type = None\n version = 'opset1'\n\n def __init__(self, graph: Graph, attrs: dict):\n super().__init__(graph, {\n 'op': self.op,\n 'type': self.op_type,\n 'version': self.version,\n 'infer': lambda node: eltwise_infer(node, self.operation),\n 'type_infer': self.type_infer,\n 'can_be_bias': True,\n 'can_be_fused': True,\n 'in_ports_count': 2,\n 'out_ports_count': 1,\n 'is_eltwise': True,\n 'stop_value_propagation': False,\n 'auto_broadcast': 'numpy'\n }, attrs)\n\n @staticmethod\n def type_infer(node):\n override_data_type_of_constant(node)\n node.out_port(0).set_data_type(node.in_port(0).get_data_type())\n\n def backend_attrs(self):\n return ['auto_broadcast']\n\n\nclass UnaryElementwise(Elementwise):\n def __init__(self, graph: Graph, attrs: dict):\n super().__init__(graph, {**{\n 'in_ports_count': 1,\n }, **attrs})\n\n @staticmethod\n def type_infer(node):\n copy_type_infer(node)\n\n def backend_attrs(self):\n return []\n\n\nclass Add(Elementwise):\n op = 'Add'\n op_type = 'Add'\n operation = staticmethod(lambda a, b: a + b)\n\n\nclass BiasAdd(Add):\n op_type = 'BiasAdd'\n\n def __init__(self, graph: Graph, attrs: dict):\n attrs.update({'infer': lambda node: bias_add_infer(node, self.operation)})\n super().__init__(graph, attrs)\n\n\nclass Sub(Elementwise):\n op = 'Sub'\n op_type = 'Subtract'\n operation = staticmethod(lambda a, b: a - b)\n\n\nclass Mul(Elementwise):\n op = 'Mul'\n op_type = 'Multiply'\n operation = staticmethod(lambda a, b: a * b)\n\n\ndef both_types_are_integer(a, b):\n return np.issubdtype(a.dtype, np.integer) and np.issubdtype(b.dtype, 
np.integer)\n\n\nclass Div(Elementwise):\n op = 'Div'\n op_type = 'Divide'\n operation = staticmethod(lambda a, b: a // b if both_types_are_integer(a, b) else a / b)\n\n\nclass SquaredDifference(Elementwise):\n op = 'SquaredDifference'\n op_type = 'SquaredDifference'\n operation = staticmethod(lambda a, b: (a - b) * (a - b))\n\n\nclass Pow(Elementwise):\n op = 'Pow'\n op_type = 'Power'\n\n @staticmethod\n def operation(a, b):\n if np.any(b < 0) and np.issubdtype(a.dtype, np.signedinteger):\n return np.array(a.astype(np.float32) ** b, dtype=np.float32)\n return a ** b\n\n\nclass LogicalElementwise(Elementwise):\n @staticmethod\n def type_infer(node):\n override_data_type_of_constant(node)\n node.out_port(0).set_data_type(np.bool)\n\n\nclass Greater(LogicalElementwise):\n op = 'Greater'\n op_type = 'Greater'\n operation = staticmethod(lambda a, b: np.ma.greater(a, b))\n\n\nclass GreaterEqual(LogicalElementwise):\n op = 'GreaterEqual'\n op_type = 'GreaterEqual'\n operation = staticmethod(lambda a, b: np.ma.greater_equal(a, b))\n\n\nclass Less(LogicalElementwise):\n op = 'Less'\n op_type = 'Less'\n operation = staticmethod(lambda a, b: np.ma.less(a, b))\n\n\nclass LessEqual(LogicalElementwise):\n op = 'LessEqual'\n op_type = 'LessEqual'\n operation = staticmethod(lambda a, b: np.ma.less_equal(a, b))\n\n\nclass Equal(LogicalElementwise):\n op = 'Equal'\n op_type = 'Equal'\n operation = staticmethod(lambda a, b: np.ma.equal(a, b))\n\n\nclass NotEqual(LogicalElementwise):\n op = 'NotEqual'\n op_type = 'NotEqual'\n operation = staticmethod(lambda a, b: np.ma.not_equal(a, b))\n\n\nclass Maximum(Elementwise):\n op = 'Maximum'\n op_type = 'Maximum'\n operation = staticmethod(lambda a, b: np.ma.maximum(a, b))\n\n\nclass Minimum(Elementwise):\n op = 'Minimum'\n op_type = 'Minimum'\n operation = staticmethod(lambda a, b: np.ma.minimum(a, b))\n\n\nclass Round(UnaryElementwise):\n op = 'Round'\n op_type = 'Round'\n version = 'opset5'\n\n def __init__(self, graph: Graph, attrs):\n round_attrs = {'mode': 'half_to_even',\n 'infer': self.infer\n }\n round_attrs.update(attrs)\n super().__init__(graph, round_attrs)\n\n def backend_attrs(self):\n return ['mode']\n\n @classmethod\n def infer(cls, node: Node):\n node.out_port(0).data.set_shape(node.in_port(0).data.get_shape())\n\n a = node.in_port(0).data.get_value()\n if a is not None:\n assert node.soft_get('mode') in ['half_to_even', 'half_away_from_zero'], \\\n 'Round node {} has unsupported \"mode\" attribute value: {}'.format(node.soft_get('name', node.id),\n node.soft_get('mode'))\n if node.mode == 'half_away_from_zero':\n mask = (a >= 0)\n out = np.ma.empty_like(a)\n out[mask] = np.ma.floor(a[mask] + 0.5)\n out[~mask] = np.ma.ceil(a[~mask] - 0.5)\n else:\n out = np.ma.round(a)\n node.out_port(0).data.set_value(out)\n\n\nclass LogicalOr(LogicalElementwise):\n op = 'LogicalOr'\n op_type = 'LogicalOr'\n operation = staticmethod(lambda a, b: np.ma.logical_or(a, b))\n\n\nclass LogicalXor(Elementwise):\n op = 'LogicalXor'\n op_type = 'LogicalXor'\n operation = staticmethod(lambda a, b: np.ma.logical_xor(a, b))\n\n\nclass LogicalAnd(LogicalElementwise):\n op = 'LogicalAnd'\n op_type = 'LogicalAnd'\n operation = staticmethod(lambda a, b: np.ma.logical_and(a, b))\n\n\nclass FloorMod(Elementwise):\n op = 'FloorMod'\n op_type = 'FloorMod'\n operation = staticmethod(lambda a, b: np.ma.fmod(a, b))\n\n\nclass Mod(Elementwise):\n op = 'Mod'\n op_type = 'Mod'\n operation = staticmethod(lambda a, b: np.ma.mod(a, b))\n\n\nclass Negative(UnaryElementwise):\n op = 
'Negative'\n op_type = 'Negative'\n operation = staticmethod(lambda a: -a)\n",
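Round.infer in the elementwise.py entry above implements two tie-breaking modes for half-way values. A plain-NumPy sketch of the same arithmetic (ordinary arrays instead of the masked arrays the op uses), showing how the modes differ:

import numpy as np

def round_half_to_even(a):
    # numpy's default rounding, i.e. mode='half_to_even'
    return np.round(a)

def round_half_away_from_zero(a):
    # same formula as in Round.infer: floor(x + 0.5) for x >= 0,
    # ceil(x - 0.5) for x < 0
    a = np.asarray(a, dtype=float)
    out = np.empty_like(a)
    mask = a >= 0
    out[mask] = np.floor(a[mask] + 0.5)
    out[~mask] = np.ceil(a[~mask] - 0.5)
    return out

x = np.array([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5])
print(round_half_to_even(x))         # [-2. -2. -0.  0.  2.  2.]
print(round_half_away_from_zero(x))  # [-3. -2. -1.  1.  2.  3.]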
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.common.replacement import FrontReplacementPattern\nfrom openvino.tools.mo.graph.graph import Graph\nfrom openvino.tools.mo.ops.broadcast import Broadcast\nfrom openvino.tools.mo.ops.const import Const\n\n\nclass FillToBroadcast(FrontReplacementPattern):\n \"\"\"\n Converts the 'Fill' layer to 'Broadcast'.\n \"\"\"\n enabled = True\n\n def find_and_replace_pattern(self, graph: Graph):\n for fill_node in graph.get_op_nodes(op='Fill'):\n name = fill_node.soft_get('name', fill_node.id)\n\n broadcast_node = Broadcast(graph, {'name': name + '/Broadcast'}).create_node()\n fill_node.in_port(0).get_connection().set_destination(broadcast_node.in_port(1))\n fill_node.in_port(1).get_connection().set_destination(broadcast_node.in_port(0))\n fill_node.out_port(0).get_connection().set_source(broadcast_node.out_port(0))\n\n for fill_node in graph.get_op_nodes(op='ConstantFill'):\n name = fill_node.soft_get('name', fill_node.id)\n\n assert fill_node.has_valid('fill_value')\n assert fill_node.has_and_set('input_as_shape')\n\n const = Const(graph, {'value': np.array(fill_node.fill_value), 'name': name + '/value'}).create_node()\n broadcast_node = Broadcast(graph, {'name': name + '/Broadcast'}).create_node()\n fill_node.in_port(0).get_connection().set_destination(broadcast_node.in_port(1))\n const.out_port(0).connect(broadcast_node.in_port(0))\n fill_node.out_port(0).get_connection().set_source(broadcast_node.out_port(0))\n",
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.ops.elementwise import Add, Mul\nfrom openvino.tools.mo.front.common.replacement import FrontReplacementPattern\nfrom openvino.tools.mo.graph.graph import Graph\nfrom openvino.tools.mo.ops.const import Const\n\n\nclass BinaryFakeQuantizeNormalization(FrontReplacementPattern):\n \"\"\"\n FakeQuantize in binary form has exceptional meaning of 1 and 2 input nodes.\n This nodes values should be equal and express threshold to quantize tensors to two levels..\n \"\"\"\n enabled = True\n\n @staticmethod\n def pattern():\n return dict(\n nodes=[\n ('min_in', dict()),\n ('max_in', dict()),\n ('quantize', dict(op='FakeQuantize', levels=2))],\n edges=[\n ('min_in', 'quantize', {'in': 1}),\n ('max_in', 'quantize', {'in': 2})\n ]\n )\n\n def replace_pattern(self, graph: Graph, match: dict):\n quantize = match['quantize']\n\n sum_node = Add(graph, dict()).create_node()\n const = Const(graph, {'value': np.array(0.5)}).create_node()\n mul_node = Mul(graph, dict()).create_node()\n\n mul_node.in_port(0).connect(sum_node.out_port(0))\n mul_node.in_port(1).connect(const.out_port(0))\n\n quantize.in_port(1).get_connection().get_source().connect(sum_node.in_port(0))\n quantize.in_port(2).get_connection().get_source().connect(sum_node.in_port(1))\n\n quantize.in_port(1).disconnect()\n quantize.in_port(2).disconnect()\n\n mul_node.out_port(0).connect(quantize.in_port(1))\n mul_node.out_port(0).connect(quantize.in_port(2))\n",
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.ops.split import VariadicSplit\nfrom openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension, dynamic_dimension_value, \\\n is_dynamic_slice\nfrom openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs\nfrom openvino.tools.mo.graph.graph import Graph, Node\nfrom openvino.tools.mo.graph.perm_inputs import PermuteInputs\nfrom openvino.tools.mo.middle.replacement import MiddleReplacementPattern\nfrom openvino.tools.mo.ops.concat import Concat\nfrom openvino.tools.mo.ops.const import Const\nfrom openvino.tools.mo.ops.op import PermuteAttrs\nfrom openvino.tools.mo.ops.strided_slice import StridedSlice\nfrom openvino.tools.mo.utils.error import Error\n\n\nclass StridedSliceNormalizer(MiddleReplacementPattern):\n r\"\"\"\n StridedSlice is not normal if it cannot be permuted by ApplyPermutations. This normalizer\n inserts blank colons ':' in slice expression so that it can be correctly permuted\n from NHWC to NCHW layout. It changes masks and inserts blank begin, end and strides values.\n In order to successfully handle StridedSlice in ShapeOf subgraphs\n changes must be done by inserting nodes not just by overwriting constants.\n\n StridedSlice is not normal in 2 cases:\n 1. rank of a slice expression is less than rank of input tensor\n 2. there is an ellipsis\n\n 1st case example\n BEFORE:\n |\n begin\n value=[0, 0]\n |\n\n AFTER:\n |\n begin Const\n value=[0, 0] value=[0, 0]\n \\ /\n \\ /\n Concat\n value=[0, 0, 0, 0]\n |\n\n Input of a shape [16, 100, 100, 3] in NHWC layout, output = input[:, 0:50].\n StridedSlice will be extended to input[:, 0:50, :, :].\n After permutation to NCHW output = input[:, :, 0:50, :].\n Example for 'begin' input transformation is shown above on the picture.\n 'end' and 'strides' inputs will be transformed the same way.\n\n 2nd case example\n BEFORE:\n |\n begin\n value=[1, 50]\n |\n\n AFTER:\n |\n begin\n value=[1, 1, 1]\n |\n VariadicSplit\n / \\\n / \\\n / Const \\\n \\ val=[0, 0] /\n \\ | /\n \\ | /\n Concat\n value=[1, 0, 0, 1, 1]\n |\n\n Input of a shape [16, 10, 100, 100, 3] in NDHWC layout, output = input[1:4, ..., 1:51, 1:3],\n output_shape = [3, 10, 100, 50, 2]. 
In order to perform correct layout permutation\n    ellipsis must be replaced with colons: input[1:4, ..., 1:51, 1:3] => input[1:4, :, :, 1:51, 1:3].\n    After layout permutation input[1:4, 1:3, :, :, 1:51].\n\n    In place of the colons, blank begin, end and strides values must be inserted.\n    In order to do that we split the input and insert blank zeros into the middle.\n    The transformation of the 'begin' input is shown in the picture above.\n    The 'end' and 'strides' inputs will be transformed the same way.\n    \"\"\"\n    enabled = True\n\n    def run_before(self):\n        from openvino.tools.mo.middle.LayoutChangeForConstantShapePaths import LayoutChangeForConstantShapePaths\n        return [LayoutChangeForConstantShapePaths]\n\n    def run_after(self):\n        from openvino.tools.mo.middle.SliceConverter import ConvertSlice\n        return [ConvertSlice]\n\n    def find_and_replace_pattern(self, graph: Graph):\n        for node in graph.get_op_nodes(type='StridedSlice'):\n            StridedSliceNormalizer.normalize_strided_slice(graph, node)\n            PermuteAttrs.create_permute_attrs(node,\n                                              attrs=[('begin_mask', 'input:0'),  # though it actually depends on slice_rank\n                                                     ('end_mask', 'input:0'),\n                                                     ('new_axis_mask', 'input:0'),\n                                                     ('shrink_axis_mask', 'input:0'),\n                                                     ('ellipsis_mask', 'input:0')])\n\n            # StridedSliceNormalizer inserted nodes that changed the original begin, end, and strides data nodes,\n            # so the correct permutations could not be set until now\n            PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:1', 'slice', 'dim_size')\n            PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:2', 'slice', 'dim_size')\n            if node.is_in_port_connected(3):\n                PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:3', 'slice', 'dim_size')\n\n    @staticmethod\n    def normalize_strided_slice(graph: Graph, node: Node):\n        input_shape = node.in_port(0).data.get_shape()\n        input_rank = len(input_shape)\n        begin = node.in_port(1).data.get_value()\n        if begin is not None:\n            slice_rank = len(begin)\n        else:\n            slice_rank = input_rank + np.count_nonzero(node.new_axis_mask) - np.count_nonzero(node.shrink_axis_mask)\n\n        StridedSlice.align_mask_with_slice_rank(node, slice_rank)  # if StridedSlice is created after partial_infer\n        StridedSliceNormalizer.normalize_slices_attr(node)\n\n        num_insertions = input_rank - slice_rank + np.count_nonzero(node.new_axis_mask)\n        assert num_insertions >= 0, 'slice_rank - num_new_axis must be <= input rank. Got instead: ' \\\n                                    'input_rank = {}, slice_rank = {}, num_new_axis = {}'. 
\\\n format(input_rank, slice_rank, np.count_nonzero(node.new_axis_mask))\n\n if np.any(node.ellipsis_mask):\n assert np.count_nonzero(node.ellipsis_mask) == 1, 'only one ellipsis_mask nonzero value is allowed'\n ellipsis_start = np.nonzero(node.ellipsis_mask)[0][0]\n # since we don't expect values in begin and end: take the whole range along ellipsis_start\n node.begin_mask[ellipsis_start] = 0\n node.end_mask[ellipsis_start] = 0\n node.ellipsis_mask[ellipsis_start] = 0\n insertion_start_idx = ellipsis_start + 1\n\n StridedSliceNormalizer.unroll_ellipsis_for_inputs(graph, node, ellipsis_start, num_insertions)\n elif num_insertions > 0:\n insertion_start_idx = slice_rank # insert blank values to mask ends\n StridedSliceNormalizer.extend_inputs(node, num_insertions)\n\n if num_insertions > 0:\n # insert blank values for ellipsis unrolling and extending\n for mask_name in StridedSlice.get_mask_names():\n node[mask_name] = np.insert(node[mask_name], insertion_start_idx, [0] * num_insertions).astype(int)\n\n @staticmethod\n def unroll_ellipsis_for_inputs(graph: Graph, node: Node, ellipsis_start: int, num_insertions: int):\n node_name = node.soft_get('name', node.id)\n\n for i, input_name in [(1, 'begin'), (2, 'end'), (3, 'strides')]:\n if i == 3 and not node.is_in_port_connected(3):\n continue # no need to extend strides if they are not connected\n\n blank_values_arr = np.zeros(num_insertions) if input_name != 'strides' else np.ones(num_insertions)\n blank_values_node = Const(graph, {'name': node_name + '/const_to_unroll_{}_ellipsis'.format(input_name),\n 'value': int64_array(blank_values_arr)}).create_node()\n\n concat_in_ports_count = 3 if ellipsis_start != 0 else 2\n concat = Concat(graph, {'axis': 0, 'name': node_name + '/concat_{}'.format(input_name),\n 'in_ports_count': concat_in_ports_count}).create_node()\n\n if ellipsis_start != 0:\n split = create_op_with_const_inputs(graph, VariadicSplit, {1: int64_array(0),\n 2: int64_array([ellipsis_start, -1])},\n {'name': node_name + '/split_for_{}_ellipsis'.format(input_name),\n 'out_ports_count': 2})\n node.in_port(i).get_connection().set_destination(split.in_port(0))\n\n concat.in_port(0).connect(split.out_port(0))\n concat.in_port(1).connect(blank_values_node.out_port(0))\n concat.in_port(2).connect(split.out_port(1))\n else:\n concat.in_port(0).connect(blank_values_node.out_port(0))\n node.in_port(i).get_connection().set_destination(concat.in_port(1))\n\n concat.out_port(0).get_connection().set_destination(node.in_port(i))\n\n @staticmethod\n def extend_inputs(node: Node, num_insertions: int):\n graph = node.graph\n node_name = node.soft_get('name', node.id)\n\n for i, input_name in [(1, 'begin'), (2, 'end'), (3, 'strides')]:\n if i == 3 and not node.is_in_port_connected(3):\n continue # no need to extend strides if they are not connected\n\n blank_values_arr = np.zeros(num_insertions) if input_name != 'strides' else np.ones(num_insertions)\n blank_values_node = Const(graph, {'name': node_name + '/extend_{}_const'.format(input_name),\n 'value': int64_array(blank_values_arr)}).create_node()\n\n if node.in_port(i).get_source().node.soft_get('type') == 'Concat':\n # concat already exists\n concat = node.in_port(i).get_source().node\n last_in_port = max(concat.in_ports().keys())\n assert not concat.in_port(last_in_port).disconnected(), 'The last in_port of Concat node {} ' \\\n 'should be connected'. 
\\\n format(concat.soft_get('name', node.id))\n\n concat.add_input_port(last_in_port + 1)\n concat.in_port(last_in_port + 1).connect(blank_values_node.out_port(0))\n else:\n # have to create concat\n concat = Concat(graph, {'axis': 0, 'name': node_name + '/concat_{}'.format(input_name),\n 'in_ports_count': 2}).create_node()\n node.in_port(i).get_connection().set_destination(concat.in_port(0))\n concat.in_port(1).connect(blank_values_node.out_port(0))\n concat.out_port(0).get_connection().set_destination(node.in_port(i))\n\n @staticmethod\n def normalize_slices_attr(node: Node):\n # removes negative starts, ends and magic numbers from 'slice' attr which is used by ConvertGroupedStridedSlice\n slice_rank = len(node['slices'])\n data_shape = node.in_port(0).data.get_shape()\n\n node_name = node.soft_get('name', node.id)\n if node.is_in_port_connected(3):\n strides = node.in_port(3).data.get_value()\n if strides is None:\n raise Error('StridedSlice operation for node {} supports only constant strides input'.format(node_name))\n else:\n strides = np.ones(len(node['slices']), dtype=np.int32)\n\n num_ellipsis_inserts = len(data_shape) - slice_rank + np.count_nonzero(node.new_axis_mask) + 1\n res_slices = []\n\n in_idx = 0\n for i, s in enumerate(node['slices']):\n if node.new_axis_mask[i]:\n res_slices.append(slice(0, 1, 1))\n elif node.shrink_axis_mask[i]:\n res_slices.append(slice(s, s + 1, strides[i])) # need strides if shrink index is negative\n elif node.ellipsis_mask[i]:\n for idx in range(num_ellipsis_inserts):\n res_slices.append(slice(0, data_shape[in_idx], 1))\n in_idx += 1\n else:\n res_slices.append(s)\n\n if not (node.new_axis_mask[i] or node.ellipsis_mask[i]):\n if res_slices[-1] != dynamic_dimension_value and data_shape[in_idx] is not dynamic_dimension and \\\n res_slices[-1] is not None and not is_dynamic_slice(res_slices[-1]):\n res_slices[-1] = slice(*res_slices[-1].indices(data_shape[in_idx])) # convert negative begins/ends\n in_idx += 1\n node.slices = np.array(res_slices)\n",
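The StridedSliceNormalizer docstring above describes two pieces of index arithmetic: padding a too-short slice expression with trailing ':' and unrolling an ellipsis into the right number of ':'. The same arithmetic on plain Python slice tuples, independent of the graph rewiring the pass actually performs (unroll_slices is a hypothetical helper, not part of MO):

def unroll_slices(slices, rank):
    """Expand Ellipsis / missing trailing dims so len(result) == rank."""
    full = slice(None)  # a blank ':'
    if Ellipsis in slices:
        i = slices.index(Ellipsis)
        n_missing = rank - (len(slices) - 1)
        return slices[:i] + (full,) * n_missing + slices[i + 1:]
    # case 1: slice rank below tensor rank -> pad with trailing ':'
    return slices + (full,) * (rank - len(slices))

# case 1 from the docstring: input[:, 0:50] on a rank-4 tensor
print(unroll_slices((slice(None), slice(0, 50)), 4))
# -> (slice(None), slice(0, 50), slice(None), slice(None))

# case 2: input[1:4, ..., 1:51, 1:3] on a rank-5 tensor
print(unroll_slices((slice(1, 4), Ellipsis, slice(1, 51), slice(1, 3)), 5))
# -> (slice(1, 4), slice(None), slice(None), slice(1, 51), slice(1, 3))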
"# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.front.extractor import FrontExtractorOp\nfrom openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs\nfrom openvino.tools.mo.ops.convolution import Convolution\n\n\nclass ConvFrontExtractor(FrontExtractorOp):\n op = 'Convolution'\n enabled = True\n\n @classmethod\n def extract(cls, node):\n attr = get_mxnet_layer_attrs(node.symbol_dict)\n\n kernel = attr.tuple(\"kernel\", int, None)\n stride = attr.tuple(\"stride\", int, tuple(np.ones(len(kernel), dtype=np.int64)))\n padding = attr.tuple(\"pad\", int, tuple(np.zeros(len(kernel), dtype=np.int64)))\n dilate = attr.tuple(\"dilate\", int, tuple(np.ones(len(kernel), dtype=np.int64)))\n group = attr.int(\"num_group\", 1)\n output = attr.int(\"num_filter\", None)\n bias_term = not attr.bool(\"no_bias\", False)\n\n final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None\n\n node_attrs = {\n 'op': __class__.op,\n 'bias_addable': True,\n 'bias_term': bias_term,\n 'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),\n 'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),\n 'dilation': final_dilations,\n 'output_spatial_shape': None,\n 'output_shape': None,\n 'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),\n 'group': group,\n 'output': output,\n 'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),\n\n 'input_feature_channel': 1,\n 'output_feature_channel': 0,\n 'kernel_spatial_idx': None,\n 'reshape_kernel': True,\n\n 'spatial_dims': None,\n 'channel_dims': np.array([1], dtype=np.int64),\n 'batch_dims': np.array([0], dtype=np.int64),\n 'layout': 'NCHW',\n }\n\n # update the attributes of the node\n Convolution.update_node_stat(node, node_attrs)\n return cls.enabled\n\n\nclass DeconvFrontExtractor(FrontExtractorOp):\n op = 'Deconvolution'\n enabled = True\n\n @staticmethod\n def get_pad(node, input_shape, kernel_shape):\n padding = np.add.reduce(node.pad, axis=1)\n padding[node.spatial_dims] = node.stride[node.spatial_dims] * (input_shape[node.spatial_dims] - 1) + 1 + \\\n (kernel_shape[node.spatial_dims] - 1) * node.dilation[node.spatial_dims]\n padding[node.spatial_dims] = padding[node.spatial_dims] - node.output_spatial_shape;\n padding[node.spatial_dims] = (padding[node.spatial_dims] + 1) / 2\n return np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding[2:]]], dtype=np.int64)\n\n @classmethod\n def extract(cls, node):\n attr = get_mxnet_layer_attrs(node.symbol_dict)\n\n kernel = attr.tuple(\"kernel\", int, None)\n stride = attr.tuple(\"stride\", int, tuple(np.ones(len(kernel), dtype=np.int64)))\n padding = attr.tuple(\"pad\", int, tuple(np.zeros(len(kernel), dtype=np.int64)))\n dilate = attr.tuple(\"dilate\", int, tuple(np.ones(len(kernel), dtype=np.int64)))\n group = attr.int(\"num_group\", 1)\n output = attr.int(\"num_filter\", None)\n bias_term = not attr.bool(\"no_bias\", True)\n target_shape = attr.tuple(\"target_shape\", int, None)\n if target_shape:\n target_shape = np.array(target_shape, dtype=np.int64)\n\n final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None\n node_attrs = {\n 'op': __class__.op,\n 'type': 'Deconvolution',\n 'bias_addable': True,\n 'bias_term': bias_term,\n 'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),\n 'pad_spatial_shape': np.array([[pad, 
pad] for pad in padding], dtype=np.int64),\n 'dilation': final_dilations,\n 'output_spatial_shape': target_shape,\n 'original_output_spatial_shape': target_shape,\n 'output_shape': None,\n 'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),\n 'group': group,\n 'output': output,\n 'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),\n 'input_feature_channel': 1,\n 'output_feature_channel': 0,\n 'kernel_spatial_idx': None,\n 'reshape_kernel': True,\n\n 'spatial_dims': None,\n 'channel_dims': np.array([1], dtype=np.int64),\n 'batch_dims': np.array([0], dtype=np.int64),\n 'layout': 'NCHW',\n 'get_pad': DeconvFrontExtractor.get_pad,\n }\n\n output_padding = attr.tuple(\"adj\", int, None)\n if target_shape is None and output_padding:\n node_attrs[\"output_padding\"] = np.array([0, 0, *[s for s in output_padding]], dtype=np.int64)\n\n # update the attributes of the node\n Convolution.update_node_stat(node, node_attrs)\n return cls.enabled\n"
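DeconvFrontExtractor.get_pad above recovers per-side padding from a deconvolution's target output size: the unpadded transposed convolution would produce stride*(in - 1) + 1 + (kernel - 1)*dilation elements, so the excess over the requested output is split across the two sides. A one-dimensional scalar sketch of that arithmetic (function name chosen for illustration):

def deconv_pad_per_side(in_size, out_size, kernel, stride=1, dilation=1):
    full = stride * (in_size - 1) + 1 + (kernel - 1) * dilation
    total_pad = full - out_size
    return (total_pad + 1) // 2  # integer per-side pad, as in get_pad

# e.g. in=10, kernel=4, stride=2, target out=20:
# full = 2*9 + 1 + 3 = 22, total_pad = 2, so 1 element of pad per side
print(deconv_pad_per_side(10, 20, 4, stride=2))  # 1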
] | [
[
"numpy.random.random",
"numpy.random.randint"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.round",
"numpy.packbits",
"numpy.array",
"numpy.flip",
"numpy.zeros",
"numpy.isclose"
],
[
"numpy.expand_dims",
"numpy.logical_and",
"numpy.min",
"numpy.asarray",
"numpy.ma.masked_array",
"numpy.stack",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.any",
"numpy.transpose",
"numpy.array",
"scipy.ndimage.interpolation.zoom",
"numpy.zeros"
],
[
"numpy.array"
],
[
"numpy.ma.floor",
"numpy.issubdtype",
"numpy.any",
"numpy.ma.logical_and",
"numpy.ma.greater",
"numpy.ma.empty_like",
"numpy.ma.ceil",
"numpy.ma.greater_equal",
"numpy.ma.logical_or",
"numpy.ma.fmod",
"numpy.ma.round",
"numpy.ma.less",
"numpy.ma.equal",
"numpy.ma.less_equal",
"numpy.ma.maximum",
"numpy.ma.minimum",
"numpy.ma.logical_xor",
"numpy.ma.mod",
"numpy.ma.not_equal"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.nonzero",
"numpy.ones",
"numpy.any",
"numpy.count_nonzero",
"numpy.insert",
"numpy.array",
"numpy.zeros"
],
[
"numpy.array",
"numpy.add.reduce"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guidefloripa/kerasify | [
"cbb2ea6cae61ccd551b0f5327433d23e8e8050ee"
] | [
"make_tests.py"
] | [
"import numpy as np\nimport pprint\n\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, Dense, Flatten, Activation, MaxPooling2D, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.advanced_activations import ELU\nfrom keras.layers.embeddings import Embedding\n\nfrom kerasify import export_model\n\nnp.set_printoptions(precision=25, threshold=np.nan)\n\ndef c_array(a):\n s = pprint.pformat(a.flatten())\n s = s.replace('[', '{').replace(']', '}').replace('array(', '').replace(')', '').replace(', dtype=float32', '')\n\n shape = ''\n\n if a.shape == ():\n s = '{%s}' % s\n shape = '(1)'\n else:\n shape = repr(a.shape).replace(',)', ')')\n\n return shape, s\n\n\nTEST_CASE = '''\nbool test_%s(double* load_time, double* apply_time)\n{\n printf(\"TEST %s\\\\n\");\n\n KASSERT(load_time, \"Invalid double\");\n KASSERT(apply_time, \"Invalid double\");\n\n Tensor in%s;\n in.data_ = %s;\n\n Tensor out%s;\n out.data_ = %s;\n\n KerasTimer load_timer;\n load_timer.Start();\n\n KerasModel model;\n KASSERT(model.LoadModel(\"test_%s.model\"), \"Failed to load model\");\n\n *load_time = load_timer.Stop();\n\n KerasTimer apply_timer;\n apply_timer.Start();\n\n Tensor predict = out;\n KASSERT(model.Apply(&in, &out), \"Failed to apply\");\n\n *apply_time = apply_timer.Stop();\n\n for (int i = 0; i < out.dims_[0]; i++)\n {\n KASSERT_EQ(out(i), predict(i), %s);\n }\n\n return true;\n}\n'''\n\ndef output_testcase(model, test_x, test_y, name, eps):\n print(\"Processing %s\" % name)\n model.compile(loss='mean_squared_error', optimizer='adamax')\n model.fit(test_x, test_y, nb_epoch=1, verbose=False)\n predict_y = model.predict(test_x).astype('f')\n print(model.summary())\n\n export_model(model, 'test_%s.model' % name)\n\n with open('test_%s.h' % name, 'w') as f:\n x_shape, x_data = c_array(test_x[0])\n y_shape, y_data = c_array(predict_y[0])\n\n f.write(TEST_CASE % (name, name, x_shape, x_data, y_shape, y_data, name, eps))\n\n\n\n''' Dense 1x1 '''\ntest_x = np.arange(10)\ntest_y = test_x * 10 + 1\nmodel = Sequential()\nmodel.add(Dense(1, input_dim=1))\n\noutput_testcase(model, test_x, test_y, 'dense_1x1', '1e-6')\n\n''' Dense 10x1 '''\ntest_x = np.random.rand(10, 10).astype('f')\ntest_y = np.random.rand(10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(1, input_dim=10))\n\noutput_testcase(model, test_x, test_y, 'dense_10x1', '1e-6')\n\n''' Dense 2x2 '''\ntest_x = np.random.rand(10, 2).astype('f')\ntest_y = np.random.rand(10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(2, input_dim=2))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'dense_2x2', '1e-6')\n\n''' Dense 10x10 '''\ntest_x = np.random.rand(10, 10).astype('f')\ntest_y = np.random.rand(10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'dense_10x10', '1e-6')\n\n''' Dense 10x10x10 '''\ntest_x = np.random.rand(10, 10).astype('f')\ntest_y = np.random.rand(10, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(Dense(10))\n\noutput_testcase(model, test_x, test_y, 'dense_10x10x10', '1e-6')\n\n''' Conv 2x2 '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_2x2', '1e-6')\n\n''' Conv 3x3 '''\ntest_x = np.random.rand(10, 1, 3, 3).astype('f').astype('f')\ntest_y = 
np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 3, 3, input_shape=(1, 3, 3)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_3x3', '1e-6')\n\n''' Conv 3x3x3 '''\ntest_x = np.random.rand(10, 3, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(3, 3, 3, input_shape=(3, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_3x3x3', '1e-6')\n\n''' Activation ELU '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 1).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(ELU(alpha=0.5))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'elu_10', '1e-6')\n\n''' Activation relu '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(Activation('relu'))\n\noutput_testcase(model, test_x, test_y, 'relu_10', '1e-6')\n\n''' Dense relu '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10, activation='relu'))\nmodel.add(Dense(10, input_dim=10, activation='relu'))\nmodel.add(Dense(10, input_dim=10, activation='relu'))\n\noutput_testcase(model, test_x, test_y, 'dense_relu_10', '1e-6')\n\n''' Dense relu '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10, activation='tanh'))\nmodel.add(Dense(10, input_dim=10, activation='tanh'))\nmodel.add(Dense(10, input_dim=10, activation='tanh'))\n\noutput_testcase(model, test_x, test_y, 'dense_tanh_10', '1e-6')\n\n''' Conv softplus '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='softplus'))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_softplus_2x2', '1e-6')\n\n\n''' Conv hardsigmoid '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='hard_sigmoid'))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_hard_sigmoid_2x2', '1e-6')\n\n''' Conv sigmoid '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='sigmoid'))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_sigmoid_2x2', '1e-6')\n\n\n''' Maxpooling2D 1x1'''\ntest_x = np.random.rand(10, 1, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(1, 1), input_shape=(1, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_1x1', '1e-6')\n\n''' Maxpooling2D 2x2'''\ntest_x = np.random.rand(10, 1, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(2, 2), input_shape=(1, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_2x2', '1e-6')\n\n''' Maxpooling2D 3x2x2'''\ntest_x = np.random.rand(10, 3, 10, 10).astype('f')\ntest_y = 
np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(2, 2), input_shape=(3, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_3x2x2', '1e-6')\n\n''' Maxpooling2D 3x3x3'''\ntest_x = np.random.rand(10, 3, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(3, 3), input_shape=(3, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_3x3x3', '1e-6')\n\n''' LSTM simple 7x20 '''\ntest_x = np.random.rand(10, 7, 20).astype('f')\ntest_y = np.random.rand(10, 3).astype('f')\nmodel = Sequential()\nmodel.add(LSTM(3, return_sequences=False, input_shape=(7, 20)))\n\noutput_testcase(model, test_x, test_y, 'lstm_simple_7x20', '1e-6')\n\n\n''' LSTM simple stacked 20x9 '''\ntest_x = np.random.rand(10, 20, 9).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(LSTM(32, return_sequences=False, input_shape=(20, 9)))\nmodel.add(Dense(3, input_dim=32, activation='tanh'))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'lstm_simple_stacked20x9', '1e-6')\n\n''' LSTM stacked 150x83 '''\ntest_x = np.random.rand(10, 150, 83).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(LSTM(32, return_sequences=True, input_shape=(150, 83)))\nmodel.add(LSTM(32, return_sequences=False))\nmodel.add(Dense(1, activation='sigmoid'))\n\noutput_testcase(model, test_x, test_y, 'lstm_stacked150x83', '1e-6')\n\n\n''' Embedding 64 '''\nnp.random.seed(10)\ntest_x = np.random.randint(100, size=(32, 10)).astype('f')\ntest_y = np.random.rand(32, 20).astype('f')\nmodel = Sequential()\nmodel.add(Embedding(100, 64, input_length=10))\nmodel.add(Flatten())\n#model.add(Dropout(0.5))\nmodel.add(Dense(20, activation='sigmoid'))\n\noutput_testcase(model, test_x, test_y, 'embedding64', '1e-6')\n\n\n''' Benchmark '''\ntest_x = np.random.rand(1, 3, 128, 128).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(16, 7, 7, input_shape=(3, 128, 128), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(3, 3)))\nmodel.add(ELU())\nmodel.add(Convolution2D(8, 3, 3))\nmodel.add(Flatten())\nmodel.add(Dense(1000, activation='relu'))\nmodel.add(Dense(10))\n\noutput_testcase(model, test_x, test_y, 'benchmark', '1e-3')\n\n\n"
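The c_array helper in the script above builds the C brace-initializers embedded in each generated test header by string-rewriting NumPy's repr. A standalone sketch of the same transformation, runnable without Keras:

import pprint
import numpy as np

def c_array(a):
    # repr -> C initializer: swap brackets for braces, drop the
    # array(...) wrapper and the dtype suffix
    s = pprint.pformat(a.flatten())
    s = (s.replace('[', '{').replace(']', '}')
          .replace('array(', '').replace(')', '')
          .replace(', dtype=float32', ''))
    if a.shape == ():
        return '(1)', '{%s}' % s
    return repr(a.shape).replace(',)', ')'), s

shape, data = c_array(np.array([[1.0, 2.0], [3.0, 4.0]], dtype='f'))
print(shape)  # (2, 2)
print(data)   # {1., 2., 3., 4.}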
] | [
[
"numpy.random.seed",
"numpy.arange",
"numpy.set_printoptions",
"numpy.random.rand",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gjkennedy/OpenMDAO | [
"06897b584403cce34bc106dd2840aa07eea69e96",
"06897b584403cce34bc106dd2840aa07eea69e96",
"06897b584403cce34bc106dd2840aa07eea69e96",
"06897b584403cce34bc106dd2840aa07eea69e96"
] | [
"openmdao/surrogate_models/tests/test_map.py",
"openmdao/approximation_schemes/approximation_scheme.py",
"openmdao/drivers/tests/test_genetic_algorithm_driver.py",
"openmdao/solvers/nonlinear/tests/test_newton.py"
] | [
"from openmdao.api import Group, Problem, MetaModelUnStructuredComp, NearestNeighbor\nfrom openmdao.utils.assert_utils import assert_near_equal\n\nimport numpy as np\nimport unittest\n\n\nclass CompressorMap(MetaModelUnStructuredComp):\n\n def __init__(self):\n super(CompressorMap, self).__init__()\n\n self.add_input('Nc', val=1.0)\n self.add_input('Rline', val=2.0)\n self.add_input('alpha', val=0.0)\n\n self.add_output('PR', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))\n self.add_output('eff', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))\n self.add_output('Wc', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))\n\n\nclass TestMap(unittest.TestCase):\n\n def test_comp_map(self):\n # create compressor map and save reference to options (for training data)\n c = CompressorMap()\n m = c.options\n\n # add compressor map to problem\n p = Problem()\n p.model.add_subsystem('compmap', c)\n p.setup()\n\n # train metamodel\n Nc = np.array([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1])\n Rline = np.array([1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0])\n alpha = np.array([0.0, 1.0])\n Nc_mat, Rline_mat, alpha_mat = np.meshgrid(Nc, Rline, alpha, sparse=False)\n\n m['train:Nc'] = Nc_mat.flatten()\n m['train:Rline'] = Rline_mat.flatten()\n m['train:alpha'] = alpha_mat.flatten()\n\n m['train:PR'] = m['train:Nc']*m['train:Rline']+m['train:alpha']\n m['train:eff'] = m['train:Nc']*m['train:Rline']**2+m['train:alpha']\n m['train:Wc'] = m['train:Nc']**2*m['train:Rline']**2+m['train:alpha']\n\n # check predicted values\n p['compmap.Nc'] = 0.9\n p['compmap.Rline'] = 2.0\n p['compmap.alpha'] = 0.0\n p.run_model()\n\n tol = 1e-1\n assert_near_equal(p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n\n p['compmap.Nc'] = 0.95\n p['compmap.Rline'] = 2.1\n p['compmap.alpha'] = 0.0\n p.run_model()\n\n assert_near_equal(p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
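The test above builds its surrogate training table by flattening a meshgrid so every (Nc, Rline, alpha) combination appears exactly once. A tiny sketch of that pattern on two variables:

import numpy as np

Nc = np.array([0.4, 0.5])
Rline = np.array([1.0, 2.0])
A, B = np.meshgrid(Nc, Rline)
# paired columns enumerate the full cartesian product:
print(A.flatten())  # [0.4 0.5 0.4 0.5]
print(B.flatten())  # [1.  1.  2.  2. ]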
"\"\"\"Base class used to define the interface for derivative approximation schemes.\"\"\"\nfrom collections import defaultdict\nfrom scipy.sparse import coo_matrix\nimport numpy as np\nfrom openmdao.utils.array_utils import sub2full_indices, get_input_idx_split\nimport openmdao.utils.coloring as coloring_mod\nfrom openmdao.jacobians.jacobian import Jacobian\n\n_full_slice = slice(None)\n\n\nclass ApproximationScheme(object):\n \"\"\"\n Base class used to define the interface for derivative approximation schemes.\n\n Attributes\n ----------\n _approx_groups : list\n A list of approximation tuples ordered into groups of 'of's matching the same 'wrt'.\n _colored_approx_groups : list\n A list containing info for all colored approximation groups.\n _approx_groups_cached_under_cs : bool\n Flag indicates whether approx_groups was generated under complex step from higher in the\n model hieararchy.\n _exec_dict : defaultdict(list)\n A dict that keeps derivatives in execution order. The key is a combination of wrt and\n various metadata that differs by approximation scheme.\n _j_colored : coo_matrix\n If coloring is active, cached COO jacobian.\n _j_data_sizes : ndarray of int\n Array of sizes of data chunks that make up _j_colored. (Used for MPI Allgatherv)\n _j_data_offsets : ndarray of int\n Array of offsets of each data chunk that makes up _j_colored. (Used for MPI Allgatherv)\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the ApproximationScheme.\n \"\"\"\n self._approx_groups = None\n self._colored_approx_groups = None\n self._j_colored = None\n self._j_data_sizes = None\n self._j_data_offsets = None\n self._approx_groups_cached_under_cs = False\n self._exec_dict = defaultdict(list)\n\n def _reset(self):\n \"\"\"\n Get rid of any existing approx groups.\n \"\"\"\n self._colored_approx_groups = None\n self._approx_groups = None\n\n def _get_approx_groups(self, system, under_cs=False):\n \"\"\"\n Retrieve data structure that contains all the approximations.\n\n This data structure is regenerated if we transition to or from being under a complex step\n from higher in the model hierarchy.\n\n Parameters\n ----------\n system : <System>\n Group or component instance.\n under_cs : bool\n Flag that indicates if we are under complex step.\n\n Returns\n -------\n Tuple (approx_groups, colored_approx_groups)\n Each approx_groups entry contains specific data for a wrt var.\n Each colored_approx_groups entry contains data for a group of columns.\n \"\"\"\n if under_cs != self._approx_groups_cached_under_cs:\n if coloring_mod._use_partial_sparsity:\n self._init_colored_approximations(system)\n self._init_approximations(system)\n else:\n if self._colored_approx_groups is None and coloring_mod._use_partial_sparsity:\n self._init_colored_approximations(system)\n if self._approx_groups is None:\n self._init_approximations(system)\n\n self._approx_groups_cached_under_cs = under_cs\n\n return self._approx_groups, self._colored_approx_groups\n\n def add_approximation(self, abs_key, system, kwargs):\n \"\"\"\n Use this approximation scheme to approximate the derivative d(of)/d(wrt).\n\n Parameters\n ----------\n abs_key : tuple(str,str)\n Absolute name pairing of (of, wrt) for the derivative.\n system : System\n Containing System.\n kwargs : dict\n Additional keyword arguments, to be interpreted by sub-classes.\n \"\"\"\n raise NotImplementedError(\"add_approximation has not been implemented\")\n\n def compute_approximations(self, system, jac=None, total=False):\n \"\"\"\n Execute the system to compute 
the approximate (sub)-Jacobians.\n\n Parameters\n ----------\n system : System\n System on which the execution is run.\n jac : None or dict-like\n If None, update system with the approximated sub-Jacobians. Otherwise, store the\n approximations in the given dict-like object.\n total : bool\n If True total derivatives are being approximated, else partials.\n \"\"\"\n raise NotImplementedError()\n\n def _init_colored_approximations(self, system):\n from openmdao.core.group import Group\n from openmdao.core.implicitcomponent import ImplicitComponent\n\n self._colored_approx_groups = []\n self._j_colored = None\n self._j_data_sizes = None\n self._j_data_offsets = None\n\n # don't do anything if the coloring doesn't exist yet\n coloring = system._coloring_info['coloring']\n if not isinstance(coloring, coloring_mod.Coloring):\n return\n\n outputs = system._outputs\n inputs = system._inputs\n abs2meta = system._var_allprocs_abs2meta\n prom2abs_out = system._var_allprocs_prom2abs_list['output']\n prom2abs_in = system._var_allprocs_prom2abs_list['input']\n approx_wrt_idx = system._owns_approx_wrt_idx\n\n out_slices = outputs.get_slice_dict()\n in_slices = inputs.get_slice_dict()\n\n is_total = isinstance(system, Group)\n\n system._update_wrt_matches(system._coloring_info)\n wrt_matches = system._coloring_info['wrt_matches']\n\n data = None\n keys = set()\n for key, apprx in self._exec_dict.items():\n if key[0] in wrt_matches:\n if data is None:\n # data is the same for all colored approxs so we only need the first\n data = self._get_approx_data(system, key)\n options = apprx[0][1]\n if 'coloring' in options:\n keys.update(a[0] for a in apprx)\n\n if is_total and system.pathname == '': # top level approx totals\n of_names = system._owns_approx_of\n full_wrts = system._var_allprocs_abs_names['output'] + \\\n system._var_allprocs_abs_names['input']\n wrt_names = system._owns_approx_wrt\n else:\n of_names, wrt_names = system._get_partials_varlists()\n wrt_names = [prom2abs_in[n][0] if n in prom2abs_in else prom2abs_out[n][0]\n for n in wrt_names]\n full_wrts = wrt_names\n\n tmpJ = {\n '@nrows': coloring._shape[0],\n '@ncols': coloring._shape[1],\n '@out_slices': out_slices,\n '@approxs': keys,\n '@jac_slices': {},\n }\n\n # FIXME: need to deal with mix of local/remote indices\n\n len_full_ofs = len(system._var_allprocs_abs_names['output'])\n\n full_idxs = []\n approx_of_idx = system._owns_approx_of_idx\n jac_slices = tmpJ['@jac_slices']\n for abs_of, roffset, rend, _ in system._jacobian_of_iter():\n rslice = slice(roffset, rend)\n for abs_wrt, coffset, cend, _ in system._jacobian_wrt_iter(wrt_matches):\n jac_slices[(abs_of, abs_wrt)] = (rslice, slice(coffset, cend))\n\n if is_total and (approx_of_idx or len_full_ofs > len(of_names)):\n slc = out_slices[abs_of]\n if abs_of in approx_of_idx:\n full_idxs.append(np.arange(slc.start, slc.stop)[approx_of_idx[abs_of]])\n else:\n full_idxs.append(range(slc.start, slc.stop))\n if full_idxs:\n tmpJ['@row_idx_map'] = np.hstack(full_idxs)\n\n if len(full_wrts) != len(wrt_matches) or approx_wrt_idx:\n if is_total and system.pathname == '': # top level approx totals\n full_wrt_sizes = [abs2meta[wrt]['size'] for wrt in wrt_names]\n else:\n _, full_wrt_sizes = system._get_partials_var_sizes()\n\n # need mapping from coloring jac columns (subset) to full jac columns\n col_map = sub2full_indices(full_wrts, wrt_matches, full_wrt_sizes, approx_wrt_idx)\n else:\n col_map = None\n\n # get groups of columns from the coloring and compute proper indices into\n # the inputs and 
outputs vectors.\n is_semi = is_total and system.pathname\n use_full_cols = isinstance(system, ImplicitComponent) or is_semi\n for cols, nzrows in coloring.color_nonzero_iter('fwd'):\n ccols = cols if col_map is None else col_map[cols]\n idx_info = get_input_idx_split(ccols, inputs, outputs, use_full_cols, is_total)\n self._colored_approx_groups.append((data, cols, tmpJ, idx_info, nzrows))\n\n def _init_approximations(self, system):\n \"\"\"\n Prepare for later approximations.\n\n Parameters\n ----------\n system : System\n The system having its derivs approximated.\n \"\"\"\n outputs = system._outputs\n inputs = system._inputs\n abs2meta = system._var_allprocs_abs2meta\n\n out_slices = outputs.get_slice_dict()\n in_slices = inputs.get_slice_dict()\n\n approx_wrt_idx = system._owns_approx_wrt_idx\n coloring = system._get_static_coloring()\n\n self._approx_groups = []\n\n # must sort _exec_dict keys here or have ordering issues when using MPI\n for key in sorted(self._exec_dict):\n approx = self._exec_dict[key]\n meta = approx[0][1]\n if coloring is not None and 'coloring' in meta:\n continue\n wrt = key[0]\n directional = key[-1]\n data = self._get_approx_data(system, key)\n if wrt in inputs._views_flat:\n arr = inputs\n slices = in_slices\n elif wrt in outputs._views_flat:\n arr = outputs\n slices = out_slices\n else: # wrt is remote\n arr = None\n\n if wrt in approx_wrt_idx:\n in_idx = np.array(approx_wrt_idx[wrt], dtype=int)\n if arr is not None:\n in_idx += slices[wrt].start\n else:\n if arr is None:\n in_idx = range(abs2meta[wrt]['size'])\n else:\n in_idx = range(slices[wrt].start, slices[wrt].stop)\n\n # Directional derivatives for quick partial checking.\n # We place the indices in a list so that they are all stepped at the same time.\n if directional:\n in_idx = [list(in_idx)]\n\n tmpJ = _get_wrt_subjacs(system, approx)\n tmpJ['@out_slices'] = out_slices\n\n self._approx_groups.append((wrt, data, in_idx, tmpJ, [(arr, in_idx)], None))\n\n def _compute_approximations(self, system, jac, total, under_cs):\n from openmdao.core.component import Component\n # Clean vector for results\n results_array = system._outputs._data.copy() if total else system._residuals._data.copy()\n\n # To support driver src_indices, we need to override some checks in Jacobian, but do it\n # selectively.\n uses_voi_indices = (len(system._owns_approx_of_idx) > 0 or\n len(system._owns_approx_wrt_idx) > 0) and not isinstance(jac, dict)\n\n use_parallel_fd = system._num_par_fd > 1 and (system._full_comm is not None and\n system._full_comm.size > 1)\n par_fd_w_serial_model = use_parallel_fd and system._num_par_fd == system._full_comm.size\n num_par_fd = system._num_par_fd if use_parallel_fd else 1\n is_parallel = use_parallel_fd or system.comm.size > 1\n is_distributed = isinstance(system, Component) and system.options['distributed']\n\n results = defaultdict(list)\n iproc = system.comm.rank\n owns = system._owning_rank\n mycomm = system._full_comm if use_parallel_fd else system.comm\n jacobian = jac if isinstance(jac, Jacobian) else None\n\n fd_count = 0\n colored_shape = None\n jrows = []\n jcols = []\n jdata = []\n\n # This will either generate new approx groups or use cached ones\n approx_groups, colored_approx_groups = self._get_approx_groups(system, under_cs)\n do_rows_cols = self._j_colored is None\n\n # do colored solves first\n if colored_approx_groups is not None:\n for data, col_idxs, tmpJ, idx_info, nz_rows in colored_approx_groups:\n colored_shape = (tmpJ['@nrows'], tmpJ['@ncols'])\n\n if fd_count % 
num_par_fd == system._par_fd_id:\n # run the finite difference\n result = self._run_point(system, idx_info, data, results_array, total)\n if par_fd_w_serial_model or not is_parallel:\n rowmap = tmpJ['@row_idx_map'] if '@row_idx_map' in tmpJ else None\n if rowmap is not None:\n result = result[rowmap]\n result = self._transform_result(result)\n\n if nz_rows is None: # uncolored column\n if do_rows_cols:\n nrows = tmpJ['@nrows']\n jrows.extend(range(nrows))\n jcols.extend(col_idxs * nrows)\n jdata.extend(result)\n else:\n for i, col in enumerate(col_idxs):\n if do_rows_cols:\n jrows.extend(nz_rows[i])\n jcols.extend([col] * len(nz_rows[i]))\n jdata.extend(result[nz_rows[i]])\n else: # parallel model (some vars are remote)\n raise NotImplementedError(\"simul approx coloring with parallel FD/CS is \"\n \"only supported currently when using \"\n \"a serial model, i.e., when \"\n \"num_par_fd == number of MPI procs.\")\n fd_count += 1\n\n # now do uncolored solves\n for wrt, data, col_idxs, tmpJ, idx_info, nz_rows in approx_groups:\n J = tmpJ[wrt]\n full_idxs = J['loc_outvec_idxs']\n out_slices = tmpJ['@out_slices']\n\n if J['vector'] is not None:\n app_data = self.apply_directional(data, J['vector'])\n else:\n app_data = data\n\n for i_count, idxs in enumerate(col_idxs):\n if fd_count % num_par_fd == system._par_fd_id:\n # run the finite difference\n result = self._run_point(system, ((idx_info[0][0], idxs),),\n app_data, results_array, total)\n\n if is_parallel:\n for of, (oview, out_idxs, _, _) in J['ofs'].items():\n if owns[of] == iproc or is_distributed:\n results[(of, wrt)].append(\n (i_count,\n self._transform_result(\n result[out_slices[of]][out_idxs]).copy()))\n else:\n J['data'][:, i_count] = self._transform_result(result[full_idxs])\n\n fd_count += 1\n\n mult = self._get_multiplier(data)\n if colored_shape is not None: # coloring is active\n if par_fd_w_serial_model:\n if self._j_colored is None:\n jstuff = mycomm.allgather((jrows, jcols, jdata))\n rowlist = [rows for rows, _, _ in jstuff if rows]\n allrows = np.hstack(rowlist)\n allcols = np.hstack(cols for _, cols, _ in jstuff if cols)\n alldata = np.hstack(dat for _, _, dat in jstuff if dat)\n self._j_colored = coo_matrix((alldata, (allrows, allcols)), shape=colored_shape)\n self._j_data_sizes = sizes = np.array([len(x) for x, _, _ in jstuff])\n self._j_data_offsets = offsets = np.zeros(mycomm.size)\n offsets[1:] = np.cumsum(sizes)[:-1]\n else:\n mycomm.Allgatherv(jdata, [self._j_colored.data, self._j_data_sizes,\n self._j_data_offsets, MPI.DOUBLE])\n\n elif is_parallel:\n raise NotImplementedError(\"colored FD/CS over parallel groups not supported yet\")\n else: # serial colored\n if do_rows_cols:\n self._j_colored = coo_matrix((jdata, (jrows, jcols)), shape=colored_shape)\n else:\n self._j_colored.data[:] = jdata\n\n if mult != 1.0:\n self._j_colored.data *= mult\n\n # convert COO matrix to dense for easier slicing\n Jcolored = self._j_colored.toarray()\n\n elif is_parallel and not is_distributed: # uncolored with parallel systems\n results = _gather_jac_results(mycomm, results)\n\n if colored_approx_groups is not None:\n for _, _, tmpJ, _, _ in colored_approx_groups:\n # TODO: coloring when using parallel FD and/or FD with remote comps\n for key in tmpJ['@approxs']:\n slc = tmpJ['@jac_slices'][key]\n if uses_voi_indices:\n jac._override_checks = True\n jac[key] = _from_dense(jacobian, key, Jcolored[slc])\n jac._override_checks = False\n else:\n jac[key] = _from_dense(jacobian, key, Jcolored[slc])\n\n Jcolored = None # clean up 
memory\n\n for wrt, _, _, tmpJ, _, _ in approx_groups:\n J = tmpJ[wrt]\n ofs = J['ofs']\n for of in ofs:\n key = (of, wrt)\n oview, _, rows_reduced, cols_reduced = ofs[of]\n if is_parallel:\n for i, result in results[key]:\n oview[:, i] = result\n\n if J['vector'] is not None or mult != 1.0:\n oview *= mult\n\n if uses_voi_indices:\n jac._override_checks = True\n jac[key] = _from_dense(jacobian, key, oview, rows_reduced, cols_reduced)\n jac._override_checks = False\n else:\n jac[key] = _from_dense(jacobian, key, oview, rows_reduced, cols_reduced)\n\n\ndef _from_dense(jac, key, subjac, reduced_rows=_full_slice, reduced_cols=_full_slice):\n \"\"\"\n Convert given subjac from a dense array to whatever form matches our internal subjac.\n\n Parameters\n ----------\n jac : Jacobian or None\n Jacobian object.\n key : (str, str)\n Tuple of absulute names of of and wrt variables.\n subjac : ndarray\n Dense sub-jacobian to be assigned to the subjac corresponding to key.\n \"\"\"\n if jac is None: # we're saving deriv to a dict. Do no conversion.\n return subjac\n\n meta = jac._subjacs_info[key]\n val = meta['value']\n if meta['rows'] is not None: # internal format is our home grown COO\n if reduced_rows is not _full_slice or reduced_cols is not _full_slice:\n return subjac[reduced_rows, reduced_cols]\n else:\n return subjac[meta['rows'], meta['cols']]\n elif isinstance(val, np.ndarray):\n return subjac\n elif isinstance(val, coo_matrix):\n return coo_matrix(((val.row, val.col), subjac[val.row, val.col]))\n elif isinstance(val, csc_matrix):\n coo = val.tocoo()\n return coo_matrix(((coo.row, coo.col), subjac[coo.row, coo.col])).tocsc()\n elif isinstance(val, csr_matrix):\n coo = val.tocoo()\n return coo_matrix(((coo.row, coo.col), subjac[coo.row, coo.col])).tocsr()\n else:\n raise TypeError(\"Don't know how to convert dense ndarray to type '%s'\" %\n val.__class__.__name__)\n\n\ndef _gather_jac_results(comm, results):\n new_results = defaultdict(list)\n\n # create full results list\n for proc_results in comm.allgather(results):\n for key in proc_results:\n new_results[key].extend(proc_results[key])\n\n return new_results\n\n\ndef _get_wrt_subjacs(system, approxs):\n \"\"\"\n Return a dict mapping wrt names to contiguous memory views of all of their nonzero subjacs.\n\n All nonzero subjacs for a particular wrt are 'compressed' together so they're contiguous.\n\n This allows for setting an entire column of the jacobian at once instead of looping over\n each subjac.\n \"\"\"\n abs2idx = system._var_allprocs_abs2idx['nonlinear']\n abs2meta = system._var_allprocs_abs2meta\n approx_of_idx = system._owns_approx_of_idx\n approx_wrt_idx = system._owns_approx_wrt_idx\n approx_of = system._owns_approx_of\n\n J = {}\n ofdict = {}\n nondense = {}\n slicedict = system._outputs.get_slice_dict()\n abs_out_names = [n for n in system._var_allprocs_abs_names['output'] if n in slicedict]\n\n for key, options in approxs:\n of, wrt = key\n if 'rows' in options and options['rows'] is not None:\n nondense[key] = options\n if wrt not in J:\n J[wrt] = {'ofs': set(), 'tot_rows': 0, 'directional': options['directional'],\n 'vector': options['vector']}\n\n tmpJ = None\n if of not in ofdict and (approx_of is None or (approx_of and of in approx_of)):\n J[wrt]['ofs'].add(of)\n if of in approx_of_idx:\n out_idx = approx_of_idx[of]\n out_size = len(out_idx)\n else:\n out_size = abs2meta[of]['size']\n out_idx = _full_slice\n ofdict[of] = (out_size, out_idx)\n J[wrt]['tot_rows'] += out_size\n\n for wrt in J:\n unsorted_ofs = 
J[wrt]['ofs']\n J[wrt]['ofs'] = wrt_ofs = {}\n wrt_idx = approx_wrt_idx.get(wrt, _full_slice)\n\n # create dense array to contain all nonzero subjacs for this wrt\n if J[wrt]['directional']:\n J[wrt]['data'] = arr = np.zeros((J[wrt]['tot_rows'], 1))\n elif wrt_idx is not _full_slice:\n J[wrt]['data'] = arr = np.zeros((J[wrt]['tot_rows'], len(wrt_idx)))\n else:\n J[wrt]['data'] = arr = np.zeros((J[wrt]['tot_rows'], abs2meta[wrt]['size']))\n\n # sort ofs into the proper order to match outputs/resids vecs\n start = end = 0\n if system._owns_approx_of:\n sorted_ofs = [n for n in system._owns_approx_of if n in unsorted_ofs]\n else:\n sorted_ofs = sorted(unsorted_ofs, key=lambda n: abs2idx[n])\n\n for of in sorted_ofs:\n key = (of, wrt)\n osize, oidx = ofdict[of]\n end += osize\n # if needed, compute reduced row idxs and col idxs\n if key in nondense and (oidx is not _full_slice or wrt_idx is not _full_slice):\n # TODO: also need to handle scipy sparse matrices\n rows = nondense[key]['rows']\n cols = nondense[key]['cols']\n Jfull = np.zeros(nondense[key]['shape'], dtype=bool)\n Jfull[rows, cols] = True\n Jreduced = Jfull[oidx, wrt_idx]\n rows_reduced, cols_reduced = np.nonzero(Jreduced)\n Jfull = Jreduced = None\n else:\n rows_reduced = cols_reduced = _full_slice\n\n # store subview corresponding to the (of, wrt) subjac and any index info\n # print('wrt, of:', wrt, of, start, end, oidx)\n wrt_ofs[of] = (arr[start:end, :], oidx, rows_reduced, cols_reduced)\n start = end\n\n if abs_out_names != sorted_ofs:\n full_idxs = []\n for sof in sorted_ofs:\n if sof in slicedict:\n slc = slicedict[sof]\n if sof in approx_of_idx:\n full_idxs.append(np.arange(slc.start, slc.stop)[approx_of_idx[sof]])\n else:\n full_idxs.append(range(slc.start, slc.stop))\n J[wrt]['loc_outvec_idxs'] = np.hstack(full_idxs)\n else:\n J[wrt]['loc_outvec_idxs'] = _full_slice\n\n return J\n",
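"# Illustrative sketch (an editorial addition, not a file from the scraped repo):\n# _get_wrt_subjacs() above packs every nonzero subjac for one 'wrt' into a single\n# contiguous array and hands out per-'of' row views of it, so a finite-difference\n# result for a column can be written once and seen through every view. A minimal\n# numpy analogue with made-up sizes ('of1' has 2 rows, 'of2' has 3, 4 columns):\nimport numpy as np\n\ntot_rows, ncols = 5, 4\narr = np.zeros((tot_rows, ncols))\n\n# row-slice views, analogous to wrt_ofs[of] = (arr[start:end, :], ...)\nofs = {'of1': arr[0:2, :], 'of2': arr[2:5, :]}\n\n# writing one FD column into the shared array ...\narr[:, 1] = np.arange(tot_rows)\n\n# ... is immediately visible through both views, since slicing made no copies\nassert np.all(ofs['of1'][:, 1] == [0, 1])\nassert np.all(ofs['of2'][:, 1] == [2, 3, 4])\n",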
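"# Illustrative sketch (an editorial addition, not from the scraped repo):\n# _from_dense() above rebuilds sparse subjacs with scipy's coordinate format.\n# scipy's coo_matrix takes the data array first and the (row, col) index arrays\n# second; sampling a dense array at a known sparsity pattern looks like this:\nimport numpy as np\nfrom scipy.sparse import coo_matrix\n\ndense = np.array([[1., 0., 2.],\n [0., 3., 0.]])\nrow = np.array([0, 0, 1])\ncol = np.array([0, 2, 1])\n\n# keep only the entries on the sparsity pattern; pass shape so trailing\n# all-zero rows/cols are not silently dropped\nsparse = coo_matrix((dense[row, col], (row, col)), shape=dense.shape)\nassert np.allclose(sparse.toarray(), dense)\n",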
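"# Illustrative sketch (an editorial addition, not from the scraped repo): the\n# parallel-FD gather above builds its Allgatherv displacements from per-process\n# chunk sizes with an exclusive prefix sum, offsets[1:] = np.cumsum(sizes)[:-1]:\nimport numpy as np\n\nsizes = np.array([3, 5, 2]) # nonzeros contributed by each MPI process\noffsets = np.zeros(len(sizes), dtype=int)\noffsets[1:] = np.cumsum(sizes)[:-1]\n\n# each process p writes its chunk into [offsets[p], offsets[p] + sizes[p])\nassert list(offsets) == [0, 3, 8]\n",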
"\"\"\" Unit tests for the SimpleGADriver Driver.\"\"\"\n\nimport unittest\nimport os\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.drivers.genetic_algorithm_driver import GeneticAlgorithm\nfrom openmdao.test_suite.components.branin import Branin, BraninDiscrete\nfrom openmdao.test_suite.components.paraboloid import Paraboloid\nfrom openmdao.test_suite.components.sellar_feature import SellarMDA\nfrom openmdao.test_suite.components.three_bar_truss import ThreeBarTruss\nfrom openmdao.utils.assert_utils import assert_near_equal\nfrom openmdao.utils.mpi import MPI\n\ntry:\n from openmdao.vectors.petsc_vector import PETScVector\nexcept ImportError:\n PETScVector = None\n\nextra_prints = False # enable printing results\n\nclass TestSimpleGA(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_simple_test_func(self):\n class MyComp(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('x', np.zeros((2, )))\n\n self.add_output('a', 0.0)\n self.add_output('b', 0.0)\n self.add_output('c', 0.0)\n self.add_output('d', 0.0)\n\n def compute(self, inputs, outputs):\n x = inputs['x']\n\n outputs['a'] = (2.0*x[0] - 3.0*x[1])**2\n outputs['b'] = 18.0 - 32.0*x[0] + 12.0*x[0]**2 + 48.0*x[1] - 36.0*x[0]*x[1] + 27.0*x[1]**2\n outputs['c'] = (x[0] + x[1] + 1.0)**2\n outputs['d'] = 19.0 - 14.0*x[0] + 3.0*x[0]**2 - 14.0*x[1] + 6.0*x[0]*x[1] + 3.0*x[1]**2\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', np.array([0.2, -0.2])))\n model.add_subsystem('comp', MyComp())\n model.add_subsystem('obj', om.ExecComp('f=(30 + a*b)*(1 + c*d)'))\n\n model.connect('px.x', 'comp.x')\n model.connect('comp.a', 'obj.a')\n model.connect('comp.b', 'obj.b')\n model.connect('comp.c', 'obj.c')\n model.connect('comp.d', 'obj.d')\n\n # Played with bounds so we don't get subtractive cancellation of tiny numbers.\n model.add_design_var('px.x', lower=np.array([0.2, -1.0]), upper=np.array([1.0, -0.2]))\n model.add_objective('obj.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'px.x': 16}\n prob.driver.options['max_gen'] = 75\n\n prob.driver._randomstate = 11\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('obj.f', prob['obj.f'])\n print('px.x', prob['px.x'])\n\n # TODO: Satadru listed this solution, but I get a way better one.\n # Solution: xopt = [0.2857, -0.8571], fopt = 23.2933\n assert_near_equal(prob['obj.f'], 12.37306086, 1e-4)\n assert_near_equal(prob['px.x'][0], 0.2, 1e-4)\n assert_near_equal(prob['px.x'][1], -0.88653391, 1e-4)\n\n def test_mixed_integer_branin(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n model.add_subsystem('comp', Branin())\n\n model.connect('p2.xI', 'comp.x0')\n model.connect('p1.xC', 'comp.x1')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver(max_gen=75, pop_size=25)\n prob.driver.options['bits'] = {'p1.xC': 8}\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('comp.f', prob['comp.f'])\n\n # Optimal solution\n assert_near_equal(prob['comp.f'], 0.49399549, 1e-4)\n self.assertTrue(int(prob['p2.xI']) in [3, -3])\n\n def test_mixed_integer_branin_discrete(self):\n prob = om.Problem()\n model = prob.model\n\n indep = om.IndepVarComp()\n 
indep.add_output('xC', val=7.5)\n indep.add_discrete_output('xI', val=0)\n\n model.add_subsystem('p', indep)\n model.add_subsystem('comp', BraninDiscrete())\n\n model.connect('p.xI', 'comp.x0')\n model.connect('p.xC', 'comp.x1')\n\n model.add_design_var('p.xI', lower=-5, upper=10)\n model.add_design_var('p.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver(max_gen=75, pop_size=25)\n prob.driver.options['bits'] = {'p.xC': 8}\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('comp.f', prob['comp.f'])\n print('p.xI', prob['p.xI'])\n\n # Optimal solution\n assert_near_equal(prob['comp.f'], 0.49399549, 1e-4)\n self.assertTrue(prob['p.xI'] in [3, -3])\n self.assertTrue(isinstance(prob['p.xI'], int))\n\n def test_mixed_integer_3bar(self):\n class ObjPenalty(om.ExplicitComponent):\n \"\"\"\n Weight objective with penalty on stress constraint.\n \"\"\"\n def setup(self):\n self.add_input('obj', 0.0)\n self.add_input('stress', val=np.zeros((3, )))\n\n self.add_output('weighted', 0.0)\n\n def compute(self, inputs, outputs):\n obj = inputs['obj']\n stress = inputs['stress']\n\n pen = 0.0\n for j in range(len(stress)):\n if stress[j] > 1.0:\n pen += 10.0*(stress[j] - 1.0)**2\n\n outputs['weighted'] = obj + pen\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('xc_a1', om.IndepVarComp('area1', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xc_a2', om.IndepVarComp('area2', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xc_a3', om.IndepVarComp('area3', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xi_m1', om.IndepVarComp('mat1', 1), promotes=['*'])\n model.add_subsystem('xi_m2', om.IndepVarComp('mat2', 1), promotes=['*'])\n model.add_subsystem('xi_m3', om.IndepVarComp('mat3', 1), promotes=['*'])\n model.add_subsystem('comp', ThreeBarTruss(), promotes=['*'])\n model.add_subsystem('obj_with_penalty', ObjPenalty(), promotes=['*'])\n\n model.add_design_var('area1', lower=1.2, upper=1.3)\n model.add_design_var('area2', lower=2.0, upper=2.1)\n model.add_design_var('mat1', lower=1, upper=4)\n model.add_design_var('mat2', lower=1, upper=4)\n model.add_design_var('mat3', lower=1, upper=4)\n model.add_objective('weighted')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'area1': 6,\n 'area2': 6}\n prob.driver.options['max_gen'] = 75\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob['area3'] = 0.0005\n prob.run_driver()\n\n if extra_prints:\n print('mass', prob['mass'])\n print('mat1', prob['mat1'])\n print('mat2', prob['mat2'])\n\n # Note, GA doesn't do so well with the continuous vars, naturally, so we reduce the space\n # as much as we can. Objective is still rather random, but it is close. 
GA does a great job\n # of picking the correct values for the integer desvars though.\n self.assertLess(prob['mass'], 6.0)\n assert_near_equal(prob['mat1'], 3, 1e-5)\n assert_near_equal(prob['mat2'], 3, 1e-5)\n # Material 3 can be anything\n\n def test_mixed_integer_3bar_default_bits(self):\n # Tests bug where letting openmdao calculate the bits didn't preserve\n # integer status unless range was a power of 2.\n\n class ObjPenalty(om.ExplicitComponent):\n \"\"\"\n Weight objective with penalty on stress constraint.\n \"\"\"\n def setup(self):\n self.add_input('obj', 0.0)\n self.add_input('stress', val=np.zeros((3, )))\n\n self.add_output('weighted', 0.0)\n\n def compute(self, inputs, outputs):\n obj = inputs['obj']\n stress = inputs['stress']\n\n pen = 0.0\n for j in range(len(stress)):\n if stress[j] > 1.0:\n pen += 10.0*(stress[j] - 1.0)**2\n\n outputs['weighted'] = obj + pen\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('xc_a1', om.IndepVarComp('area1', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xc_a2', om.IndepVarComp('area2', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xc_a3', om.IndepVarComp('area3', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xi_m1', om.IndepVarComp('mat1', 1), promotes=['*'])\n model.add_subsystem('xi_m2', om.IndepVarComp('mat2', 1), promotes=['*'])\n model.add_subsystem('xi_m3', om.IndepVarComp('mat3', 1), promotes=['*'])\n model.add_subsystem('comp', ThreeBarTruss(), promotes=['*'])\n model.add_subsystem('obj_with_penalty', ObjPenalty(), promotes=['*'])\n\n model.add_design_var('area1', lower=1.2, upper=1.3)\n model.add_design_var('area2', lower=2.0, upper=2.1)\n model.add_design_var('mat1', lower=2, upper=4)\n model.add_design_var('mat2', lower=2, upper=4)\n model.add_design_var('mat3', lower=1, upper=4)\n model.add_objective('weighted')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'area1': 6,\n 'area2': 6}\n prob.driver.options['max_gen'] = 75\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob['area3'] = 0.0005\n prob.run_driver()\n\n if extra_prints:\n print('mass', prob['mass'])\n print('mat1', prob['mat1'])\n print('mat2', prob['mat2'])\n\n # Note, GA doesn't do so well with the continuous vars, naturally, so we reduce the space\n # as much as we can. Objective is still rather random, but it is close. 
GA does a great job\n # of picking the correct values for the integer desvars though.\n self.assertLess(prob['mass'], 6.0)\n assert_near_equal(prob['mat1'], 3, 1e-5)\n assert_near_equal(prob['mat2'], 3, 1e-5)\n # Material 3 can be anything\n\n def test_analysis_error(self):\n class ValueErrorComp(om.ExplicitComponent):\n def setup(self):\n self.add_input('x', 1.0)\n self.add_output('f', 1.0)\n\n def compute(self, inputs, outputs):\n raise ValueError\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p', om.IndepVarComp('x', 0.0))\n model.add_subsystem('comp', ValueErrorComp())\n\n model.connect('p.x', 'comp.x')\n\n model.add_design_var('p.x', lower=-5.0, upper=10.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver(max_gen=75, pop_size=25)\n prob.driver._randomstate = 1\n prob.setup()\n # prob.run_driver()\n self.assertRaises(ValueError, prob.run_driver)\n\n def test_encode_and_decode(self):\n ga = GeneticAlgorithm(None)\n gen = np.array([[0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1,\n 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1,\n 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0],\n [1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0,\n 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0]])\n vlb = np.array([-170.0, -170.0, -170.0, -170.0, -170.0, -170.0])\n vub = np.array([255.0, 255.0, 255.0, 170.0, 170.0, 170.0])\n bits = np.array([9, 9, 9, 9, 9, 9])\n x = np.array([[-69.36399217, 22.12328767, -7.81800391, -66.86888454, 116.77103718, 76.18395303],\n [248.34637965, 191.79060665, -31.93737769, 97.47553816, 118.76712329, 92.15264188]])\n\n ga.npop = 2\n ga.lchrom = int(np.sum(bits))\n np.testing.assert_array_almost_equal(x, ga.decode(gen, vlb, vub, bits))\n np.testing.assert_array_almost_equal(gen[0], ga.encode(x[0], vlb, vub, bits))\n np.testing.assert_array_almost_equal(gen[1], ga.encode(x[1], vlb, vub, bits))\n\n dec = ga.decode(gen, vlb, vub, bits)\n enc0 = ga.encode(dec[0], vlb, vub, bits)\n enc1 = ga.encode(dec[1], vlb, vub, bits)\n np.testing.assert_array_almost_equal(gen[0], enc0) # decode followed by encode gives original array\n np.testing.assert_array_almost_equal(gen[1], enc1)\n\n def test_encode_and_decode_gray_code(self):\n ga = GeneticAlgorithm(None)\n gen = np.array([[0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0,\n 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0,\n 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1],\n [1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0,\n 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1,\n 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1]])\n vlb = np.array([-170.0, -170.0, -170.0, -170.0, -170.0, -170.0])\n vub = np.array([255.0, 255.0, 255.0, 170.0, 170.0, 170.0])\n bits = np.array([9, 9, 9, 9, 9, 9])\n x = np.array([[-69.36399217, 22.12328767, -7.81800391, -66.86888454, 116.77103718, 76.18395303],\n [248.34637965, 191.79060665, -31.93737769, 97.47553816, 118.76712329, 92.15264188]])\n\n ga.npop = 2\n ga.lchrom = int(np.sum(bits))\n ga.gray_code = True\n np.testing.assert_array_almost_equal(x, ga.decode(gen, vlb, vub, bits))\n np.testing.assert_array_almost_equal(gen[0], ga.encode(x[0], vlb, vub, bits))\n np.testing.assert_array_almost_equal(gen[1], ga.encode(x[1], vlb, vub, bits))\n\n dec = ga.decode(gen, vlb, vub, bits)\n enc0 = ga.encode(dec[0], vlb, vub, bits)\n enc1 = ga.encode(dec[1], vlb, vub, bits)\n np.testing.assert_array_almost_equal(gen[0], enc0) # decode followed by encode gives 
original array\n np.testing.assert_array_almost_equal(gen[1], enc1)\n\n def test_vector_desvars_multiobj(self):\n prob = om.Problem()\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n indeps.add_output('x', 3)\n indeps.add_output('y', [4.0, -4])\n\n prob.model.add_subsystem('paraboloid1',\n om.ExecComp('f = (x+5)**2- 3'))\n prob.model.add_subsystem('paraboloid2',\n om.ExecComp('f = (y[0]-3)**2 + (y[1]-1)**2 - 3',\n y=[0, 0]))\n prob.model.connect('indeps.x', 'paraboloid1.x')\n prob.model.connect('indeps.y', 'paraboloid2.y')\n\n prob.driver = om.SimpleGADriver()\n\n prob.model.add_design_var('indeps.x', lower=-5, upper=5)\n prob.model.add_design_var('indeps.y', lower=[-10, 0], upper=[10, 3])\n prob.model.add_objective('paraboloid1.f')\n prob.model.add_objective('paraboloid2.f')\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('indeps.x', prob['indeps.x'])\n print('indeps.y', prob['indeps.y'])\n\n np.testing.assert_array_almost_equal(prob['indeps.x'], -5)\n np.testing.assert_array_almost_equal(prob['indeps.y'], [3, 1])\n\n def test_SimpleGADriver_missing_objective(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('x', om.IndepVarComp('x', 2.0), promotes=['*'])\n model.add_subsystem('f_x', Paraboloid(), promotes=['*'])\n\n prob.driver = om.SimpleGADriver()\n\n prob.model.add_design_var('x', lower=0)\n prob.model.add_constraint('x', lower=0)\n\n prob.setup()\n\n with self.assertRaises(Exception) as raises_msg:\n prob.run_driver()\n\n exception = raises_msg.exception\n\n msg = \"Driver requires objective to be declared\"\n\n self.assertEqual(exception.args[0], msg)\n\n def test_vectorized_constraints(self):\n prob = om.Problem()\n model = prob.model\n\n dim = 2\n model.add_subsystem('x', om.IndepVarComp('x', np.ones(dim)), promotes=['*'])\n model.add_subsystem('f_x', om.ExecComp('f_x = sum(x * x)', x=np.ones(dim), f_x=1.0), promotes=['*'])\n model.add_subsystem('g_x', om.ExecComp('g_x = 1 - x', x=np.ones(dim), g_x=np.zeros(dim)), promotes=['*'])\n\n prob.driver = om.SimpleGADriver()\n\n prob.model.add_design_var('x', lower=-10, upper=10)\n prob.model.add_objective('f_x')\n prob.model.add_constraint('g_x', upper=np.zeros(dim))\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('x', prob['x'])\n\n # Check that the constraint is satisfied (x >= 1)\n for i in range(dim):\n self.assertLessEqual(1.0, prob[\"x\"][i])\n\n\nclass TestDriverOptionsSimpleGA(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_driver_options(self):\n \"\"\"Tests if Pm and Pc options can be set.\"\"\"\n prob = om.Problem()\n model = prob.model\n indeps = model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('x', 1.)\n model.add_subsystem('model', om.ExecComp('y=x**2'), promotes=['*'])\n driver = prob.driver = om.SimpleGADriver()\n driver.options['Pm'] = 0.123\n driver.options['Pc'] = 0.0123\n driver.options['max_gen'] = 5\n driver.options['bits'] = {'x': 8}\n prob.model.add_design_var('x', lower=-10., upper=10.)\n prob.model.add_objective('y')\n prob.setup()\n prob.run_driver()\n self.assertEqual(driver.options['Pm'], 0.123)\n self.assertEqual(driver.options['Pc'], 0.0123)\n\n\nclass TestMultiObjectiveSimpleGA(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_multi_obj(self):\n\n class Box(om.ExplicitComponent):\n\n def setup(self):\n self.add_input('length', val=1.)\n self.add_input('width', 
val=1.)\n self.add_input('height', val=1.)\n\n self.add_output('front_area', val=1.0)\n self.add_output('top_area', val=1.0)\n self.add_output('area', val=1.0)\n self.add_output('volume', val=1.)\n\n def compute(self, inputs, outputs):\n length = inputs['length']\n width = inputs['width']\n height = inputs['height']\n\n outputs['top_area'] = length * width\n outputs['front_area'] = length * height\n outputs['area'] = 2*length*height + 2*length*width + 2*height*width\n outputs['volume'] = length*height*width\n\n prob = om.Problem()\n prob.model.add_subsystem('box', Box(), promotes=['*'])\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('length', 1.5)\n indeps.add_output('width', 1.5)\n indeps.add_output('height', 1.5)\n\n # setup the optimization\n prob.driver = om.SimpleGADriver()\n prob.driver.options['max_gen'] = 100\n prob.driver.options['bits'] = {'length': 8, 'width': 8, 'height': 8}\n prob.driver.options['multi_obj_exponent'] = 1.\n prob.driver.options['penalty_parameter'] = 10.\n prob.driver.options['multi_obj_weights'] = {'box.front_area': 0.1,\n 'box.top_area': 0.9}\n prob.driver.options['multi_obj_exponent'] = 1\n\n prob.model.add_design_var('length', lower=0.1, upper=2.)\n prob.model.add_design_var('width', lower=0.1, upper=2.)\n prob.model.add_design_var('height', lower=0.1, upper=2.)\n prob.model.add_objective('front_area', scaler=-1) # maximize\n prob.model.add_objective('top_area', scaler=-1) # maximize\n prob.model.add_constraint('volume', upper=1.)\n\n # run #1\n prob.setup()\n prob.run_driver()\n front = prob['front_area']\n top = prob['top_area']\n l1 = prob['length']\n w1 = prob['width']\n h1 = prob['height']\n\n if extra_prints:\n print('Box dims: ', l1, w1, h1)\n print('Front and top area: ', front, top)\n print('Volume: ', prob['volume']) # should be around 1\n\n # run #2\n # weights changed\n prob2 = om.Problem()\n prob2.model.add_subsystem('box', Box(), promotes=['*'])\n\n indeps2 = prob2.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps2.add_output('length', 1.5)\n indeps2.add_output('width', 1.5)\n indeps2.add_output('height', 1.5)\n\n # setup the optimization\n prob2.driver = om.SimpleGADriver()\n prob2.driver.options['max_gen'] = 100\n prob2.driver.options['bits'] = {'length': 8, 'width': 8, 'height': 8}\n prob2.driver.options['multi_obj_exponent'] = 1.\n prob2.driver.options['penalty_parameter'] = 10.\n prob2.driver.options['multi_obj_weights'] = {'box.front_area': 0.9,\n 'box.top_area': 0.1}\n prob2.driver.options['multi_obj_exponent'] = 1\n\n prob2.model.add_design_var('length', lower=0.1, upper=2.)\n prob2.model.add_design_var('width', lower=0.1, upper=2.)\n prob2.model.add_design_var('height', lower=0.1, upper=2.)\n prob2.model.add_objective('front_area', scaler=-1) # maximize\n prob2.model.add_objective('top_area', scaler=-1) # maximize\n prob2.model.add_constraint('volume', upper=1.)\n\n # run #1\n prob2.setup()\n prob2.run_driver()\n front2 = prob2['front_area']\n top2 = prob2['top_area']\n l2 = prob2['length']\n w2 = prob2['width']\n h2 = prob2['height']\n\n if extra_prints:\n print('Box dims: ', l2, w2, h2)\n print('Front and top area: ', front2, top2)\n print('Volume: ', prob['volume']) # should be around 1\n\n self.assertGreater(w1, w2) # front area does not depend on width\n self.assertGreater(h2, h1) # top area does not depend on height\n\n\nclass TestConstrainedSimpleGA(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(1)\n os.environ['SimpleGADriver_seed'] = 
'11'\n\n def test_constrained_with_penalty(self):\n\n class Cylinder(om.ExplicitComponent):\n \"\"\"Main class\"\"\"\n\n def setup(self):\n self.add_input('radius', val=1.0)\n self.add_input('height', val=1.0)\n\n self.add_output('Area', val=1.0)\n self.add_output('Volume', val=1.0)\n\n def compute(self, inputs, outputs):\n radius = inputs['radius']\n height = inputs['height']\n\n area = height * radius * 2 * 3.14 + 3.14 * radius ** 2 * 2\n volume = 3.14 * radius ** 2 * height\n outputs['Area'] = area\n outputs['Volume'] = volume\n\n prob = om.Problem()\n prob.model.add_subsystem('cylinder', Cylinder(), promotes=['*'])\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('radius', 2.) # height\n indeps.add_output('height', 3.) # radius\n\n # setup the optimization\n driver = prob.driver = om.SimpleGADriver()\n prob.driver.options['penalty_parameter'] = 3.\n prob.driver.options['penalty_exponent'] = 1.\n prob.driver.options['max_gen'] = 50\n prob.driver.options['bits'] = {'radius': 8, 'height': 8}\n\n prob.model.add_design_var('radius', lower=0.5, upper=5.)\n prob.model.add_design_var('height', lower=0.5, upper=5.)\n prob.model.add_objective('Area')\n prob.model.add_constraint('Volume', lower=10.)\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('radius', prob['radius']) # exact solution is (5/pi)^(1/3) ~= 1.167\n print('height', prob['height']) # exact solution is 2*radius\n print('Area', prob['Area'])\n print('Volume', prob['Volume']) # should be around 10\n\n self.assertTrue(driver.supports[\"equality_constraints\"], True)\n self.assertTrue(driver.supports[\"inequality_constraints\"], True)\n # check that it is not going to the unconstrained optimum\n self.assertGreater(prob['radius'], 1.)\n self.assertGreater(prob['height'], 1.)\n\n def test_driver_supports(self):\n\n prob = om.Problem()\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n\n # setup the optimization\n driver = prob.driver = om.SimpleGADriver()\n\n with self.assertRaises(KeyError) as raises_msg:\n prob.driver.supports['equality_constraints'] = False\n\n exception = raises_msg.exception\n\n msg = \"SimpleGADriver: Tried to set read-only option 'equality_constraints'.\"\n\n self.assertEqual(exception.args[0], msg)\n\n def test_constrained_without_penalty(self):\n\n class Cylinder(om.ExplicitComponent):\n \"\"\"Main class\"\"\"\n\n def setup(self):\n self.add_input('radius', val=1.0)\n self.add_input('height', val=1.0)\n\n self.add_output('Area', val=1.0)\n self.add_output('Volume', val=1.0)\n\n def compute(self, inputs, outputs):\n radius = inputs['radius']\n height = inputs['height']\n\n area = height * radius * 2 * 3.14 + 3.14 * radius ** 2 * 2\n volume = 3.14 * radius ** 2 * height\n outputs['Area'] = area\n outputs['Volume'] = volume\n\n prob = om.Problem()\n prob.model.add_subsystem('cylinder', Cylinder(), promotes=['*'])\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('radius', 2.) # height\n indeps.add_output('height', 3.) # radius\n\n # setup the optimization\n driver = prob.driver = om.SimpleGADriver()\n prob.driver.options['penalty_parameter'] = 0. 
# no penalty, same as unconstrained\n prob.driver.options['penalty_exponent'] = 1.\n prob.driver.options['max_gen'] = 50\n prob.driver.options['bits'] = {'radius': 8, 'height': 8}\n\n prob.model.add_design_var('radius', lower=0.5, upper=5.)\n prob.model.add_design_var('height', lower=0.5, upper=5.)\n prob.model.add_objective('Area')\n prob.model.add_constraint('Volume', lower=10.)\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('radius', prob['radius']) # exact solution is (5/pi)^(1/3) ~= 1.167\n print('height', prob['height']) # exact solution is 2*radius\n print('Area', prob['Area'])\n print('Volume', prob['Volume']) # should be around 10\n\n self.assertTrue(driver.supports[\"equality_constraints\"], True)\n self.assertTrue(driver.supports[\"inequality_constraints\"], True)\n # it is going to the unconstrained optimum\n self.assertAlmostEqual(prob['radius'], 0.5, 1)\n self.assertAlmostEqual(prob['height'], 0.5, 1)\n\n def test_no_constraint(self):\n\n class Cylinder(om.ExplicitComponent):\n \"\"\"Main class\"\"\"\n\n def setup(self):\n self.add_input('radius', val=1.0)\n self.add_input('height', val=1.0)\n\n self.add_output('Area', val=1.0)\n self.add_output('Volume', val=1.0)\n\n def compute(self, inputs, outputs):\n radius = inputs['radius']\n height = inputs['height']\n\n area = height * radius * 2 * 3.14 + 3.14 * radius ** 2 * 2\n volume = 3.14 * radius ** 2 * height\n outputs['Area'] = area\n outputs['Volume'] = volume\n\n prob = om.Problem()\n prob.model.add_subsystem('cylinder', Cylinder(), promotes=['*'])\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('radius', 2.) # height\n indeps.add_output('height', 3.) # radius\n\n # setup the optimization\n driver = prob.driver = om.SimpleGADriver()\n prob.driver.options['penalty_parameter'] = 10. 
# will have no effect\n prob.driver.options['penalty_exponent'] = 1.\n prob.driver.options['max_gen'] = 50\n prob.driver.options['bits'] = {'radius': 8, 'height': 8}\n\n prob.model.add_design_var('radius', lower=0.5, upper=5.)\n prob.model.add_design_var('height', lower=0.5, upper=5.)\n prob.model.add_objective('Area')\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('radius', prob['radius']) # exact solution is (5/pi)^(1/3) ~= 1.167\n print('height', prob['height']) # exact solution is 2*radius\n print('Area', prob['Area'])\n print('Volume', prob['Volume']) # should be around 10\n\n self.assertTrue(driver.supports[\"equality_constraints\"], True)\n self.assertTrue(driver.supports[\"inequality_constraints\"], True)\n self.assertAlmostEqual(prob['radius'], 0.5, 1) # it is going to the unconstrained optimum\n self.assertAlmostEqual(prob['height'], 0.5, 1) # it is going to the unconstrained optimum\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass MPITestSimpleGA(unittest.TestCase):\n\n N_PROCS = 2\n\n def setUp(self):\n np.random.seed(1)\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_mixed_integer_branin(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n model.add_subsystem('comp', Branin())\n\n model.connect('p2.xI', 'comp.x0')\n model.connect('p1.xC', 'comp.x1')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n prob.driver.options['max_gen'] = 50\n prob.driver.options['pop_size'] = 25\n prob.driver.options['run_parallel'] = True\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('comp.f', prob['comp.f'])\n print('p2.xI', prob['p2.xI'])\n\n # Optimal solution\n assert_near_equal(prob['comp.f'], 0.49399549, 1e-4)\n self.assertTrue(int(prob['p2.xI']) in [3, -3])\n\n def test_two_branin_parallel_model(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n par = model.add_subsystem('par', om.ParallelGroup())\n\n par.add_subsystem('comp1', Branin())\n par.add_subsystem('comp2', Branin())\n\n model.connect('p2.xI', 'par.comp1.x0')\n model.connect('p1.xC', 'par.comp1.x1')\n model.connect('p2.xI', 'par.comp2.x0')\n model.connect('p1.xC', 'par.comp2.x1')\n\n model.add_subsystem('comp', om.ExecComp('f = f1 + f2'))\n model.connect('par.comp1.f', 'comp.f1')\n model.connect('par.comp2.f', 'comp.f2')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n prob.driver.options['max_gen'] = 40\n prob.driver.options['pop_size'] = 25\n prob.driver.options['run_parallel'] = False\n prob.driver.options['procs_per_model'] = 2\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('comp.f', prob['comp.f'])\n print('p2.xI', prob['p2.xI'])\n\n # Optimal solution\n assert_near_equal(prob['comp.f'], 0.98799098, 1e-4)\n self.assertTrue(int(prob['p2.xI']) in [3, -3])\n\n def test_mixed_integer_3bar_default_bits(self):\n # Tests bug where letting openmdao calculate the bits didn't preserve\n # integer 
status unless range was a power of 2.\n\n class ObjPenalty(om.ExplicitComponent):\n \"\"\"\n Weight objective with penalty on stress constraint.\n \"\"\"\n\n def setup(self):\n self.add_input('obj', 0.0)\n self.add_input('stress', val=np.zeros((3, )))\n\n self.add_output('weighted', 0.0)\n\n def compute(self, inputs, outputs):\n obj = inputs['obj']\n stress = inputs['stress']\n\n pen = 0.0\n for j in range(len(stress)):\n if stress[j] > 1.0:\n pen += 10.0*(stress[j] - 1.0)**2\n\n outputs['weighted'] = obj + pen\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('xc_a1', om.IndepVarComp('area1', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xc_a2', om.IndepVarComp('area2', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xc_a3', om.IndepVarComp('area3', 5.0, units='cm**2'), promotes=['*'])\n model.add_subsystem('xi_m1', om.IndepVarComp('mat1', 1), promotes=['*'])\n model.add_subsystem('xi_m2', om.IndepVarComp('mat2', 1), promotes=['*'])\n model.add_subsystem('xi_m3', om.IndepVarComp('mat3', 1), promotes=['*'])\n model.add_subsystem('comp', ThreeBarTruss(), promotes=['*'])\n model.add_subsystem('obj_with_penalty', ObjPenalty(), promotes=['*'])\n\n model.add_design_var('area1', lower=1.2, upper=1.3)\n model.add_design_var('area2', lower=2.0, upper=2.1)\n model.add_design_var('mat1', lower=2, upper=4)\n model.add_design_var('mat2', lower=2, upper=4)\n model.add_design_var('mat3', lower=1, upper=4)\n model.add_objective('weighted')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'area1': 6,\n 'area2': 6}\n prob.driver.options['max_gen'] = 75\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob['area3'] = 0.0005\n prob.run_driver()\n\n if extra_prints:\n print('mass', prob['mass'])\n print('mat1', prob['mat1'])\n print('mat2', prob['mat2'])\n\n # Note, GA doesn't do so well with the continuous vars, naturally, so we reduce the space\n # as much as we can. Objective is still rather random, but it is close. 
GA does a great job\n # of picking the correct values for the integer desvars though.\n self.assertLess(prob['mass'], 6.0)\n assert_near_equal(prob['mat1'], 3, 1e-5)\n assert_near_equal(prob['mat2'], 3, 1e-5)\n # Material 3 can be anything\n\n def test_mpi_bug_solver(self):\n # This test verifies that mpi doesn't hang due to collective calls in the solver.\n\n prob = om.Problem()\n prob.model = SellarMDA()\n\n prob.model.add_design_var('x', lower=0, upper=10)\n prob.model.add_design_var('z', lower=0, upper=10)\n prob.model.add_objective('obj')\n\n prob.driver = om.SimpleGADriver(run_parallel=True)\n\n # Set these low because we don't need to run long.\n prob.driver.options['max_gen'] = 2\n prob.driver.options['pop_size'] = 5\n\n prob.setup()\n prob.set_solver_print(level=0)\n\n prob.run_driver()\n\n\nclass D1(om.ExplicitComponent):\n def initialize(self):\n self.options['distributed'] = True\n\n def setup(self):\n comm = self.comm\n rank = comm.rank\n\n if rank == 1:\n start = 1\n end = 2\n else:\n start = 0\n end = 1\n\n self.add_input('y2', np.ones((1, ), float),\n src_indices=np.arange(start, end, dtype=int))\n self.add_input('x', np.ones((1, ), float))\n\n self.add_output('y1', np.ones((1, ), float))\n\n self.declare_partials('y1', ['y2', 'x'])\n\n def compute(self, inputs, outputs):\n y2 = inputs['y2']\n x = inputs['x']\n\n if self.comm.rank == 1:\n outputs['y1'] = 18.0 - 0.2*y2 + 2*x\n else:\n outputs['y1'] = 28.0 - 0.2*y2 + x\n\n def compute_partials(self, inputs, partials, discrete_inputs=None):\n y2 = inputs['y2']\n x = inputs['x']\n\n partials['y1', 'y2'] = -0.2\n if self.comm.rank == 1:\n partials['y1', 'x'] = 2.0\n else:\n partials['y1', 'x'] = 1.0\n\n\nclass D2(om.ExplicitComponent):\n def initialize(self):\n self.options['distributed'] = True\n\n def setup(self):\n comm = self.comm\n rank = comm.rank\n\n if rank == 1:\n start = 1\n end = 2\n else:\n start = 0\n end = 1\n\n self.add_input('y1', np.ones((1, ), float),\n src_indices=np.arange(start, end, dtype=int))\n\n self.add_output('y2', np.ones((1, ), float))\n\n self.declare_partials('y2', ['y1'])\n\n def compute(self, inputs, outputs):\n y1 = inputs['y1']\n\n if self.comm.rank == 1:\n outputs['y2'] = y2 = y1**.5 - 3\n else:\n outputs['y2'] = y1**.5 + 7\n\n def compute_partials(self, inputs, partials, discrete_inputs=None):\n y1 = inputs['y1']\n\n partials['y2', 'y1'] = 0.5 / y1**.5\n\n\nclass Summer(om.ExplicitComponent):\n def setup(self):\n self.add_input('y1', val=np.zeros((2, )))\n self.add_input('y2', val=np.zeros((2, )))\n self.add_output('obj', 0.0, shape=1)\n\n self.declare_partials('obj', 'y1', rows=np.array([0, 0]), cols=np.array([0, 1]), val=np.ones((2, )))\n\n def compute(self, inputs, outputs):\n outputs['obj'] = np.sum(inputs['y1']) + np.sum(inputs['y2'])\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass MPITestSimpleGA4Procs(unittest.TestCase):\n\n N_PROCS = 4\n\n def setUp(self):\n np.random.seed(1)\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_two_branin_parallel_model(self):\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n par = model.add_subsystem('par', om.ParallelGroup())\n\n par.add_subsystem('comp1', Branin())\n par.add_subsystem('comp2', Branin())\n\n model.connect('p2.xI', 'par.comp1.x0')\n model.connect('p1.xC', 'par.comp1.x1')\n model.connect('p2.xI', 'par.comp2.x0')\n model.connect('p1.xC', 'par.comp2.x1')\n\n model.add_subsystem('comp', 
om.ExecComp('f = f1 + f2'))\n model.connect('par.comp1.f', 'comp.f1')\n model.connect('par.comp2.f', 'comp.f2')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n prob.driver.options['max_gen'] = 10\n prob.driver.options['pop_size'] = 25\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = 2\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob.run_driver()\n\n if extra_prints:\n print('comp.f', prob['comp.f'])\n print('p2.xI', prob['p2.xI'])\n\n # Optimal solution\n assert_near_equal(prob['comp.f'], 0.98799098, 1e-4)\n self.assertTrue(int(prob['p2.xI']) in [3, -3])\n\n def test_indivisible_error(self):\n prob = om.Problem()\n model = prob.model\n model.add_subsystem('par', om.ParallelGroup())\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = 3\n\n with self.assertRaises(RuntimeError) as context:\n prob.setup()\n\n self.assertEqual(str(context.exception),\n \"The total number of processors is not evenly divisible by the \"\n \"specified number of processors per model.\\n Provide a number of \"\n \"processors that is a multiple of 3, or specify a number \"\n \"of processors per model that divides into 4.\")\n\n def test_concurrent_eval_padded(self):\n # This test only makes sure we don't lock up if we overallocate our integer desvar space\n # to the next power of 2.\n\n class GAGroup(om.Group):\n\n def setup(self):\n\n self.add_subsystem('p1', om.IndepVarComp('x', 1.0))\n self.add_subsystem('p2', om.IndepVarComp('y', 1.0))\n self.add_subsystem('p3', om.IndepVarComp('z', 1.0))\n\n self.add_subsystem('comp', om.ExecComp(['f = x + y + z']))\n\n self.add_design_var('p1.x', lower=-100, upper=100)\n self.add_design_var('p2.y', lower=-100, upper=100)\n self.add_design_var('p3.z', lower=-100, upper=100)\n self.add_objective('comp.f')\n\n prob = om.Problem()\n prob.model = GAGroup()\n\n driver = prob.driver = om.SimpleGADriver()\n driver.options['max_gen'] = 5\n driver.options['pop_size'] = 40\n driver.options['run_parallel'] = True\n\n prob.setup()\n\n # No meaningful result from a short run; just make sure we don't hang.\n prob.run_driver()\n\n def test_proc_per_model(self):\n # Test that we can run a GA on a distributed component without lockups.\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p', om.IndepVarComp('x', 3.0), promotes=['x'])\n\n model.add_subsystem('d1', D1(), promotes=['*'])\n model.add_subsystem('d2', D2(), promotes=['*'])\n\n model.add_subsystem('obj_comp', Summer(), promotes=['*'])\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.LinearBlockGS()\n\n model.add_design_var('x', lower=-0.5, upper=0.5)\n model.add_objective('obj')\n\n driver = prob.driver = om.SimpleGADriver()\n prob.driver.options['pop_size'] = 4\n prob.driver.options['max_gen'] = 3\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = 2\n\n prob.setup()\n prob.set_solver_print(level=0)\n\n prob.run_driver()\n\n\nclass TestFeatureSimpleGA(unittest.TestCase):\n\n def setUp(self):\n import numpy as np\n np.random.seed(1)\n\n import os\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_basic(self):\n import openmdao.api as om\n from openmdao.test_suite.components.branin import Branin\n\n prob = om.Problem()\n model = 
prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n model.add_subsystem('comp', Branin())\n\n model.connect('p2.xI', 'comp.x0')\n model.connect('p1.xC', 'comp.x1')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n\n prob.setup()\n prob.run_driver()\n\n def test_basic_with_assert(self):\n import openmdao.api as om\n from openmdao.test_suite.components.branin import Branin\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n model.add_subsystem('comp', Branin())\n\n model.connect('p2.xI', 'comp.x0')\n model.connect('p1.xC', 'comp.x1')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob.run_driver()\n\n # Optimal solution\n assert_near_equal(prob['comp.f'], 0.49399549, 1e-4)\n\n def test_option_max_gen(self):\n import openmdao.api as om\n from openmdao.test_suite.components.branin import Branin\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n model.add_subsystem('comp', Branin())\n\n model.connect('p2.xI', 'comp.x0')\n model.connect('p1.xC', 'comp.x1')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n prob.driver.options['max_gen'] = 5\n\n prob.setup()\n prob.run_driver()\n\n def test_option_pop_size(self):\n import openmdao.api as om\n from openmdao.test_suite.components.branin import Branin\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n model.add_subsystem('comp', Branin())\n\n model.connect('p2.xI', 'comp.x0')\n model.connect('p1.xC', 'comp.x1')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n prob.driver.options['pop_size'] = 10\n\n prob.setup()\n prob.run_driver()\n\n def test_constrained_with_penalty(self):\n import openmdao.api as om\n\n class Cylinder(om.ExplicitComponent):\n \"\"\"Main class\"\"\"\n\n def setup(self):\n self.add_input('radius', val=1.0)\n self.add_input('height', val=1.0)\n\n self.add_output('Area', val=1.0)\n self.add_output('Volume', val=1.0)\n\n def compute(self, inputs, outputs):\n radius = inputs['radius']\n height = inputs['height']\n\n area = height * radius * 2 * 3.14 + 3.14 * radius ** 2 * 2\n volume = 3.14 * radius ** 2 * height\n outputs['Area'] = area\n outputs['Volume'] = volume\n\n prob = om.Problem()\n prob.model.add_subsystem('cylinder', Cylinder(), promotes=['*'])\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])\n indeps.add_output('radius', 2.) # height\n indeps.add_output('height', 3.) 
# radius\n\n # setup the optimization\n prob.driver = om.SimpleGADriver()\n prob.driver.options['penalty_parameter'] = 3.\n prob.driver.options['penalty_exponent'] = 1.\n prob.driver.options['max_gen'] = 50\n prob.driver.options['bits'] = {'radius': 8, 'height': 8}\n\n prob.model.add_design_var('radius', lower=0.5, upper=5.)\n prob.model.add_design_var('height', lower=0.5, upper=5.)\n prob.model.add_objective('Area')\n prob.model.add_constraint('Volume', lower=10.)\n\n prob.setup()\n prob.run_driver()\n\n # These go to 0.5 for unconstrained problem. With constraint and penalty, they\n # will be above 1.0 (actual values will vary.)\n self.assertGreater(prob['radius'], 1.)\n self.assertGreater(prob['height'], 1.)\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass MPIFeatureTests(unittest.TestCase):\n N_PROCS = 2\n\n def setUp(self):\n import numpy as np\n np.random.seed(1)\n\n import os\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_option_parallel(self):\n import openmdao.api as om\n from openmdao.test_suite.components.branin import Branin\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n model.add_subsystem('comp', Branin())\n\n model.connect('p2.xI', 'comp.x0')\n model.connect('p1.xC', 'comp.x1')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n prob.driver.options['max_gen'] = 10\n prob.driver.options['run_parallel'] = True\n\n prob.setup()\n prob.run_driver()\n\n # Optimal solution\n if extra_prints:\n print('comp.f', prob['comp.f'])\n print('p2.xI', prob['p2.xI'])\n print('p1.xC', prob['p1.xC'])\n\n\[email protected](MPI and PETScVector, \"MPI and PETSc are required.\")\nclass MPIFeatureTests4(unittest.TestCase):\n N_PROCS = 4\n\n def setUp(self):\n import numpy as np\n np.random.seed(1)\n\n import os\n os.environ['SimpleGADriver_seed'] = '11'\n\n def test_option_procs_per_model(self):\n import openmdao.api as om\n from openmdao.test_suite.components.branin import Branin\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('xC', 7.5))\n model.add_subsystem('p2', om.IndepVarComp('xI', 0.0))\n par = model.add_subsystem('par', om.ParallelGroup())\n\n par.add_subsystem('comp1', Branin())\n par.add_subsystem('comp2', Branin())\n\n model.connect('p2.xI', 'par.comp1.x0')\n model.connect('p1.xC', 'par.comp1.x1')\n model.connect('p2.xI', 'par.comp2.x0')\n model.connect('p1.xC', 'par.comp2.x1')\n\n model.add_subsystem('comp', om.ExecComp('f = f1 + f2'))\n model.connect('par.comp1.f', 'comp.f1')\n model.connect('par.comp2.f', 'comp.f2')\n\n model.add_design_var('p2.xI', lower=-5.0, upper=10.0)\n model.add_design_var('p1.xC', lower=0.0, upper=15.0)\n model.add_objective('comp.f')\n\n prob.driver = om.SimpleGADriver()\n prob.driver.options['bits'] = {'p1.xC': 8}\n prob.driver.options['max_gen'] = 10\n prob.driver.options['pop_size'] = 25\n prob.driver.options['run_parallel'] = True\n prob.driver.options['procs_per_model'] = 2\n\n prob.driver._randomstate = 1\n\n prob.setup()\n prob.run_driver()\n\n # Optimal solution\n if extra_prints:\n print('comp.f', prob['comp.f'])\n print('p2.xI', prob['p2.xI'])\n print('p1.xC', prob['p1.xC'])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
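"# Illustrative sketch (an editorial addition, not from the scraped repo): the\n# encode/decode tests above exercise GeneticAlgorithm's plain-binary chromosome\n# and its Gray-coded variant. Gray code makes adjacent integers differ by\n# exactly one bit, avoiding the 'Hamming cliffs' that hurt bit-flip mutation.\n# This is the textbook integer construction, not the driver's internal layout:\ndef to_gray(n):\n # binary -> reflected Gray code\n return n ^ (n >> 1)\n\ndef from_gray(g):\n # Gray -> binary: XOR-accumulate all right shifts of g\n n = 0\n while g:\n n ^= g\n g >>= 1\n return n\n\n# round trip is exact for 9-bit values, matching the bits=9 used in the tests\nassert all(from_gray(to_gray(i)) == i for i in range(1 << 9))\n# neighbors differ in exactly one bit under Gray coding\nassert all(bin(to_gray(i) ^ to_gray(i + 1)).count('1') == 1 for i in range((1 << 9) - 1))\n",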
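"# Illustrative sketch (an editorial addition, not from the scraped repo): the\n# constrained GA tests above steer an exterior penalty via the driver's\n# 'penalty_parameter' (p) and 'penalty_exponent' (k) options. The general merit\n# function being exercised is f(x) + p * sum(max(0, g_i(x)) ** k) for\n# constraints posed as g_i(x) <= 0; the exact internal form is an assumption.\ndef penalized_objective(f, gs, p=3.0, k=1.0):\n # f: objective value; gs: iterable of constraint values, feasible when <= 0\n return f + p * sum(max(0.0, g) ** k for g in gs)\n\n# a feasible point pays no penalty; an infeasible one has its objective inflated\nassert penalized_objective(1.0, [-0.5]) == 1.0\nassert penalized_objective(1.0, [0.5], p=3.0, k=1.0) == 2.5\n",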
"\"\"\"Test the Newton nonlinear solver. \"\"\"\n\nimport unittest\nimport warnings\n\n\nimport numpy as np\n\nimport openmdao.api as om\nfrom openmdao.core.tests.test_discrete import InternalDiscreteGroup\nfrom openmdao.test_suite.components.double_sellar import DoubleSellar, DoubleSellarImplicit, \\\n SubSellar\nfrom openmdao.test_suite.components.implicit_newton_linesearch import ImplCompTwoStates\nfrom openmdao.test_suite.components.sellar import SellarDerivativesGrouped, \\\n SellarNoDerivatives, SellarDerivatives, SellarStateConnection, StateConnection, \\\n SellarDis1withDerivatives, SellarDis2withDerivatives\nfrom openmdao.utils.assert_utils import assert_near_equal, assert_warning, assert_no_warning\nfrom openmdao.utils.mpi import MPI\n\ntry:\n from openmdao.vectors.petsc_vector import PETScVector\nexcept ImportError:\n PETScVector = None\n\n\nclass TestNewton(unittest.TestCase):\n\n def test_specify_newton_linear_solver_in_system(self):\n\n my_newton = om.NewtonSolver(solve_subsystems=False)\n my_newton.linear_solver = om.DirectSolver()\n\n prob = om.Problem(model=SellarDerivatives(nonlinear_solver=my_newton))\n\n prob.setup()\n\n self.assertIsInstance(prob.model.nonlinear_solver.linear_solver, om.DirectSolver)\n\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n def test_feature_newton_basic(self):\n \"\"\" Feature test for slotting a Newton solver and using it to solve\n Sellar.\n \"\"\"\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDerivatives\n\n prob = om.Problem(model=SellarDerivatives(nonlinear_solver=om.NewtonSolver(solve_subsystems=False)))\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n def test_sellar_grouped(self):\n # Tests basic Newton solution on Sellar in a subgroup\n\n prob = om.Problem(model=SellarDerivativesGrouped(nonlinear_solver=om.NewtonSolver(solve_subsystems=False)))\n\n prob.setup()\n prob.set_solver_print(level=0)\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(prob.model.nonlinear_solver._iter_count, 8)\n\n def test_sellar(self):\n # Just tests Newton on Sellar with FD derivs.\n\n prob = om.Problem(model=SellarNoDerivatives(nonlinear_solver=om.NewtonSolver(solve_subsystems=False)))\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(prob.model.nonlinear_solver._iter_count, 8)\n\n def test_sellar_derivs(self):\n # Test top level Sellar (i.e., not grouped).\n # Also, piggybacked testing that makes sure we only call apply_nonlinear\n # on the head component behind the cycle break.\n\n prob = om.Problem()\n prob.model = SellarDerivatives(nonlinear_solver=om.NewtonSolver(solve_subsystems=False),\n linear_solver=om.LinearBlockGS())\n\n prob.setup()\n prob.set_solver_print(level=0)\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(prob.model.nonlinear_solver._iter_count, 8)\n\n ## Make sure we only call apply_linear on 'heads'\n #nd1 = prob.model.d1.execution_count\n #nd2 = prob.model.d2.execution_count\n #if 
prob.model.d1._run_apply == True:\n #self.assertEqual(nd1, 2*nd2)\n #else:\n #self.assertEqual(2*nd1, nd2)\n\n def test_sellar_derivs_with_Lin_GS(self):\n\n prob = om.Problem(model=SellarDerivatives(nonlinear_solver=om.NewtonSolver(solve_subsystems=False)))\n\n prob.setup()\n prob.set_solver_print(level=0)\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(prob.model.nonlinear_solver._iter_count, 8)\n\n def test_sellar_state_connection(self):\n # Sellar model closes loop with state connection instead of a cycle.\n\n prob = om.Problem(model=SellarStateConnection(nonlinear_solver=om.NewtonSolver(solve_subsystems=False)))\n\n prob.set_solver_print(level=0)\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['state_eq.y2_command'], 12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(prob.model.nonlinear_solver._iter_count, 8)\n\n def test_sellar_state_connection_fd_system(self):\n # Sellar model closes loop with state connection instead of a cycle.\n # This test is just fd.\n prob = om.Problem(model=SellarStateConnection(nonlinear_solver=om.NewtonSolver(solve_subsystems=False)))\n\n prob.model.approx_totals(method='fd')\n\n prob.setup()\n prob.set_solver_print(level=0)\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['state_eq.y2_command'], 12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(prob.model.nonlinear_solver._iter_count, 6)\n\n def test_sellar_specify_linear_solver(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n proms = ['x', 'z', 'y1', 'state_eq.y2_actual', 'state_eq.y2_command', 'd1.y2', 'd2.y2']\n sub = model.add_subsystem('sub', om.Group(), promotes=proms)\n\n subgrp = sub.add_subsystem('state_eq_group', om.Group(),\n promotes=['state_eq.y2_actual', 'state_eq.y2_command'])\n subgrp.linear_solver = om.ScipyKrylov()\n subgrp.add_subsystem('state_eq', StateConnection())\n\n sub.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1'])\n sub.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1'])\n\n model.connect('state_eq.y2_command', 'd1.y2')\n model.connect('d2.y2', 'state_eq.y2_actual')\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),\n promotes=['x', 'z', 'y1', 'obj'])\n model.connect('d2.y2', 'obj_cmp.y2')\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2'])\n model.connect('d2.y2', 'con_cmp2.y2')\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n\n # Use bad settings for this one so that problem doesn't converge.\n # That way, we test that we are really using Newton's Lin Solver\n # instead.\n model.linear_solver = om.ScipyKrylov()\n model.linear_solver.options['maxiter'] = 1\n\n # The good solver\n model.nonlinear_solver.linear_solver = om.ScipyKrylov()\n\n prob.set_solver_print(level=0)\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['state_eq.y2_command'], 
12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(model.nonlinear_solver._iter_count, 8)\n self.assertEqual(model.linear_solver._iter_count, 0)\n self.assertGreater(model.nonlinear_solver.linear_solver._iter_count, 0)\n\n def test_sellar_specify_linear_direct_solver(self):\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n proms = ['x', 'z', 'y1', 'state_eq.y2_actual', 'state_eq.y2_command', 'd1.y2', 'd2.y2']\n sub = model.add_subsystem('sub', om.Group(), promotes=proms)\n\n subgrp = sub.add_subsystem('state_eq_group', om.Group(),\n promotes=['state_eq.y2_actual', 'state_eq.y2_command'])\n subgrp.linear_solver = om.ScipyKrylov()\n subgrp.add_subsystem('state_eq', StateConnection())\n\n sub.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1'])\n sub.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1'])\n\n model.connect('state_eq.y2_command', 'd1.y2')\n model.connect('d2.y2', 'state_eq.y2_actual')\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),\n promotes=['x', 'z', 'y1', 'obj'])\n model.connect('d2.y2', 'obj_cmp.y2')\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2'])\n model.connect('d2.y2', 'con_cmp2.y2')\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n\n # Use bad settings for this one so that problem doesn't converge.\n # That way, we test that we are really using Newton's Lin Solver\n # instead.\n sub.linear_solver = om.ScipyKrylov()\n sub.linear_solver.options['maxiter'] = 1\n\n # The good solver\n model.nonlinear_solver.linear_solver = om.DirectSolver()\n\n prob.set_solver_print(level=0)\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['state_eq.y2_command'], 12.05848819, .00001)\n\n # Make sure we aren't iterating like crazy\n self.assertLess(model.nonlinear_solver._iter_count, 8)\n self.assertEqual(model.linear_solver._iter_count, 0)\n\n def test_solve_subsystems_basic(self):\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n g1.nonlinear_solver.options['rtol'] = 1.0e-5\n g1.linear_solver = om.DirectSolver(assemble_jac=True)\n g1.options['assembled_jac_type'] = 'dense'\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n g2.nonlinear_solver.options['rtol'] = 1.0e-5\n g2.linear_solver = om.DirectSolver(assemble_jac=True)\n g2.options['assembled_jac_type'] = 'dense'\n\n model.nonlinear_solver = om.NewtonSolver()\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n model.nonlinear_solver.options['solve_subsystems'] = True\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_basic_csc(self):\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n 
g1.options['assembled_jac_type'] = 'dense'\n g1.linear_solver = om.DirectSolver(assemble_jac=True)\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver(assemble_jac=True)\n g2.options['assembled_jac_type'] = 'dense'\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_basic_dense_jac(self):\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_basic_dense_jac_scaling(self):\n prob = om.Problem(model=DoubleSellar(units=None, scaling=True))\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_basic_dense_jac_units_scaling(self):\n prob = om.Problem(model=DoubleSellar(units=True, scaling=True))\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.nonlinear_solver.linesearch = None\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.nonlinear_solver.linesearch = None\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.nonlinear_solver.linesearch = None\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.0533333333, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.0533333333, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_assembled_jac_top(self):\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = 
om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_assembled_jac_top_csc(self):\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_assembled_jac_top_implicit(self):\n prob = om.Problem(model=DoubleSellarImplicit())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_assembled_jac_top_implicit_scaling(self):\n prob = om.Problem(model=DoubleSellarImplicit(scaling=True))\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_assembled_jac_top_implicit_scaling_units(self):\n prob = om.Problem(model=DoubleSellarImplicit(units=True, scaling=True))\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.nonlinear_solver.linesearch = None\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.nonlinear_solver.linesearch = None\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=True)\n model.nonlinear_solver.linesearch = None\n model.linear_solver = 
om.ScipyKrylov(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.053333333, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.053333333, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_assembled_jac_subgroup(self):\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g1.linear_solver = om.DirectSolver(assemble_jac=True)\n model.options['assembled_jac_type'] = 'dense'\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False, rtol=1.0e-5)\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n model.linear_solver = om.ScipyKrylov()\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n def test_solve_subsystems_internals(self):\n # Here we test that this feature is doing what it should do by counting the\n # number of calls in various places.\n\n class CountNewton(om.NewtonSolver):\n \"\"\" This version of Newton also counts how many times it runs in total.\"\"\"\n\n def __init__(self, **kwargs):\n super(CountNewton, self).__init__(**kwargs)\n self.options['solve_subsystems'] = True\n self.total_count = 0\n\n def _single_iteration(self):\n super(CountNewton, self)._single_iteration()\n self.total_count += 1\n\n class CountDS(om.DirectSolver):\n \"\"\" This version of DirectSolver also counts how many times it linearizes\"\"\"\n\n def __init__(self, **kwargs):\n super(CountDS, self).__init__(**kwargs)\n self.lin_count = 0\n\n def _linearize(self):\n super(CountDS, self)._linearize()\n self.lin_count += 1\n\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n # each SubSellar group converges itself\n g1 = model.g1\n g1.nonlinear_solver = CountNewton()\n g1.nonlinear_solver.options['rtol'] = 1.0e-5\n g1.linear_solver = CountDS() # used for derivatives\n\n g2 = model.g2\n g2.nonlinear_solver = CountNewton()\n g2.nonlinear_solver.options['rtol'] = 1.0e-5\n g2.linear_solver = om.DirectSolver()\n\n # Converge the outer loop with Gauss Seidel, with a looser tolerance.\n model.nonlinear_solver = om.NewtonSolver()\n model.linear_solver = om.ScipyKrylov()\n\n # Enforce behavior: max_sub_solves = 0 means we run once during init\n\n model.nonlinear_solver.options['maxiter'] = 5\n model.nonlinear_solver.options['solve_subsystems'] = True\n model.nonlinear_solver.options['max_sub_solves'] = 0\n prob.set_solver_print(level=0)\n\n prob.setup()\n prob.run_model()\n\n # Verifying subsolvers ran\n self.assertEqual(g1.nonlinear_solver.total_count, 2)\n self.assertEqual(g2.nonlinear_solver.total_count, 2)\n self.assertEqual(g1.linear_solver.lin_count, 2)\n\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n # each SubSellar group converges itself\n g1 = model.g1\n g1.nonlinear_solver = CountNewton()\n g1.nonlinear_solver.options['rtol'] = 1.0e-5\n g1.linear_solver = CountDS() # used for derivatives\n\n g2 = model.g2\n g2.nonlinear_solver = CountNewton()\n g2.nonlinear_solver.options['rtol'] = 1.0e-5\n g2.linear_solver = om.DirectSolver()\n\n # Converge the outer loop with Gauss Seidel, with a looser tolerance.\n model.nonlinear_solver = om.NewtonSolver()\n 
model.linear_solver = om.ScipyKrylov()\n\n # Enforce behavior: baseline\n\n model.nonlinear_solver.options['maxiter'] = 5\n model.nonlinear_solver.options['solve_subsystems'] = True\n model.nonlinear_solver.options['max_sub_solves'] = 5\n prob.set_solver_print(level=0)\n\n prob.setup()\n prob.run_model()\n\n # Verifying subsolvers ran\n self.assertEqual(g1.nonlinear_solver.total_count, 5)\n self.assertEqual(g2.nonlinear_solver.total_count, 5)\n self.assertEqual(g1.linear_solver.lin_count, 5)\n\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n # each SubSellar group converges itself\n g1 = model.g1\n g1.nonlinear_solver = CountNewton()\n g1.nonlinear_solver.options['rtol'] = 1.0e-5\n g1.linear_solver = CountDS() # used for derivatives\n\n g2 = model.g2\n g2.nonlinear_solver = CountNewton()\n g2.nonlinear_solver.options['rtol'] = 1.0e-5\n g2.linear_solver = om.DirectSolver()\n\n # Converge the outer loop with Gauss Seidel, with a looser tolerance.\n model.nonlinear_solver = om.NewtonSolver()\n model.linear_solver = om.ScipyKrylov()\n\n # Enforce behavior: max_sub_solves = 1 means we run during init and first iteration of iter_execute\n\n model.nonlinear_solver.options['maxiter'] = 5\n model.nonlinear_solver.options['solve_subsystems'] = True\n model.nonlinear_solver.options['max_sub_solves'] = 1\n prob.set_solver_print(level=0)\n\n prob.setup()\n prob.run_model()\n\n # Verifying subsolvers ran\n self.assertEqual(g1.nonlinear_solver.total_count, 4)\n self.assertEqual(g2.nonlinear_solver.total_count, 4)\n self.assertEqual(g1.linear_solver.lin_count, 4)\n\n def test_maxiter_one(self):\n # Fix bug when maxiter was set to 1.\n # This bug caused linearize to run before apply in this case.\n\n class ImpComp(om.ImplicitComponent):\n\n def setup(self):\n self.add_input('a', val=1.)\n self.add_output('x', val=0.)\n self.applied = False\n\n self.declare_partials(of='*', wrt='*')\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n residuals['x'] = np.exp(outputs['x']) - \\\n inputs['a']**2 * outputs['x']**2\n self.applied = True\n\n def solve_nonlinear(self, inputs, outputs):\n pass\n\n def linearize(self, inputs, outputs, jacobian):\n jacobian['x', 'x'] = np.exp(outputs['x']) - \\\n 2 * inputs['a']**2 * outputs['x']\n jacobian['x', 'a'] = -2 * inputs['a'] * outputs['x']**2\n\n if not self.applied:\n raise RuntimeError(\"Bug! 
Linearize called before Apply!\")\n\n prob = om.Problem()\n root = prob.model\n root.add_subsystem('p1', om.IndepVarComp('a', 1.0))\n root.add_subsystem('comp', ImpComp())\n root.connect('p1.a', 'comp.a')\n\n root.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n root.nonlinear_solver.options['maxiter'] = 1\n prob.set_solver_print(level=0)\n\n prob.setup()\n prob.run_model()\n\n def test_err_on_non_converge(self):\n # Raise AnalysisError when it fails to converge\n\n prob = om.Problem()\n nlsolver = om.NewtonSolver(solve_subsystems=False)\n prob.model = SellarDerivatives(nonlinear_solver=nlsolver,\n linear_solver=om.LinearBlockGS())\n\n nlsolver.options['err_on_non_converge'] = True\n nlsolver.options['maxiter'] = 1\n\n prob.setup()\n prob.set_solver_print(level=0)\n\n with self.assertRaises(om.AnalysisError) as context:\n prob.run_driver()\n\n msg = \"Solver 'NL: Newton' on system '' failed to converge in 1 iterations.\"\n self.assertEqual(str(context.exception), msg)\n\n def test_reraise_child_analysiserror(self):\n # Raise AnalysisError when it fails to converge\n\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver()\n g1.nonlinear_solver.options['maxiter'] = 1\n g1.nonlinear_solver.options['err_on_non_converge'] = True\n g1.nonlinear_solver.options['solve_subsystems'] = True\n g1.linear_solver = om.DirectSolver(assemble_jac=True)\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver()\n g2.nonlinear_solver.options['maxiter'] = 1\n g2.nonlinear_solver.options['err_on_non_converge'] = True\n g2.nonlinear_solver.options['solve_subsystems'] = True\n g2.linear_solver = om.DirectSolver(assemble_jac=True)\n\n model.nonlinear_solver = om.NewtonSolver()\n model.linear_solver = om.ScipyKrylov(assemble_jac=True)\n model.nonlinear_solver.options['solve_subsystems'] = True\n model.nonlinear_solver.options['err_on_non_converge'] = True\n model.nonlinear_solver.options['reraise_child_analysiserror'] = True\n\n prob.setup()\n\n with self.assertRaises(om.AnalysisError) as context:\n prob.run_model()\n\n msg = \"Solver 'NL: Newton' on system 'g1' failed to converge in 1 iterations.\"\n self.assertEqual(str(context.exception), msg)\n\n def test_err_message_inf_nan(self):\n\n prob = om.Problem()\n nlsolver = om.NewtonSolver(solve_subsystems=False)\n prob.model = SellarDerivatives(nonlinear_solver=nlsolver,\n linear_solver=om.LinearBlockGS())\n\n nlsolver.options['err_on_non_converge'] = True\n nlsolver.options['maxiter'] = 1\n\n prob.setup()\n prob.set_solver_print(level=0)\n\n prob['x'] = np.nan\n\n with self.assertRaises(om.AnalysisError) as context:\n prob.run_model()\n\n msg = \"Solver 'NL: Newton' on system '': residuals contain 'inf' or 'NaN' after 0 iterations.\"\n self.assertEqual(str(context.exception), msg)\n\n def test_relevancy_for_newton(self):\n\n class TestImplCompSimple(om.ImplicitComponent):\n\n def setup(self):\n self.add_input('a', val=1.)\n self.add_output('x', val=0.)\n\n self.declare_partials(of='*', wrt='*')\n\n def apply_nonlinear(self, inputs, outputs, residuals):\n residuals['x'] = np.exp(outputs['x']) - \\\n inputs['a']**2 * outputs['x']**2\n\n def linearize(self, inputs, outputs, jacobian):\n jacobian['x', 'x'] = np.exp(outputs['x']) - \\\n 2 * inputs['a']**2 * outputs['x']\n jacobian['x', 'a'] = -2 * inputs['a'] * outputs['x']**2\n\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('p1', om.IndepVarComp('x', 3.0))\n model.add_subsystem('icomp', TestImplCompSimple())\n 
model.add_subsystem('ecomp', om.ExecComp('y = x*p', p=1.0))\n\n model.connect('p1.x', 'ecomp.x')\n model.connect('icomp.x', 'ecomp.p')\n\n model.add_design_var('p1.x', 3.0)\n model.add_objective('ecomp.y')\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n model.linear_solver = om.ScipyKrylov()\n\n prob.setup()\n\n prob.run_model()\n\n J = prob.compute_totals()\n assert_near_equal(J['ecomp.y', 'p1.x'][0][0], -0.703467422498, 1e-6)\n\n def test_error_specify_solve_subsystems(self):\n # Raise AnalysisError when it fails to converge\n\n prob = om.Problem()\n model = prob.model\n\n model.nonlinear_solver = om.NewtonSolver()\n\n prob.setup()\n\n with self.assertRaises(ValueError) as context:\n prob.run_model()\n\n msg = \"NewtonSolver in Group (<model>): solve_subsystems must be set by the user.\"\n self.assertEqual(str(context.exception), msg)\n\n\n\nclass TestNewtonFeatures(unittest.TestCase):\n\n def test_feature_basic(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n\n prob.setup()\n\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n def test_feature_maxiter(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.DirectSolver()\n\n newton = model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n newton.options['maxiter'] = 2\n\n prob.setup()\n\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.5878516779, .00001)\n assert_near_equal(prob['y2'], 12.0607416105, .00001)\n\n def test_feature_rtol(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = 
prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.DirectSolver()\n\n newton = model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n newton.options['rtol'] = 1e-3\n\n prob.setup()\n\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.5878516779, .00001)\n assert_near_equal(prob['y2'], 12.0607416105, .00001)\n\n def test_feature_atol(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.DirectSolver()\n\n newton = model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n newton.options['atol'] = 1e-4\n\n prob.setup()\n\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.5882856302, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n def test_feature_linear_solver(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, \\\n SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.LinearBlockGS()\n\n newton = model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n\n newton.linear_solver = om.DirectSolver()\n\n prob.setup()\n\n prob.run_model()\n\n assert_near_equal(prob['y1'], 25.58830273, .00001)\n assert_near_equal(prob['y2'], 12.05848819, .00001)\n\n def test_feature_max_sub_solves(self):\n import numpy as np\n\n import openmdao.api 
as om\n from openmdao.test_suite.components.double_sellar import SubSellar\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('g1', SubSellar())\n model.add_subsystem('g2', SubSellar())\n\n model.connect('g1.y2', 'g2.x')\n model.connect('g2.y2', 'g1.x')\n\n # Converge the outer loop with Gauss Seidel, with a looser tolerance.\n model.nonlinear_solver = om.NewtonSolver()\n model.linear_solver = om.DirectSolver()\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n g1.nonlinear_solver.options['rtol'] = 1.0e-5\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n g2.nonlinear_solver.options['rtol'] = 1.0e-5\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver()\n model.linear_solver = om.ScipyKrylov()\n\n model.nonlinear_solver.options['solve_subsystems'] = True\n model.nonlinear_solver.options['max_sub_solves'] = 0\n\n prob.setup()\n prob.run_model()\n\n def test_feature_err_on_non_converge(self):\n import numpy as np\n\n import openmdao.api as om\n from openmdao.test_suite.components.sellar import SellarDis1withDerivatives, SellarDis2withDerivatives\n\n prob = om.Problem()\n model = prob.model\n\n model.add_subsystem('px', om.IndepVarComp('x', 1.0), promotes=['x'])\n model.add_subsystem('pz', om.IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])\n\n model.add_subsystem('d1', SellarDis1withDerivatives(), promotes=['x', 'z', 'y1', 'y2'])\n model.add_subsystem('d2', SellarDis2withDerivatives(), promotes=['z', 'y1', 'y2'])\n\n model.add_subsystem('obj_cmp', om.ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',\n z=np.array([0.0, 0.0]), x=0.0),\n promotes=['obj', 'x', 'z', 'y1', 'y2'])\n\n model.add_subsystem('con_cmp1', om.ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])\n model.add_subsystem('con_cmp2', om.ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])\n\n model.linear_solver = om.DirectSolver()\n\n newton = model.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n newton.options['maxiter'] = 1\n newton.options['err_on_non_converge'] = True\n\n prob.setup()\n\n try:\n prob.run_model()\n except om.AnalysisError:\n pass\n\n def test_solve_subsystems_basic(self):\n import openmdao.api as om\n from openmdao.test_suite.components.double_sellar import DoubleSellar\n\n prob = om.Problem(model=DoubleSellar())\n model = prob.model\n\n g1 = model.g1\n g1.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n g1.nonlinear_solver.options['rtol'] = 1.0e-5\n g1.linear_solver = om.DirectSolver()\n\n g2 = model.g2\n g2.nonlinear_solver = om.NewtonSolver(solve_subsystems=False)\n g2.nonlinear_solver.options['rtol'] = 1.0e-5\n g2.linear_solver = om.DirectSolver()\n\n model.nonlinear_solver = om.NewtonSolver()\n model.linear_solver = om.ScipyKrylov()\n\n model.nonlinear_solver.options['solve_subsystems'] = True\n\n prob.setup()\n prob.run_model()\n\n assert_near_equal(prob['g1.y1'], 0.64, .00001)\n assert_near_equal(prob['g1.y2'], 0.80, .00001)\n assert_near_equal(prob['g2.y1'], 0.64, .00001)\n assert_near_equal(prob['g2.y2'], 0.80, .00001)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.array",
"numpy.meshgrid"
],
[
"numpy.hstack",
"scipy.sparse.coo_matrix",
"numpy.nonzero",
"numpy.arange",
"numpy.cumsum",
"numpy.array",
"numpy.zeros"
],
[
"numpy.random.seed",
"numpy.arange",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.testing.assert_array_almost_equal"
],
[
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
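
The OpenMDAO test file in the row above repeatedly drives om.NewtonSolver against implicit residuals; the simplest one, used by both ImpComp and TestImplCompSimple in that blob, is R(x) = exp(x) - a**2 * x**2 with the hand-coded partial dR/dx = exp(x) - 2*a**2*x. As a minimal sketch of the Newton update those tests exercise (x <- x - R(x)/R'(x)), assuming nothing about OpenMDAO's internals beyond the residual and jacobian shown in the blob:

import numpy as np

def residual(x, a=1.0):
    # Same residual as ImpComp.apply_nonlinear in the test file above.
    return np.exp(x) - a**2 * x**2

def d_residual_dx(x, a=1.0):
    # Same partial as ImpComp.linearize in the test file above.
    return np.exp(x) - 2.0 * a**2 * x

def newton_scalar(x0, a=1.0, atol=1e-12, maxiter=20):
    """Plain scalar Newton iteration: x <- x - R(x) / R'(x) until |R(x)| < atol."""
    x = x0
    for _ in range(maxiter):
        r = residual(x, a)
        if abs(r) < atol:
            break
        x -= r / d_residual_dx(x, a)
    return x

root = newton_scalar(-1.0)
print(root, residual(root))  # root is near -0.70347 and the residual is ~0

For a = 1 this converges to x of roughly -0.70347, which is also why test_relevancy_for_newton in the blob expects a total derivative of about -0.703467: there y = x * p with p equal to this root (icomp's input 'a' is left at its default), so dy/dx = p.
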
pyensemble/wildwood | [
"b261cbd7d0b425b50647f719ab99c1d89f477d5c"
] | [
"plot_signals_weighted_depth.py"
] | [
"\nimport logging\nfrom matplotlib.cm import get_cmap\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pandas as pd\n\nfrom wildwood.datasets import get_signal, make_regression\nfrom wildwood.forest import ForestRegressor\n\nfrom wildwood._binning import Binner\n\npd.set_option(\"display.max_columns\", 20)\npd.set_option(\"display.precision\", 2)\n\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n)\n\ncolormap = get_cmap(\"tab20\")\n\nn_samples_train = 5000\nn_samples_test = 1000\nrandom_state = 42\n\n\nnoise = 0.03\naggregation = True\nn_estimators = 100\n\nstep = 1 / noise ** 2\n\nsignal = \"heavisine\"\n\nX_train, y_train = make_regression(\n n_samples=n_samples_train, signal=signal, noise=noise, random_state=random_state\n)\nX_test = np.linspace(0, 1, num=n_samples_test)\n\n#\n# reg = ForestRegressor(\n# random_state=random_state,\n# aggregation=aggregation,\n# max_features=1,\n# n_estimators=n_estimators,\n# step=step,\n# )\n#\n# reg.fit(X_train.reshape(n_samples_train, 1), y_train)\n# y_pred = reg.predict(X_test.reshape(n_samples_test, 1))\n#\n# df = reg.get_nodes(0)\n\n# print(df)\n\n# exit(0)\n\nsignals = [\"heavisine\", \"bumps\", \"blocks\", \"doppler\"]\n\n\ndef plot_weighted_depth(signal):\n\n X_train, y_train = make_regression(\n n_samples=n_samples_train, signal=signal, noise=noise, random_state=random_state\n )\n X_train = X_train.reshape(-1, 1)\n X_test = np.linspace(0, 1, num=n_samples_test).reshape(-1, 1)\n\n binner = Binner().fit(X_train)\n X_test_binned = binner.transform(X_test)\n\n reg = ForestRegressor(\n random_state=random_state,\n aggregation=aggregation,\n n_estimators=n_estimators,\n step=step,\n )\n\n reg.fit(X_train, y_train)\n y_pred = reg.predict(X_test)\n weighted_depths = reg._weighted_depth(X_test.reshape(n_samples_test, 1))\n\n # print(\"weighted_depths.shape:\", weighted_depths.shape)\n\n # avg_weighted_depth = weighted_depths.mean(axis=0)\n\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(6, 5))\n\n plot_samples = ax1.plot(\n X_train, y_train, color=colormap.colors[1], lw=2, label=\"Samples\"\n )[0]\n plot_signal = ax1.plot(\n X_test_binned / 255,\n get_signal(X_test_binned / 255, signal),\n lw=2,\n color=colormap.colors[0],\n label=\"Signal\",\n )[0]\n plot_prediction = ax2.plot(\n X_test.ravel(), y_pred, lw=2, color=colormap.colors[2], label=\"Prediction\"\n )[0]\n # ax3.plot(\n # X_test,\n # weighted_depths[:, 1:],\n # lw=1,\n # color=colormap.colors[5],\n # alpha=0.2,\n # label=\"Weighted depths\",\n # )\n plot_weighted_depths = ax3.plot(\n X_test, weighted_depths.T, lw=1, color=colormap.colors[5], alpha=0.2\n )[0]\n\n plot_mean_weighted_depths = ax3.plot(\n X_test,\n weighted_depths.mean(axis=0),\n lw=2,\n color=colormap.colors[4],\n label=\"Mean weighted depth\",\n )[0]\n filename = \"weighted_depths_%s.pdf\" % signal\n fig.subplots_adjust(hspace=0.1)\n fig.legend(\n (\n plot_signal,\n plot_samples,\n plot_mean_weighted_depths,\n plot_weighted_depths,\n plot_prediction,\n ),\n (\n \"Signal\",\n \"Samples\",\n \"Average weighted depths\",\n \"Weighted depths\",\n \"Prediction\",\n ),\n fontsize=12,\n loc=\"upper center\",\n bbox_to_anchor=(0.5, 1.0),\n ncol=3,\n )\n # plt.savefig(filename)\n logging.info(\"Saved the decision functions in '%s'\" % filename)\n\n\nfor signal in signals:\n plot_weighted_depth(signal)\n\nplt.show()\n"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.cm.get_cmap",
"pandas.set_option",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
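
The wildwood script in the row above builds its training data from named 1-D test signals ('heavisine', 'bumps', 'blocks', 'doppler') through make_regression and get_signal, with Gaussian noise of standard deviation 0.03. As a rough, self-contained stand-in for that setup, the sketch below uses the classic Donoho-Johnstone heavisine function; whether wildwood's get_signal uses exactly this definition is an assumption, so treat it as illustrative only:

import numpy as np

def heavisine(x):
    # Classic Donoho-Johnstone heavisine test signal (assumed form, not taken
    # from wildwood): 4*sin(4*pi*x) - sign(x - 0.3) - sign(0.72 - x).
    return 4.0 * np.sin(4.0 * np.pi * x) - np.sign(x - 0.3) - np.sign(0.72 - x)

rng = np.random.default_rng(42)            # mirrors random_state = 42 above
n_samples_train, noise = 5000, 0.03        # same constants as the script
X_train = rng.uniform(0.0, 1.0, n_samples_train)
y_train = heavisine(X_train) + noise * rng.standard_normal(n_samples_train)
X_test = np.linspace(0.0, 1.0, num=1000)   # evaluation grid, as in the script

The script's step = 1 / noise**2 presumably ties the forest's exponential-weights aggregation rate to the noise variance, which is why the noise level appears in both the data generation and the regressor configuration.
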
obkyrush/jax | [
"8662c5f660678b6320a1a8fc46e917e97c399b57"
] | [
"jax/_src/random.py"
] | [
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom functools import partial\nfrom typing import Any, Optional, Sequence, Union\nimport warnings\n\nimport numpy as np\n\nfrom jax import lax\nfrom jax import core\nfrom jax import numpy as jnp\nfrom jax._src import dtypes\nfrom jax.core import NamedShape\nfrom jax._src.api import jit, vmap\nfrom jax._src.numpy.lax_numpy import _constant_like, _convert_and_clip_integer, _check_arraylike\nfrom jax.lib import xla_bridge\nfrom jax.lib import xla_client\nfrom jax.lib import cuda_prng\nfrom jax.numpy.linalg import cholesky, svd, eigh\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.interpreters import xla\nfrom jax._src.util import prod\n\n\nArray = Any\nRealArray = Array\nIntegerArray = Array\n# TODO: Import or define these to match\n# https://github.com/numpy/numpy/blob/main/numpy/typing/_dtype_like.py.\nDTypeLikeInt = Any\nDTypeLikeFloat = Any\n\n\n_UINT_DTYPES = {8: jnp.uint8, 16: jnp.uint16, 32: jnp.uint32, 64: jnp.uint64}\n\n\ndef PRNGKey(seed: int) -> jnp.ndarray:\n \"\"\"Create a pseudo-random number generator (PRNG) key given an integer seed.\n\n Args:\n seed: a 64- or 32-bit integer used as the value of the key.\n\n Returns:\n A PRNG key, which is modeled as an array of shape (2,) and dtype uint32. 
The\n key is constructed from a 64-bit seed by effectively bit-casting to a pair\n of uint32 values (or from a 32-bit seed by first padding out with zeros).\n \"\"\"\n # Avoid OverflowError in X32 mode by first converting ints to int64.\n # This breaks JIT invariance of PRNGKey for large ints, but supports the\n # common use-case of instantiating PRNGKey with Python hashes in X32 mode.\n if isinstance(seed, int):\n seed_arr = jnp.asarray(np.int64(seed))\n else:\n seed_arr = jnp.asarray(seed)\n if seed_arr.shape:\n raise TypeError(f\"PRNGKey seed must be a scalar; got {seed!r}.\")\n if not np.issubdtype(seed_arr.dtype, np.integer):\n raise TypeError(f\"PRNGKey seed must be an integer; got {seed!r}\")\n\n convert = lambda k: lax.reshape(lax.convert_element_type(k, np.uint32), [1])\n k1 = convert(lax.shift_right_logical(seed_arr, lax._const(seed_arr, 32)))\n k2 = convert(jnp.bitwise_and(seed_arr, np.uint32(0xFFFFFFFF)))\n return lax.concatenate([k1, k2], 0)\n\ndef _is_prng_key(key: jnp.ndarray) -> bool:\n try:\n return key.shape == (2,) and key.dtype == np.uint32\n except AttributeError:\n return False\n\n\n### utilities\n\n\ndef _make_rotate_left(dtype):\n if not jnp.issubdtype(dtype, np.integer):\n raise TypeError(\"_rotate_left only accepts integer dtypes.\")\n nbits = np.array(jnp.iinfo(dtype).bits, dtype)\n\n def _rotate_left(x, d):\n if lax.dtype(d) != dtype:\n d = lax.convert_element_type(d, dtype)\n if lax.dtype(x) != dtype:\n x = lax.convert_element_type(x, dtype)\n return lax.shift_left(x, d) | lax.shift_right_logical(x, nbits - d)\n return _rotate_left\n\n\ndef _bit_stats(bits):\n \"\"\"This is a debugging function to compute the statistics of bit fields.\"\"\"\n return np.array([list(map(int, np.binary_repr(x, 64))) for x in bits]).mean(0)\n\n\n### hash function and split\n\ndef _threefry2x32_abstract_eval(*args):\n if any(a.dtype != jnp.uint32 for a in args):\n raise TypeError(\"Arguments to threefry2x32 must have uint32 type, got {}\"\n .format(args))\n if all(isinstance(arg, core.ShapedArray) for arg in args):\n shape = lax._broadcasting_shape_rule(*args)\n named_shape = core.join_named_shapes(*(a.named_shape for a in args))\n aval = core.ShapedArray(shape, jnp.dtype(jnp.uint32), named_shape=named_shape)\n else:\n aval = core.UnshapedArray(jnp.dtype(jnp.uint32))\n return (aval,) * 2\n\nrotate_left = _make_rotate_left(np.uint32)\n\ndef apply_round(v, rot):\n v = v[:]\n v[0] = v[0] + v[1]\n v[1] = rotate_left(v[1], rot)\n v[1] = v[0] ^ v[1]\n return v\n\ndef rotate_list(xs):\n return xs[1:] + xs[:1]\n\ndef rolled_loop_step(i, state):\n x, ks, rotations = state\n for r in rotations[0]:\n x = apply_round(x, r)\n new_x = [x[0] + ks[0], x[1] + ks[1] + jnp.asarray(i + 1, dtype=np.uint32)]\n return new_x, rotate_list(ks), rotate_list(rotations)\n\ndef _threefry2x32_lowering(key1, key2, x1, x2, use_rolled_loops=True):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n x = [x1, x2]\n\n rotations = [np.array([13, 15, 26, 6], dtype=np.uint32),\n np.array([17, 29, 16, 24], dtype=np.uint32)]\n ks = [key1, key2, key1 ^ key2 ^ np.uint32(0x1BD11BDA)]\n\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1]\n\n if use_rolled_loops:\n x, _, _ = lax.fori_loop(0, 5, rolled_loop_step, (x, rotate_list(ks), rotations))\n\n else:\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + 
ks[2] + np.uint32(1)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + np.uint32(2)\n\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1] + np.uint32(3)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + ks[2] + np.uint32(4)\n\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + np.uint32(5)\n\n return tuple(x)\n\n\ndef _threefry2x32_gpu_translation_rule(c, k1, k2, x1, x2):\n shape = lax.broadcast_shapes(\n c.get_shape(k1).dimensions(), c.get_shape(k2).dimensions(),\n c.get_shape(x1).dimensions(), c.get_shape(x2).dimensions())\n rank = len(shape)\n if 0 in shape:\n zeros = xla_client.ops.Broadcast(\n xla_bridge.constant(c, np.array(0, np.uint32)), shape)\n return xla_client.ops.Tuple(c, [zeros, zeros])\n def _broadcast(x):\n ndims = c.get_shape(x).rank()\n return xla_client.ops.BroadcastInDim(x, shape,\n tuple(range(rank - ndims, rank)))\n return cuda_prng.threefry2x32(\n c, (_broadcast(k1), _broadcast(k2)), (_broadcast(x1), _broadcast(x2)))\n\nthreefry2x32_p = core.Primitive(\"threefry2x32\")\nthreefry2x32_p.multiple_results = True\nthreefry2x32_p.def_impl(partial(xla.apply_primitive, threefry2x32_p))\nthreefry2x32_p.def_abstract_eval(_threefry2x32_abstract_eval)\nbatching.defbroadcasting(threefry2x32_p)\nxla.translations_with_avals[threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=False),\n multiple_results=True, with_avals=True)\nxla.backend_specific_translations['cpu'][threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=True),\n multiple_results=True)\nif cuda_prng:\n xla.backend_specific_translations['gpu'][threefry2x32_p] = \\\n _threefry2x32_gpu_translation_rule\n\n@jit\ndef threefry_2x32(keypair, count):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n key1, key2 = keypair\n if not lax.dtype(key1) == lax.dtype(key2) == lax.dtype(count) == np.uint32:\n msg = \"threefry_2x32 requires uint32 arguments, got {}\"\n raise TypeError(msg.format([lax.dtype(x) for x in [key1, key2, count]]))\n\n odd_size = count.size % 2\n if odd_size:\n x = list(jnp.split(jnp.concatenate([count.ravel(), np.uint32([0])]), 2))\n else:\n x = list(jnp.split(count.ravel(), 2))\n\n x = threefry2x32_p.bind(key1, key2, x[0], x[1])\n out = jnp.concatenate(x)\n assert out.dtype == np.uint32\n return lax.reshape(out[:-1] if odd_size else out, count.shape)\n\n\ndef split(key: jnp.ndarray, num: int = 2) -> jnp.ndarray:\n \"\"\"Splits a PRNG key into `num` new keys by adding a leading axis.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n num: optional, a positive integer indicating the number of keys to produce\n (default 2).\n\n Returns:\n An array with shape (num, 2) and dtype uint32 representing `num` new keys.\n \"\"\"\n return _split(key, int(num)) # type: ignore\n\n@partial(jit, static_argnums=(1,))\ndef _split(key, num) -> jnp.ndarray:\n counts = lax.iota(np.uint32, num * 2)\n return lax.reshape(threefry_2x32(key, counts), (num, 2))\n\n\ndef fold_in(key: jnp.ndarray, data: int) -> jnp.ndarray:\n \"\"\"Folds in data to a PRNG key to form a new PRNG key.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n data: a 32bit integer representing 
data to be folded in to the key.\n\n Returns:\n A new PRNGKey that is a deterministic function of the inputs and is\n statistically safe for producing a stream of new pseudo-random values.\n \"\"\"\n return _fold_in(key, jnp.uint32(data))\n\n@jit\ndef _fold_in(key, data):\n return threefry_2x32(key, PRNGKey(data))\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _random_bits(key, bit_width, shape):\n \"\"\"Sample uniform random bits of given width and shape using PRNG key.\"\"\"\n if not _is_prng_key(key):\n raise TypeError(\"_random_bits got invalid prng key.\")\n if bit_width not in (8, 16, 32, 64):\n raise TypeError(\"requires 8-, 16-, 32- or 64-bit field width.\")\n shape = core.as_named_shape(shape)\n for name, size in shape.named_items:\n real_size = lax.psum(1, name)\n if real_size != size:\n raise ValueError(f\"The shape of axis {name} was specified as {size}, \"\n f\"but it really is {real_size}\")\n axis_index = lax.axis_index(name)\n key = fold_in(key, axis_index)\n size = prod(shape.positional)\n max_count = int(np.ceil(bit_width * size / 32))\n\n nblocks, rem = divmod(max_count, jnp.iinfo(np.uint32).max)\n\n if not nblocks:\n bits = threefry_2x32(key, lax.iota(np.uint32, rem))\n else:\n keys = split(key, nblocks + 1)\n subkeys, last_key = keys[:-1], keys[-1]\n blocks = vmap(threefry_2x32, in_axes=(0, None))(subkeys, lax.iota(np.uint32, jnp.iinfo(np.uint32).max))\n last = threefry_2x32(last_key, lax.iota(np.uint32, rem))\n bits = lax.concatenate([blocks.ravel(), last], 0)\n\n dtype = _UINT_DTYPES[bit_width]\n if bit_width == 64:\n bits = [lax.convert_element_type(x, dtype) for x in jnp.split(bits, 2)]\n bits = lax.shift_left(bits[0], dtype(32)) | bits[1]\n elif bit_width in [8, 16]:\n # this is essentially bits.view(dtype)[:size]\n bits = lax.bitwise_and(\n np.uint32(np.iinfo(dtype).max),\n lax.shift_right_logical(\n lax.broadcast(bits, (1,)),\n lax.mul(\n np.uint32(bit_width),\n lax.broadcasted_iota(np.uint32, (32 // bit_width, 1), 0)\n )\n )\n )\n bits = lax.reshape(bits, (np.uint32(max_count * 32 // bit_width),), (1, 0))\n bits = lax.convert_element_type(bits, dtype)[:size]\n return lax.reshape(bits, shape)\n\n\n### random samplers\n\n\ndef _check_shape(name, shape: Union[Sequence[int], NamedShape], *param_shapes):\n shape = core.as_named_shape(shape)\n\n if param_shapes:\n shape_ = lax.broadcast_shapes(shape.positional, *param_shapes)\n if shape.positional != shape_:\n msg = (\"{} parameter shapes must be broadcast-compatible with shape \"\n \"argument, and the result of broadcasting the shapes must equal \"\n \"the shape argument, but got result {} for shape argument {}.\")\n raise ValueError(msg.format(name, shape_, shape))\n\n\ndef uniform(key: jnp.ndarray,\n shape: Union[Sequence[int], NamedShape] = (),\n dtype: DTypeLikeFloat = dtypes.float_,\n minval: RealArray = 0.,\n maxval: RealArray = 1.) -> jnp.ndarray:\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n minval: optional, a minimum (inclusive) value broadcast-compatible with shape for the range (default 0).\n maxval: optional, a maximum (exclusive) value broadcast-compatible with shape for the range (default 1).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `uniform` must be a float dtype, \"\n f\"got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.as_named_shape(shape)\n return _uniform(key, shape, dtype, minval, maxval) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _uniform(key, shape, dtype, minval, maxval) -> jnp.ndarray:\n _check_shape(\"uniform\", shape)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"uniform only accepts floating point dtypes.\")\n\n minval = lax.convert_element_type(minval, dtype)\n maxval = lax.convert_element_type(maxval, dtype)\n minval = lax.broadcast_to_rank(minval, shape.positional_rank)\n maxval = lax.broadcast_to_rank(maxval, shape.positional_rank)\n\n finfo = jnp.finfo(dtype)\n nbits, nmant = finfo.bits, finfo.nmant\n\n if nbits not in (16, 32, 64):\n raise TypeError(\"uniform only accepts 32- or 64-bit dtypes.\")\n\n bits = _random_bits(key, nbits, shape)\n\n # The strategy here is to randomize only the mantissa bits with an exponent of\n # 1 (after applying the bias), then shift and scale to the desired range. The\n # bit-level transformation we use relies on Numpy and XLA having bit-for-bit\n # equivalent float representations, which might not be true on all platforms.\n float_bits = lax.bitwise_or(\n lax.shift_right_logical(bits, np.array(nbits - nmant, lax.dtype(bits))),\n np.array(1., dtype).view(_UINT_DTYPES[nbits]))\n floats = lax.bitcast_convert_type(float_bits, dtype) - np.array(1., dtype)\n return lax.max(\n minval,\n lax.reshape(floats * (maxval - minval) + minval, shape.positional))\n\n\ndef randint(key: jnp.ndarray,\n shape: Sequence[int],\n minval: IntegerArray,\n maxval: IntegerArray,\n dtype: DTypeLikeInt = dtypes.int_):\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: a tuple of nonnegative integers representing the shape.\n minval: int or array of ints broadcast-compatible with ``shape``, a minimum\n (inclusive) value for the range.\n maxval: int or array of ints broadcast-compatible with ``shape``, a maximum\n (exclusive) value for the range.\n dtype: optional, an int dtype for the returned values (default int64 if\n jax_enable_x64 is true, otherwise int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _randint(key, shape, minval, maxval, dtype)\n\n@partial(jit, static_argnums=(1, 4))\ndef _randint(key, shape, minval, maxval, dtype):\n _check_shape(\"randint\", shape, np.shape(minval), np.shape(maxval))\n if not jnp.issubdtype(dtype, np.integer):\n raise TypeError(f\"randint only accepts integer dtypes, got {dtype}\")\n\n _check_arraylike(\"randint\", minval, maxval)\n minval = jnp.asarray(minval)\n maxval = jnp.asarray(maxval)\n if not jnp.issubdtype(minval.dtype, np.integer):\n minval = minval.astype(int)\n if not jnp.issubdtype(maxval.dtype, np.integer):\n maxval = maxval.astype(int)\n\n # Flag where maxval is greater than 
the maximum value of dtype\n # in order to handle cases like randint(key, shape, 0, 256, 'uint8')\n maxval_out_of_range = lax.gt(\n maxval, _convert_and_clip_integer(jnp.array(jnp.iinfo(dtype).max, dtype), maxval.dtype))\n\n minval = _convert_and_clip_integer(minval, dtype)\n maxval = _convert_and_clip_integer(maxval, dtype)\n minval = lax.broadcast_to_rank(minval, len(shape))\n maxval = lax.broadcast_to_rank(maxval, len(shape))\n nbits = jnp.iinfo(dtype).bits\n\n if nbits not in (8, 16, 32, 64):\n raise TypeError(f\"randint only accepts 8-, 16-, 32-, or 64-bit dtypes, got {dtype}\")\n\n # This algorithm is biased whenever (maxval - minval) is not a power of 2.\n # We generate double the number of random bits required by the dtype so as to\n # reduce that bias.\n k1, k2 = split(key)\n rbits = lambda key: _random_bits(key, nbits, shape)\n higher_bits, lower_bits = rbits(k1), rbits(k2)\n\n unsigned_dtype = _UINT_DTYPES[nbits]\n span = lax.convert_element_type(maxval - minval, unsigned_dtype)\n\n # Ensure that span=1 when maxval <= minval, so minval is always returned;\n # https://github.com/google/jax/issues/222\n span = lax.select(maxval <= minval, lax.full_like(span, 1), span)\n\n # When maxval is out of range, the span has to be one larger.\n # If span is already the maximum representable value, this will wrap to zero,\n # causing remainders below to have no effect, which is the correct semantics.\n span = lax.select(\n maxval_out_of_range & (maxval > minval),\n lax.add(span, lax._const(span, 1)),\n span)\n\n # To compute a remainder operation on an integer that might have twice as many\n # bits as we can represent in the native unsigned dtype, we compute a\n # multiplier equal to 2**nbits % span. To avoid overflow, we use the identity:\n # (a * b) % N = [(a % N) * (b % N)] % N\n multiplier = lax.rem(lax._const(span, 2 ** (nbits // 2)), span)\n multiplier = lax.rem(lax.mul(multiplier, multiplier), span)\n\n random_offset = lax.add(lax.mul(lax.rem(higher_bits, span), multiplier),\n lax.rem(lower_bits, span))\n random_offset = lax.rem(random_offset, span)\n return lax.add(minval, lax.convert_element_type(random_offset, dtype))\n\n\ndef shuffle(key: jnp.ndarray, x: Array, axis: int = 0) -> jnp.ndarray:\n \"\"\"Shuffle the elements of an array uniformly at random along an axis.\n\n Args:\n key: a PRNGKey used as the random key.\n x: the array to be shuffled.\n axis: optional, an int axis along which to shuffle (default 0).\n\n Returns:\n A shuffled version of x.\n \"\"\"\n msg = (\"jax.random.shuffle is deprecated and will be removed in a future release. 
\"\n \"Use jax.random.permutation\")\n warnings.warn(msg, FutureWarning)\n return _shuffle(key, x, axis) # type: ignore\n\n\ndef permutation(key: jnp.ndarray, x: Array) -> jnp.ndarray:\n \"\"\"\n Permute elements of an array along its first axis or return a permuted range.\n\n If `x` is a multi-dimensional array, it is only shuffled along its\n first index.\n\n Args:n\n key: a PRNGKey used as the random key.\n x: the array or integer range to be shuffled.\n\n Returns:\n A shuffled version of x or array range\n \"\"\"\n if not np.ndim(x):\n # scalar case, must be a concrete integer\n if not np.issubdtype(lax.dtype(x), np.integer):\n raise TypeError(\"x must be an integer or at least 1-dimensional\")\n x = int(x) # type: ignore[assignment]\n return _shuffle(key, jnp.arange(x), 0)\n elif np.ndim(x) == 1:\n return _shuffle(key, x, 0)\n else:\n assert isinstance(x, jnp.ndarray)\n ind = _shuffle(key, jnp.arange(x.shape[0]), 0) # type: ignore[attribute-error]\n return x[ind]\n\n\n@partial(jit, static_argnums=(2,))\ndef _shuffle(key, x, axis) -> jnp.ndarray:\n # On parallel architectures, Fisher-Yates is more expensive than doing\n # multiple sorts. This algorithm is based on one developed and analyzed by\n # tjablin@. We sort according to randomly-generated 32bit keys, but those keys\n # may have collisions. If we repeat the process, using fresh 32bit keys for\n # each sort, then whenever all pairs of elements have been assigned distinct\n # keys at some iteration (or equivalently when the strings formed by\n # concatenating the successive keys for each element are all distinct) then we\n # are guaranteed to have a perfect sample (assuming that either the sort is\n # stable or that any bias is not value-dependent). Since checking uniqueness\n # at runtime may be expensive, we use a heuristic static stop criterion\n # developed by tjablin@. See tensorflow/compiler/tf2xla/random_ops.cc for more\n # info, and for the original implementation of this algorithm. See also\n # Section 2 of http://people.csail.mit.edu/costis/6896sp11/lec5s.pdf for\n # another analysis (where the keys are generated one bit at a time).\n exponent = 3 # see tjablin@'s analysis for explanation of this parameter\n uint32max = jnp.iinfo(np.uint32).max\n num_rounds = int(np.ceil(exponent * np.log(max(1, x.size)) / np.log(uint32max)))\n\n for _ in range(num_rounds):\n key, subkey = split(key)\n sort_keys = _random_bits(subkey, 32, x.shape)\n _, x = lax.sort_key_val(sort_keys, x, axis)\n\n return x\n\n\ndef choice(key: jnp.ndarray,\n a: IntegerArray,\n shape: Sequence[int] = (),\n replace: bool = True,\n p=None) -> jnp.ndarray:\n \"\"\"Generates a random sample from a given 1-D array.\n\n Args:\n key: a PRNGKey used as the random key.\n a : 1D array or int. If an ndarray, a random sample is generated from\n its elements. If an int, the random sample is generated as if a were\n arange(a).\n shape : tuple of ints, optional. Output shape. If the given shape is,\n e.g., ``(m, n)``, then ``m * n`` samples are drawn. Default is (),\n in which case a single value is returned.\n replace : boolean. 
Whether the sample is with or without replacement.\n default is True.\n p : 1-D array-like, The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribution over all\n entries in a.\n\n Returns:\n An array of shape `shape` containing samples from `a`.\n \"\"\"\n if not isinstance(shape, Sequence):\n raise TypeError(\"shape argument of jax.random.choice must be a sequence, \"\n f\"got {shape}\")\n if np.ndim(a) not in [0, 1]:\n raise ValueError(\"a must be an integer or 1-dimensional\")\n _check_arraylike(\"choice\", a)\n if np.ndim(a) == 0:\n a = core.concrete_or_error(int, a, \"The error occurred in jax.random.choice()\")\n else:\n a = jnp.asarray(a)\n n_inputs = int(a) if np.ndim(a) == 0 else len(a) # type: ignore[arg-type]\n n_draws = prod(shape)\n if n_draws == 0:\n return jnp.zeros(shape, dtype=lax.dtype(a))\n if n_inputs <= 0:\n raise ValueError(\"a must be greater than 0 unless no samples are taken\")\n if not replace and n_draws > n_inputs:\n raise ValueError(\"Cannot take a larger sample than population when 'replace=False'\")\n\n if p is None:\n if replace:\n ind = randint(key, shape, 0, n_inputs)\n result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]\n else:\n result = permutation(key, a)[:n_draws]\n else:\n if p.shape != (n_inputs,):\n raise ValueError(\"p must be None or match the shape of a\")\n if replace:\n p_cuml = jnp.cumsum(p)\n r = p_cuml[-1] * (1 - uniform(key, shape))\n ind = jnp.searchsorted(p_cuml, r)\n result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]\n else:\n # Gumbel top-k trick: https://timvieira.github.io/blog/post/2019/09/16/algorithms-for-sampling-without-replacement/\n g = -gumbel(key, (n_inputs,)) - jnp.log(p)\n ind = jnp.argsort(g)[:n_draws]\n result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]\n return result.reshape(shape)\n\n\ndef normal(key: jnp.ndarray,\n shape: Union[Sequence[int], NamedShape] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample standard normal random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.inexact):\n raise ValueError(f\"dtype argument to `normal` must be a float or complex dtype, \"\n f\"got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.as_named_shape(shape)\n return _normal(key, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _normal(key, shape, dtype) -> jnp.ndarray:\n if dtypes.issubdtype(dtype, np.complexfloating):\n sqrt2 = np.array(np.sqrt(2), dtype)\n\n key_re, key_im = split(key)\n real_dtype = np.array(0, dtype).real.dtype\n _re = _normal_real(key_re, shape, real_dtype)\n _im = _normal_real(key_im, shape, real_dtype)\n return (_re + 1j * _im) / sqrt2\n else:\n return _normal_real(key, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _normal_real(key, shape, dtype) -> jnp.ndarray:\n _check_shape(\"normal\", shape)\n lo = np.nextafter(np.array(-1., dtype), np.array(0., dtype), dtype=dtype)\n hi = np.array(1., dtype)\n u = uniform(key, shape, dtype, lo, hi) # type: ignore[arg-type]\n return np.array(np.sqrt(2), dtype) * lax.erf_inv(u)\n\n\ndef multivariate_normal(key: jnp.ndarray,\n mean: RealArray,\n cov: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_,\n method: str = 'cholesky') -> jnp.ndarray:\n \"\"\"Sample multivariate normal random values with given mean and covariance.\n\n Args:\n key: a PRNGKey used as the random key.\n mean: a mean vector of shape ``(..., n)``.\n cov: a positive definite covariance matrix of shape ``(..., n, n)``. The\n batch shape ``...`` must be broadcast-compatible with that of ``mean``.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n axis. Must be broadcast-compatible with ``mean.shape[:-1]`` and\n ``cov.shape[:-2]``. The default (None) produces a result batch shape by\n broadcasting together the batch shapes of ``mean`` and ``cov``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n method: optional, a method to compute the factor of ``cov``.\n Must be one of 'svd', eigh, and 'cholesky'. 
Default 'cholesky'.\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + mean.shape[-1:]`` if ``shape`` is not None, or else\n ``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``.\n \"\"\"\n if method not in {'svd', 'eigh', 'cholesky'}:\n raise ValueError(\"method must be one of {'svd', 'eigh', 'cholesky'}\")\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `multivariate_normal` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _multivariate_normal(key, mean, cov, shape, dtype, method) # type: ignore\n\n@partial(jit, static_argnums=(3, 4, 5))\ndef _multivariate_normal(key, mean, cov, shape, dtype, method) -> jnp.ndarray:\n if not np.ndim(mean) >= 1:\n msg = \"multivariate_normal requires mean.ndim >= 1, got mean.ndim == {}\"\n raise ValueError(msg.format(np.ndim(mean)))\n if not np.ndim(cov) >= 2:\n msg = \"multivariate_normal requires cov.ndim >= 2, got cov.ndim == {}\"\n raise ValueError(msg.format(np.ndim(cov)))\n n = mean.shape[-1]\n if np.shape(cov)[-2:] != (n, n):\n msg = (\"multivariate_normal requires cov.shape == (..., n, n) for n={n}, \"\n \"but got cov.shape == {shape}.\")\n raise ValueError(msg.format(n=n, shape=np.shape(cov)))\n\n if shape is None:\n shape = lax.broadcast_shapes(mean.shape[:-1], cov.shape[:-2])\n else:\n _check_shape(\"normal\", shape, mean.shape[:-1], cov.shape[:-2])\n\n if method == 'svd':\n (u, s, _) = svd(cov)\n factor = u * jnp.sqrt(s)\n elif method == 'eigh':\n (w, v) = eigh(cov)\n factor = v * jnp.sqrt(w)\n else: # 'cholesky'\n factor = cholesky(cov)\n normal_samples = normal(key, shape + mean.shape[-1:], dtype)\n return mean + jnp.einsum('...ij,...j->...i', factor, normal_samples)\n\n\ndef truncated_normal(key: jnp.ndarray,\n lower: RealArray,\n upper: RealArray,\n shape: Optional[Union[Sequence[int], NamedShape]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample truncated standard normal random values with given shape and dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n lower: a float or array of floats representing the lower bound for\n truncation. Must be broadcast-compatible with ``upper``.\n upper: a float or array of floats representing the upper bound for\n truncation. Must be broadcast-compatible with ``lower``.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``lower`` and ``upper``. 
The\n default (None) produces a result shape by broadcasting ``lower`` and\n ``upper``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``lower`` and ``upper``.\n Returns values in the open interval ``(lower, upper)``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `truncated_normal` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.as_named_shape(shape)\n return _truncated_normal(key, lower, upper, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(3, 4))\ndef _truncated_normal(key, lower, upper, shape, dtype) -> jnp.ndarray:\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper))\n else:\n _check_shape(\"truncated_normal\", shape, np.shape(lower), np.shape(upper))\n\n sqrt2 = np.array(np.sqrt(2), dtype)\n lower = lax.convert_element_type(lower, dtype)\n upper = lax.convert_element_type(upper, dtype)\n a = lax.erf(lower / sqrt2)\n b = lax.erf(upper / sqrt2)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"truncated_normal only accepts floating point dtypes.\")\n u = uniform(key, shape, dtype, minval=a, maxval=b)\n out = sqrt2 * lax.erf_inv(u)\n # Clamp the value to the open interval (lower, upper) to make sure that\n # rounding (or if we chose `a` for `u`) doesn't push us outside of the range.\n return jnp.clip(\n out,\n lax.nextafter(lax.stop_gradient(lower), np.array(np.inf, dtype=dtype)),\n lax.nextafter(lax.stop_gradient(upper), np.array(-np.inf, dtype=dtype)))\n\n\ndef bernoulli(key: jnp.ndarray,\n p: RealArray = np.float32(0.5),\n shape: Optional[Union[Sequence[int], NamedShape]] = None) -> jnp.ndarray:\n \"\"\"Sample Bernoulli random values with given shape and mean.\n\n Args:\n key: a PRNGKey used as the random key.\n p: optional, a float or array of floats for the mean of the random\n variables. Must be broadcast-compatible with ``shape``. Default 0.5.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Must be broadcast-compatible with ``p.shape``. 
The default (None)\n produces a result shape equal to ``p.shape``.\n\n Returns:\n A random array with boolean dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``p.shape``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(lax.dtype(p))\n if shape is not None:\n shape = core.as_named_shape(shape)\n if not jnp.issubdtype(dtype, np.floating):\n msg = \"bernoulli probability `p` must have a floating dtype, got {}.\"\n raise TypeError(msg.format(dtype))\n p = lax.convert_element_type(p, dtype)\n return _bernoulli(key, p, shape) # type: ignore\n\n@partial(jit, static_argnums=(2,))\ndef _bernoulli(key, p, shape) -> jnp.ndarray:\n if shape is None:\n # TODO: Use the named part of `p` as well\n shape = np.shape(p)\n else:\n _check_shape(\"bernoulli\", shape, np.shape(p))\n\n return uniform(key, shape, lax.dtype(p)) < p\n\n\ndef beta(key: jnp.ndarray,\n a: RealArray,\n b: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Beta random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the first parameter \"alpha\".\n b: a float or array of floats broadcast-compatible with ``shape``\n representing the second parameter \"beta\".\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a`` and ``b``. The default\n (None) produces a result shape by broadcasting ``a`` and ``b``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``a`` and ``b``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `beta` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _beta(key, a, b, shape, dtype)\n\ndef _beta(key, a, b, shape, dtype):\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(a), np.shape(b))\n else:\n _check_shape(\"beta\", shape, np.shape(a), np.shape(b))\n\n a = lax.convert_element_type(a, dtype)\n b = lax.convert_element_type(b, dtype)\n key_a, key_b = split(key)\n a = jnp.broadcast_to(a, shape)\n b = jnp.broadcast_to(b, shape)\n gamma_a = gamma(key_a, a, shape, dtype)\n gamma_b = gamma(key_b, b, shape, dtype)\n return gamma_a / (gamma_a + gamma_b)\n\n\ndef cauchy(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Cauchy random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `cauchy` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _cauchy(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _cauchy(key, shape, dtype):\n _check_shape(\"cauchy\", shape)\n u = uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)\n pi = _constant_like(u, np.pi)\n return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5))))\n\n\ndef dirichlet(key: jnp.ndarray,\n alpha: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Dirichlet random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n alpha: an array of shape ``(..., n)`` used as the concentration\n parameter of the random variables.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n element of value ``n``. Must be broadcast-compatible with\n ``alpha.shape[:-1]``. The default (None) produces a result shape equal to\n ``alpha.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + (alpha.shape[-1],)`` if ``shape`` is not None, or else\n ``alpha.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `dirichlet` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _dirichlet(key, alpha, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _dirichlet(key, alpha, shape, dtype):\n if not np.ndim(alpha) >= 1:\n msg = \"dirichlet requires alpha.ndim >= 1, got alpha.ndim == {}\"\n raise ValueError(msg.format(np.ndim(alpha)))\n\n if shape is None:\n shape = np.shape(alpha)[:-1]\n else:\n _check_shape(\"dirichlet\", shape, np.shape(alpha)[:-1])\n\n alpha = lax.convert_element_type(alpha, dtype)\n gamma_samples = gamma(key, alpha, shape + np.shape(alpha)[-1:], dtype)\n return gamma_samples / jnp.sum(gamma_samples, axis=-1, keepdims=True)\n\n\ndef exponential(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Exponential random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `exponential` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _exponential(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _exponential(key, shape, dtype):\n _check_shape(\"exponential\", shape)\n u = uniform(key, shape, dtype)\n # taking 1 - u to move the domain of log to (0, 1] instead of [0, 1)\n return lax.neg(lax.log1p(lax.neg(u)))\n\n\ndef _gamma_one(key, alpha):\n # Ref: A simple method for generating gamma variables, George Marsaglia and Wai Wan Tsang\n # The algorithm can also be found in:\n # https://en.wikipedia.org/wiki/Gamma_distribution#Generating_gamma-distributed_random_variables\n zero = _constant_like(alpha, 0)\n one = _constant_like(alpha, 1)\n minus_one = _constant_like(alpha, -1)\n one_over_two = _constant_like(alpha, 0.5)\n one_over_three = _constant_like(alpha, 1. / 3.)\n squeeze_const = _constant_like(alpha, 0.0331)\n dtype = lax.dtype(alpha)\n\n key, subkey = split(key)\n # for alpha < 1, we boost alpha to alpha + 1 and get a sample according to\n # Gamma(alpha) ~ Gamma(alpha+1) * Uniform()^(1 / alpha)\n boost = lax.select(lax.ge(alpha, one),\n one,\n lax.pow(uniform(subkey, (), dtype=dtype), lax.div(one, alpha)))\n alpha = lax.select(lax.ge(alpha, one), alpha, lax.add(alpha, one))\n\n d = lax.sub(alpha, one_over_three)\n c = lax.div(one_over_three, lax.sqrt(d))\n\n def _cond_fn(kXVU):\n _, X, V, U = kXVU\n # TODO: use lax.cond when its batching rule is supported\n # The reason is to avoid evaluating second condition which involves log+log\n # if the first condition is satisfied\n cond = lax.bitwise_and(lax.ge(U, lax.sub(one, lax.mul(squeeze_const, lax.mul(X, X)))),\n lax.ge(lax.log(U), lax.add(lax.mul(X, one_over_two),\n lax.mul(d, lax.add(lax.sub(one, V),\n lax.log(V))))))\n return cond\n\n def _body_fn(kXVU):\n def _next_kxv(kxv):\n key = kxv[0]\n key, subkey = split(key)\n x = normal(subkey, (), dtype=dtype)\n v = lax.add(one, lax.mul(x, c))\n return key, x, v\n\n key = kXVU[0]\n key, x_key, U_key = split(key, 3)\n _, x, v = lax.while_loop(lambda kxv: lax.le(kxv[2], zero), _next_kxv, (x_key, zero, minus_one))\n X = lax.mul(x, x)\n V = lax.mul(lax.mul(v, v), v)\n U = uniform(U_key, (), dtype=dtype)\n return key, X, V, U\n\n # initial state is chosen such that _cond_fn will return True\n _, _, V, _ = lax.while_loop(_cond_fn, _body_fn, (key, zero, one, _constant_like(alpha, 2)))\n z = lax.mul(lax.mul(d, V), boost)\n return lax.select(lax.eq(z, zero), jnp.finfo(z.dtype).tiny, z)\n\n\ndef _gamma_grad(sample, a):\n samples = jnp.reshape(sample, -1)\n alphas = jnp.reshape(a, -1)\n if xla_bridge.get_backend().platform == 'cpu':\n grads = lax.map(lambda args: lax.random_gamma_grad(*args), (alphas, samples))\n else:\n grads = vmap(lax.random_gamma_grad)(alphas, samples)\n return grads.reshape(np.shape(a))\n\ndef _gamma_impl(key, a, use_vmap=False):\n a_shape = jnp.shape(a)\n # split key to match the shape of a\n key_ndim = jnp.ndim(key) - 1\n key = jnp.reshape(key, (-1, 2))\n key = vmap(split, in_axes=(0, None))(key, prod(a_shape[key_ndim:]))\n keys = jnp.reshape(key, (-1, 2))\n alphas = jnp.reshape(a, -1)\n if use_vmap:\n samples = vmap(_gamma_one)(keys, alphas)\n 
else:\n samples = lax.map(lambda args: _gamma_one(*args), (keys, alphas))\n\n return jnp.reshape(samples, a_shape)\n\ndef _gamma_batching_rule(batched_args, batch_dims):\n k, a = batched_args\n bk, ba = batch_dims\n size = next(t.shape[i] for t, i in zip(batched_args, batch_dims) if i is not None)\n k = batching.bdim_at_front(k, bk, size)\n a = batching.bdim_at_front(a, ba, size)\n return random_gamma_p.bind(k, a), 0\n\nrandom_gamma_p = core.Primitive('random_gamma')\nrandom_gamma_p.def_impl(_gamma_impl)\nrandom_gamma_p.def_abstract_eval(lambda key, a: core.raise_to_shaped(a))\nad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a: tangent * _gamma_grad(ans, a))\nxla.translations_with_avals[random_gamma_p] = xla.lower_fun(\n partial(_gamma_impl, use_vmap=True),\n multiple_results=False, with_avals=True)\nxla.backend_specific_translations['cpu'][random_gamma_p] = xla.lower_fun(\n partial(_gamma_impl, use_vmap=False),\n multiple_results=False)\nbatching.primitive_batchers[random_gamma_p] = _gamma_batching_rule\n\ndef gamma(key: jnp.ndarray,\n a: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Gamma random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a``. The default (None)\n produces a result shape equal to ``a.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``a.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `gamma` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _gamma(key, a, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _gamma(key, a, shape, dtype):\n if shape is None:\n shape = np.shape(a)\n else:\n _check_shape(\"gamma\", shape, np.shape(a))\n\n a = lax.convert_element_type(a, dtype)\n if np.shape(a) != shape:\n a = jnp.broadcast_to(a, shape)\n return random_gamma_p.bind(key, a)\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _poisson_knuth(key, lam, shape, dtype, max_iters):\n # Knuth's algorithm for generating Poisson random variates.\n # Reference:\n # https://en.wikipedia.org/wiki/Poisson_distribution#Generating_Poisson-distributed_random_variables\n\n def body_fn(carry):\n i, k, rng, log_prod = carry\n rng, subkey = split(rng)\n k = lax.select(log_prod > -lam, k + 1, k)\n u = uniform(subkey, shape, np.float32)\n return i + 1, k, rng, log_prod + jnp.log(u)\n\n def cond_fn(carry):\n i, log_prod = carry[0], carry[3]\n return (log_prod > -lam).any() & (i < max_iters)\n\n k_init = lax.full_like(lam, 0, dtype, shape)\n log_rate_init = lax.full_like(lam, 0, np.float32, shape)\n k = lax.while_loop(cond_fn, body_fn, (0, k_init, key, log_rate_init))[1]\n return (k - 1).astype(dtype)\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _poisson_rejection(key, lam, shape, dtype, max_iters):\n # Transformed rejection due to Hormann.\n # Reference:\n # 
http://citeseer.ist.psu.edu/viewdoc/citations;jsessionid=1BEB35946CC807879F55D42512E5490C?doi=10.1.1.48.3054.\n log_lam = lax.log(lam)\n b = 0.931 + 2.53 * lax.sqrt(lam)\n a = -0.059 + 0.02483 * b\n inv_alpha = 1.1239 + 1.1328 / (b - 3.4)\n v_r = 0.9277 - 3.6224 / (b - 2)\n\n def body_fn(carry):\n i, k_out, accepted, key = carry\n key, subkey_0, subkey_1 = split(key, 3)\n\n u = uniform(subkey_0, shape, lam.dtype) - 0.5\n v = uniform(subkey_1, shape, lam.dtype)\n u_shifted = 0.5 - abs(u)\n\n k = lax.floor((2 * a / u_shifted + b) * u + lam + 0.43)\n s = lax.log(v * inv_alpha / (a / (u_shifted * u_shifted) + b))\n t = -lam + k * log_lam - lax.lgamma(k + 1)\n\n accept1 = (u_shifted >= 0.07) & (v <= v_r)\n reject = (k < 0) | ((u_shifted < 0.013) & (v > u_shifted))\n accept2 = s <= t\n accept = accept1 | (~reject & accept2)\n\n k_out = lax.select(accept, k, k_out)\n accepted |= accept\n\n return i + 1, k_out, accepted, key\n\n def cond_fn(carry):\n i, k_out, accepted, key = carry\n return (~accepted).any() & (i < max_iters)\n\n k_init = lax.full_like(lam, -1, lam.dtype, shape)\n accepted = lax.full_like(lam, False, jnp.bool_, shape)\n k = lax.while_loop(cond_fn, body_fn, (0, k_init, accepted, key))[1]\n return k.astype(dtype)\n\n\n@partial(jit, static_argnums=(2, 3))\ndef _poisson(key, lam, shape, dtype):\n # The implementation matches TensorFlow and NumPy:\n # https://github.com/tensorflow/tensorflow/blob/v2.2.0-rc3/tensorflow/core/kernels/random_poisson_op.cc\n # https://github.com/numpy/numpy/blob/v1.18.3/numpy/random/src/distributions/distributions.c#L574\n # For lambda < 10, we use the Knuth algorithm; otherwise, we use transformed\n # rejection sampling.\n use_knuth = lam < 10\n lam_knuth = lax.select(use_knuth, lam, lax.full_like(lam, 0.0))\n # The acceptance probability for rejection sampling maxes out at 89% as\n # λ -> ∞, so pick some arbitrary large value.\n lam_rejection = lax.select(use_knuth, lax.full_like(lam, 1e5), lam)\n max_iters = dtype.type(jnp.iinfo(dtype).max) # insanely conservative\n result = lax.select(\n use_knuth,\n _poisson_knuth(key, lam_knuth, shape, dtype, max_iters),\n _poisson_rejection(key, lam_rejection, shape, dtype, max_iters),\n )\n return lax.select(lam == 0, jnp.zeros_like(result), result)\n\n\ndef poisson(key: jnp.ndarray,\n lam: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeInt = dtypes.int_) -> jnp.ndarray:\n \"\"\"Sample Poisson random values with given shape and integer dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n lam: rate parameter (mean of the distribution), must be >= 0.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, an integer dtype for the returned values (default int64 if\n jax_enable_x64 is true, otherwise int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n if np.shape(lam) != shape:\n lam = jnp.broadcast_to(lam, shape)\n lam = lax.convert_element_type(lam, np.float32)\n return _poisson(key, lam, shape, dtype)\n\n\ndef gumbel(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Gumbel random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `gumbel` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _gumbel(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _gumbel(key, shape, dtype):\n _check_shape(\"gumbel\", shape)\n return -jnp.log(-jnp.log(\n uniform(key, shape, dtype, minval=jnp.finfo(dtype).tiny, maxval=1.)))\n\n\ndef categorical(key: jnp.ndarray,\n logits: RealArray,\n axis: int = -1,\n shape: Optional[Sequence[int]] = None) -> jnp.ndarray:\n \"\"\"Sample random values from categorical distributions.\n\n Args:\n key: a PRNGKey used as the random key.\n logits: Unnormalized log probabilities of the categorical distribution(s) to sample from,\n so that `softmax(logits, axis)` gives the corresponding probabilities.\n axis: Axis along which logits belong to the same categorical distribution.\n shape: Optional, a tuple of nonnegative integers representing the result shape.\n Must be broadcast-compatible with ``np.delete(logits.shape, axis)``.\n The default (None) produces a result shape equal to ``np.delete(logits.shape, axis)``.\n\n Returns:\n A random array with int dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``np.delete(logits.shape, axis)``.\n \"\"\"\n\n if axis >= 0:\n axis -= len(logits.shape)\n\n batch_shape = tuple(np.delete(logits.shape, axis))\n if shape is None:\n shape = batch_shape\n else:\n shape = tuple(shape)\n _check_shape(\"categorical\", shape, batch_shape)\n\n sample_shape = shape[:len(shape)-len(batch_shape)]\n return jnp.argmax(\n gumbel(key, sample_shape + logits.shape, logits.dtype) +\n lax.expand_dims(logits, tuple(range(len(sample_shape)))),\n axis=axis)\n\n\ndef laplace(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Laplace random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `laplace` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _laplace(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _laplace(key, shape, dtype):\n _check_shape(\"laplace\", shape)\n u = uniform(\n key, shape, dtype, minval=-1. + jnp.finfo(dtype).epsneg, maxval=1.)\n return lax.mul(lax.sign(u), lax.log1p(lax.neg(lax.abs(u))))\n\n\ndef logistic(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample logistic random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `logistic` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _logistic(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _logistic(key, shape, dtype):\n _check_shape(\"logistic\", shape)\n x = uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)\n return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x)))\n\n\ndef pareto(key: jnp.ndarray,\n b: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Pareto random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n b: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``b``. The default (None)\n produces a result shape equal to ``b.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``b.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `pareto` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _pareto(key, b, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _pareto(key, b, shape, dtype):\n if shape is None:\n shape = np.shape(b)\n else:\n _check_shape(\"pareto\", shape)\n\n b = lax.convert_element_type(b, dtype)\n e = exponential(key, shape, dtype)\n return lax.exp(e / b)\n\n\ndef t(key: jnp.ndarray,\n df: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Student's t random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n df: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``df``. 
The default (None)\n produces a result shape equal to ``df.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``df.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `t` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _t(key, df, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _t(key, df, shape, dtype):\n if shape is None:\n shape = np.shape(df)\n else:\n _check_shape(\"t\", shape, np.shape(df))\n\n df = lax.convert_element_type(df, dtype)\n key_n, key_g = split(key)\n n = normal(key_n, shape, dtype)\n two = _constant_like(n, 2)\n half_df = lax.div(df, two)\n g = gamma(key_n, half_df, shape, dtype)\n return n * jnp.sqrt(half_df / g)\n\n\ndef rademacher(key: jnp.ndarray,\n shape: Sequence[int],\n dtype: DTypeLikeInt = dtypes.int_) -> jnp.ndarray:\n \"\"\"Sample from a Rademacher distribution.\n\n Args:\n key: a PRNGKey key.\n shape: The shape of the returned samples.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples, of shape `shape`. Each element in the output has\n a 50% change of being 1 or -1.\n\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _rademacher(key, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _rademacher(key, shape, dtype):\n bernoulli_samples = bernoulli(key=key, p=0.5, shape=shape)\n return (2 * bernoulli_samples - 1).astype(dtype)\n\n\ndef maxwell(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample from a one sided Maxwell distribution.\n\n The scipy counterpart is `scipy.stats.maxwell`.\n\n Args:\n key: a PRNGKey key.\n shape: The shape of the returned samples.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples, of shape `shape`.\n\n \"\"\"\n # Generate samples using:\n # sqrt(X^2 + Y^2 + Z^2), X,Y,Z ~N(0,1)\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `maxwell` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _maxwell(key, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _maxwell(key, shape, dtype):\n shape = shape + (3,)\n norm_rvs = normal(key=key, shape=shape, dtype=dtype)\n return jnp.linalg.norm(norm_rvs, axis=-1)\n\n\ndef double_sided_maxwell(key: jnp.ndarray,\n loc: RealArray,\n scale: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample from a double sided Maxwell distribution.\n\n Samples using:\n loc + scale* sgn(U-0.5)* one_sided_maxwell U~Unif;\n\n Args:\n key: a PRNGKey key.\n loc: The location parameter of the distribution.\n scale: The scale parameter of the distribution.\n shape: The shape added to the parameters loc and scale broadcastable shape.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples.\n\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `double_sided_maxwell` must be a float\"\n f\" dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _double_sided_maxwell(key, loc, scale, 
shape, dtype)\n\n\n@partial(jit, static_argnums=(3, 4))\ndef _double_sided_maxwell(key, loc, scale, shape, dtype):\n params_shapes = lax.broadcast_shapes(np.shape(loc), np.shape(scale))\n if not shape:\n shape = params_shapes\n\n shape = shape + params_shapes\n maxwell_key, rademacher_key = split(key)\n maxwell_rvs = maxwell(maxwell_key, shape=shape, dtype=dtype)\n # Generate random signs for the symmetric variates.\n random_sign = rademacher(rademacher_key, shape=shape, dtype=dtype)\n assert random_sign.shape == maxwell_rvs.shape\n\n return random_sign * maxwell_rvs * scale + loc\n\n\ndef weibull_min(key: jnp.ndarray,\n scale: RealArray,\n concentration: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample from a Weibull distribution.\n\n The scipy counterpart is `scipy.stats.weibull_min`.\n\n Args:\n key: a PRNGKey key.\n scale: The scale parameter of the distribution.\n concentration: The concentration parameter of the distribution.\n shape: The shape added to the parameters loc and scale broadcastable shape.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples.\n\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `weibull_min` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _weibull_min(key, scale, concentration, shape, dtype)\n\n\n@partial(jit, static_argnums=(3, 4))\ndef _weibull_min(key, scale, concentration, shape, dtype):\n random_uniform = uniform(\n key=key, shape=shape, minval=0, maxval=1, dtype=dtype)\n\n # Inverse weibull CDF.\n return jnp.power(-jnp.log1p(-random_uniform), 1.0/concentration) * scale\n"
] | [
[
"numpy.log",
"numpy.sqrt",
"numpy.uint32",
"numpy.issubdtype",
"numpy.ndim",
"numpy.ceil",
"numpy.delete",
"numpy.int64",
"numpy.shape",
"numpy.iinfo",
"numpy.float32",
"numpy.binary_repr",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
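The `_multivariate_normal` sampler in the entry above reduces every `method` ('svd', 'eigh', 'cholesky') to the same recipe: find a factor F with F Fᵀ = cov, then return mean + F z for standard-normal z. Below is a minimal NumPy sketch of that recipe for the Cholesky case; it is illustrative only, not the JAX code itself, and the array names are made up.

```python
# Sketch: sampling N(mean, cov) via a Cholesky factor, as in the 'cholesky'
# branch of _multivariate_normal above. If cov = L @ L.T and z ~ N(0, I),
# then mean + L @ z has mean `mean` and covariance L @ L.T = cov.
import numpy as np

rng = np.random.default_rng(0)
mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])

L = np.linalg.cholesky(cov)               # lower-triangular, cov == L @ L.T
z = rng.standard_normal(size=(100_000, 2))
samples = mean + z @ L.T                  # row-wise L @ z, then shift by mean

print(np.cov(samples, rowvar=False))      # approximately cov
print(samples.mean(axis=0))               # approximately mean
```

The 'svd' and 'eigh' branches differ only in how the factor is built (u * sqrt(s) and v * sqrt(w) respectively), which trades speed for robustness on near-singular covariances.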
omarsou/kernel_method_kaggle_challenge | [
"0f2e85166112b231699d9c9f7e3ae894e5ff7766"
] | [
"kernel/base_kernel.py"
] | [
"import numpy as np\nimport pickle\n\n\nclass Kernel:\n def __init__(self):\n self.train_phi = None\n self.K_matrix = None\n self.test_phi = None\n self.X_train = None\n pass\n\n def build_gram_matrix(self, X):\n raise NotImplementedError(\"Method build_gram_matrix not implemented.\")\n\n def test(self, x):\n raise NotImplementedError(\"Method test not implemented.\")\n\n def save_kernel(self, path):\n with open(path, \"wb\") as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load_kernel(path):\n with open(path, \"rb\") as f:\n kernel_class = pickle.load(f)\n return kernel_class\n\n\nclass KernelIPExplicit(Kernel):\n def __init__(self):\n super().__init__()\n\n def build_gram_matrix(self, X):\n n = X.shape[0]\n output = np.zeros((n, n))\n self.train_phi = list()\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n self.train_phi.append(self.make_phi(item))\n\n for i in range(n):\n for j in range(i, n):\n value = self.inner_product_phi(self.train_phi[i], self.train_phi[j])\n output[i, j] = output[j, i] = value\n\n self.K_matrix = output\n\n def test(self, indice_x):\n n = len(self.train_phi)\n output = np.zeros(n)\n for i in range(n):\n output[i] = self.inner_product_phi(self.train_phi[i], self.test_phi[indice_x])\n return output\n\n def make_test_phi(self, X):\n n = X.shape[0]\n self.test_phi = []\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n self.test_phi.append(self.make_phi(item, train=False))\n return\n\n def make_phi(self, item, train=True):\n raise NotImplementedError(\"Method make_phi not implemented.\")\n\n def inner_product_phi(self, phi1, phi2):\n raise NotImplementedError(\"Method inner_product_phi not implemented.\")\n\n\nclass KernelIPImplicit(Kernel):\n def __init__(self):\n super().__init__()\n\n def build_gram_matrix(self, X):\n n = X.shape[0]\n self.X_train = X\n output = np.zeros((n, n))\n for i in range(n):\n for j in range(i, n):\n value1, value2 = X.loc[i, X.columns[1]], X.loc[j, X.columns[1]]\n output[i, j] = output[j, i] = self.K(value1, value2)\n self.K_matrix = output\n\n def test(self, x):\n X = self.X_train\n n = X.shape[0]\n output = np.zeros(n)\n for i in range(n):\n output[i] = self.K(X.loc[i, X.columns[1]], x)\n\n def K(self, item1, item2):\n raise NotImplementedError(\"Method K not implemented\")\n\n\nclass SumKernel:\n def __init__(self):\n self.train_phi = list()\n self.K_matrix = None\n self.test_phi = None\n self.X_train = None\n pass\n\n def build_gram_matrix(self, X):\n raise NotImplementedError(\"Method build_gram_matrix_sum not implemented.\")\n\n def build_gram_matrix_one(self, X, param):\n raise NotImplementedError(\"Method build_gram_matrix not implemented.\")\n\n def test(self, x):\n raise NotImplementedError(\"Method test not implemented.\")\n\n def save_kernel(self, path):\n with open(path, \"wb\") as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load_kernel(path):\n with open(path, \"rb\") as f:\n kernel_class = pickle.load(f)\n return kernel_class\n\n\nclass SumKernelIPExplicitError(BaseException):\n pass\n\n\nclass SumKernelIPExplicit(SumKernel):\n def __init__(self, lst_params):\n super().__init__()\n if not isinstance(lst_params, list):\n raise SumKernelIPExplicitError(\"If you want to use only one param, you should use the individual param \"\n \"class method.\")\n self.lst_params = lst_params\n\n def build_gram_matrix(self, X):\n n = X.shape[0]\n output = np.zeros((n, n))\n for params in self.lst_params:\n intermediate_output, train_phi = self.build_gram_matrix_one(X, params)\n self.train_phi.append(train_phi)\n output 
+= intermediate_output\n self.K_matrix = output\n\n def build_gram_matrix_one(self, X, params):\n n = X.shape[0]\n output = np.zeros((n, n))\n train_phi = list()\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n train_phi.append(self.make_phi(item, True, params))\n\n for i in range(n):\n for j in range(i, n):\n value = self.inner_product_phi(train_phi[i], train_phi[j])\n output[i, j] = output[j, i] = value\n\n return output, train_phi\n\n def test(self, indice_x):\n n = len(self.train_phi[0])\n output = np.zeros(n)\n # accumulate the contribution of each per-params kernel into output\n for idx in range(len(self.lst_params)):\n for i in range(n):\n output[i] += self.inner_product_phi(self.train_phi[idx][i], self.test_phi[idx][indice_x])\n return output\n\n def make_test_phi(self, X):\n n = X.shape[0]\n self.test_phi = []\n for params in self.lst_params:\n current_test_phi = list()\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n current_test_phi.append(self.make_phi(item, train=False, params=params))\n self.test_phi.append(current_test_phi)\n return\n\n def make_phi(self, item, train=True, params=None):\n raise NotImplementedError(\"Method make_phi not implemented.\")\n\n def inner_product_phi(self, phi1, phi2):\n raise NotImplementedError(\"Method inner_product_phi not implemented.\")\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
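The `KernelIPExplicit` base class above leaves `make_phi` and `inner_product_phi` abstract. A hedged, self-contained sketch of what a concrete pair could look like follows; the character 2-mer "spectrum" feature map is an illustrative choice, not taken from the repo, and the sketch reuses the same symmetric Gram-matrix fill (`output[i, j] = output[j, i]`) the base class uses.

```python
# Sketch: an explicit feature map (counts of length-k substrings) and its
# sparse inner product, plus the upper-triangle Gram-matrix fill from above.
from collections import Counter
import numpy as np

def make_phi(seq, k=2):
    # explicit feature map: multiset of all length-k substrings of seq
    return Counter(seq[i:i + k] for i in range(len(seq) - k + 1))

def inner_product_phi(phi1, phi2):
    # dot product over the shared k-mers only; iterate the smaller dict
    if len(phi2) < len(phi1):
        phi1, phi2 = phi2, phi1
    return sum(v * phi2.get(kmer, 0) for kmer, v in phi1.items())

seqs = ["ACGTAC", "CGTACG", "TTTTTT"]
phis = [make_phi(s) for s in seqs]
n = len(phis)
K = np.zeros((n, n))
for i in range(n):
    for j in range(i, n):                   # fill upper triangle, mirror it
        K[i, j] = K[j, i] = inner_product_phi(phis[i], phis[j])
print(K)
```

Precomputing the phi list once and exploiting symmetry, as both the repo's class and this sketch do, halves the number of inner-product evaluations.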
Nebula4869/real-time-object-detection-YOLOv4 | [
"a7b692999210747fd49cec2c35f2b7d8d5b7eecc"
] | [
"data_voc.py"
] | [
"import numpy as np\nimport random\nimport xml\nimport cv2\nimport os\n\n\ndef read_file(file_name):\n \"\"\"\n 读取 file_name 文件全部内容\n return:文件内容list\n \"\"\"\n if not os.path.isfile(file_name):\n return None\n result = []\n with open(file_name, 'r') as f:\n for line in f.readlines():\n # 去掉换行符和空格\n line = line.strip('\\n').strip()\n if len(line) == 0:\n continue\n result.append(line)\n return result\n\n\ndef word2id(names_file):\n \"\"\"\n 得到 名字 到 id 的转换字典\n return {}\n \"\"\"\n id_dict = {}\n contents = read_file(names_file)\n for i in range(len(contents)):\n id_dict[str(contents[i])] = i\n return id_dict\n\n\ndef parse_voc_xml(file_name, names_dict):\n \"\"\"\n 解析voc数据集的 xml 文件,每一个列表表示一个图片中的全部标签\n return [ [id1, x1, y1, w1, h1], [id2, x2, y2, w2, h2], ... ]\n \"\"\"\n # print(file_name)\n # print(names_dict)\n result = []\n if not os.path.isfile(file_name):\n return None\n doc = xml.dom.minidom.parse(file_name)\n root = doc.documentElement\n size = root.getElementsByTagName('size')[0]\n width = int(size.getElementsByTagName('width')[0].childNodes[0].data)\n height = int(size.getElementsByTagName('height')[0].childNodes[0].data)\n\n objs = root.getElementsByTagName('object')\n for obj in objs:\n name = obj.getElementsByTagName('name')[0].childNodes[0].data\n name_id = names_dict[name]\n\n bndbox = obj.getElementsByTagName('bndbox')[0]\n xmin = int(float(bndbox.getElementsByTagName('xmin')[0].childNodes[0].data))\n ymin = int(float(bndbox.getElementsByTagName('ymin')[0].childNodes[0].data))\n xmax = int(float(bndbox.getElementsByTagName('xmax')[0].childNodes[0].data))\n ymax = int(float(bndbox.getElementsByTagName('ymax')[0].childNodes[0].data))\n\n x = (xmax + xmin) / 2.0 / width\n w = (xmax - xmin) / width\n y = (ymax + ymin) / 2.0 / height\n h = (ymax - ymin) / height\n\n result.append([name_id, x, y, w, h])\n return result\n\n\nclass Data:\n def __init__(self, voc_root_dir, voc_dir_ls, voc_names, class_num, batch_size, anchors, multi_scale_img=True, width=608, height=608):\n self.data_dirs = [os.path.join(os.path.join(voc_root_dir, voc_dir), \"JPEGImages\") for voc_dir in voc_dir_ls] # 数据文件路径\n self.class_num = class_num # 分类数\n self.batch_size = batch_size\n self.anchors = np.asarray(anchors).astype(np.float32).reshape([-1, 2]) / [width, height] # [9,2]\n print(\"anchors:\\n\", self.anchors)\n self.multi_scale_img = multi_scale_img # 多尺度缩放图片\n\n self.imgs_path = []\n self.labels_path = []\n\n self.num_batch = 0 # 多少个 batch 了\n self.num_imgs = 0 # 一共多少张图片\n\n self.width = width\n self.height = height\n\n self.names_dict = word2id(voc_names) # 名字到 id 的字典\n\n # 初始化各项参数\n self.__init_args()\n \n # 初始化各项参数\n def __init_args(self):\n print(\"message:开始初始化路径\")\n\n # init imgs path\n for voc_dir in self.data_dirs:\n for img_name in os.listdir(voc_dir):\n img_path = os.path.join(voc_dir, img_name)\n label_path = img_path.replace(\"JPEGImages\", \"Annotations\")\n label_path = label_path.replace(img_name.split('.')[-1], \"xml\")\n if not os.path.isfile(img_path):\n print(\"warning:VOC 图片文件'\"+str(img_path)+\"'不存在\")\n continue\n if not os.path.isfile(label_path):\n print(\"warning:VOC 标签文件'\"+str(label_path)+\"'不存在\")\n continue\n self.imgs_path.append(img_path)\n self.labels_path.append(label_path)\n self.num_imgs += 1 \n print(\"message:VOC 数据初始化完成,一共有 \"+str(self.num_imgs)+\" 张图片\")\n \n if self.num_imgs <= 0:\n raise ValueError(\"没有可训练的图片, 程序退出\")\n \n return\n \n # 读取图片\n def read_img(self, img_file):\n \"\"\"\n 读取 img_file, 并 resize\n return:img, RGB & float\n \"\"\"\n if not 
os.path.exists(img_file):\n return None\n img = cv2.imread(img_file)\n img = cv2.resize(img, (self.width, self.height))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(np.float32)\n img = img/255.0\n return img\n \n # 读取标签\n def read_label(self, label_file, names_dict):\n \"\"\"\n 读取 label_file, 并生成 label_y1, label_y2, label_y3\n return:label_y1, label_y2, label_y3\n \"\"\"\n contents = parse_voc_xml(label_file, names_dict) \n if not contents:\n return None, None, None\n\n label_y1 = np.zeros((self.height // 32, self.width // 32, 3, 5 + self.class_num), np.float32)\n label_y2 = np.zeros((self.height // 16, self.width // 16, 3, 5 + self.class_num), np.float32)\n label_y3 = np.zeros((self.height // 8, self.width // 8, 3, 5 + self.class_num), np.float32)\n\n y_true = [label_y3, label_y2, label_y1]\n ratio = {0: 8, 1: 16, 2: 32}\n\n for label in contents:\n label_id = int(label[0])\n box = np.asarray(label[1: 5]).astype(np.float32) # label中保存的就是 x,y,w,h\n\n best_giou = 0\n best_index = 0\n for i in range(len(self.anchors)):\n min_wh = np.minimum(box[2:4], self.anchors[i])\n max_wh = np.maximum(box[2:4], self.anchors[i])\n giou = (min_wh[0] * min_wh[1]) / (max_wh[0] * max_wh[1])\n if giou > best_giou:\n best_giou = giou\n best_index = i\n \n # 012->0, 345->1, 678->2\n x = int(np.floor(box[0] * self.width / ratio[best_index // 3]))\n y = int(np.floor(box[1] * self.height / ratio[best_index // 3]))\n k = best_index % 3\n\n y_true[best_index // 3][y, x, k, 0:4] = box\n y_true[best_index // 3][y, x, k, 4:5] = 1.0\n y_true[best_index // 3][y, x, k, 5 + label_id] = 1.0\n \n return label_y1, label_y2, label_y3\n\n # 加载 batch_size 的数据\n def __get_data(self):\n \"\"\"\n 加载 batch_size 的标签和数据\n return:imgs, label_y1, label_y2, label_y3\n \"\"\"\n # 十个 batch 随机一次 size \n if self.multi_scale_img and (self.num_batch % 10 == 0):\n random_size = random.randint(10, 19) * 32\n self.width = self.height = random_size\n \n imgs = []\n labels_y1, labels_y2, labels_y3 = [], [], []\n\n count = 0\n while count < self.batch_size:\n curr_index = random.randint(0, self.num_imgs - 1)\n img_name = self.imgs_path[curr_index]\n label_name = self.labels_path[curr_index]\n\n img = self.read_img(img_name)\n label_y1, label_y2, label_y3 = self.read_label(label_name, self.names_dict)\n if img is None:\n print(\"VOC 文件'\" + img_name + \"'读取异常\")\n continue\n if label_y1 is None:\n print(\"VOC 文件'\" + label_name + \"'读取异常\")\n continue\n imgs.append(img)\n labels_y1.append(label_y1)\n labels_y2.append(label_y2)\n labels_y3.append(label_y3)\n\n count += 1\n\n self.num_batch += 1\n imgs = np.asarray(imgs)\n labels_y1 = np.asarray(labels_y1)\n labels_y2 = np.asarray(labels_y2)\n labels_y3 = np.asarray(labels_y3)\n \n return imgs, labels_y1, labels_y2, labels_y3\n\n # 迭代器\n def __next__(self):\n \"\"\"\n 迭代获得一个 batch 的数据\n \"\"\"\n return self.__get_data()\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.asarray",
"numpy.floor",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
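In `read_label` above, each ground-truth box is matched to the anchor with the largest width/height overlap ratio (the variable is called `giou`, but with both boxes pinned to the same corner it is simply an IoU over w and h). A small NumPy sketch of that matching, with made-up normalized anchors:

```python
# Sketch: pick the anchor whose (w, h) best overlaps the box's (w, h),
# using min(w)*min(h) / (max(w)*max(h)) as in read_label above.
import numpy as np

anchors = np.array([[0.05, 0.08],   # illustrative normalized (w, h) anchors,
                    [0.12, 0.20],   # not the repo's actual values
                    [0.35, 0.40]])

def best_anchor(box_wh, anchors):
    min_wh = np.minimum(box_wh, anchors)
    max_wh = np.maximum(box_wh, anchors)
    iou = (min_wh[:, 0] * min_wh[:, 1]) / (max_wh[:, 0] * max_wh[:, 1])
    return int(np.argmax(iou)), iou

idx, iou = best_anchor(np.array([0.10, 0.18]), anchors)
print(idx, iou)   # -> 1: the middle anchor overlaps this box best
```

In the original, `best_index // 3` then selects which of the three output scales (stride 8, 16, or 32) the box is written to, and `best_index % 3` selects the anchor slot within that scale.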
carlos-alcan/network_app_classification | [
"faa19842ed17b277259dd64e14c7133ce6a61e56"
] | [
"engineered_features.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 16 12:14:37 2019\n\n@author: carlosalcantara\n\"\"\"\n\n'''\nExpand data with engineered features using the feature_engineering_function.py\nSaves new csv file with specified name, overwriting input file if no save file\nname is given.\n\nUsage: engineered_features.py csvfile [savefile=csvfile]\n'''\nimport pandas as pd\nimport sys\nimport feature_engineering_function\n\n# Check for command line arguments\nif len(sys.argv) < 1:\n print('Usage: engineered_features.py csvfile [savefile=csvfile]')\n sys.exit(-1)\n\n# use original file name as new csv filename if none specified\nfile = sys.argv[1]\nif len(sys.argv) > 2:\n savefile = sys.argv[2]\nelse:\n savefile = file\n\n# read NetFlow data file\ndf = pd.read_csv(file)\n# add engineered features\ndf = feature_engineering_function.BLINC_features(df)\n# write NetFlow data file\ndf.to_csv(savefile, index=False)"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
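The manual argv handling above is fragile, so a hedged argparse equivalent may be useful for comparison; it assumes the repo's own `feature_engineering_function` module is importable and changes nothing else about the flow.

```python
# Sketch: same read -> BLINC_features -> write pipeline, but with argparse
# providing usage/help and the optional savefile default.
import argparse
import pandas as pd
import feature_engineering_function  # the repo's module, assumed on the path

parser = argparse.ArgumentParser(
    description="Expand NetFlow data with engineered features.")
parser.add_argument("csvfile", help="input NetFlow csv")
parser.add_argument("savefile", nargs="?", default=None,
                    help="output csv (defaults to overwriting csvfile)")
args = parser.parse_args()

df = pd.read_csv(args.csvfile)
df = feature_engineering_function.BLINC_features(df)
df.to_csv(args.savefile or args.csvfile, index=False)
```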
CAMeL-Lab/CAMeLBERT_morphosyntactic_tagger | [
"5bea542c2e731d263281d0ab16ba9c065f602f94"
] | [
"scripts/run_token_classification.py"
] | [
"# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright 2018-2021 New York University Abu Dhabi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\" Fine-tuning pre-trained models for token classification tasks.\n Heavily adapted from: https://github.com/huggingface/transformers/blob/\n v3.0.1/examples/token-classification/run_ner.py\"\"\"\n\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nfrom seqeval.metrics import (\n accuracy_score as seq_accuracy_score,\n f1_score as seq_f1_score,\n precision_score as seq_precision_score,\n recall_score as seq_recall_score\n)\nfrom sklearn.metrics import (\n accuracy_score,\n f1_score,\n precision_score,\n recall_score\n)\n\nfrom torch import nn\nfrom transformers import (\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n EvalPrediction,\n HfArgumentParser,\n Trainer,\n TrainingArguments,\n set_seed,\n)\nfrom utils import TokenClassificationDataSet, Split, get_labels\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are\n going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from \"\n \"huggingface.co/models\"}\n )\n\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if \"\n \"not the same as model_name\"}\n )\n\n # If you want to tweak more attributes on your tokenizer, you should do it\n # in a distinct script, or just modify its tokenizer_config.json.\n\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if \"\n \"not the same as model_name\"}\n )\n\n use_fast: bool = field(default=False, metadata={\"help\": \"Set this flag to \"\n \"use fast \"\n \"tokenization.\"})\n task_type: Optional[str] = field(\n default=\"ner\", metadata={\"help\": \"the name of the task (ner or pos)\"}\n )\n\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the \"\n \"pretrained models downloaded from s3\"}\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for \n training and eval.\n \"\"\"\n\n data_dir: str = field(\n metadata={\"help\": \"The input data dir. 
Should contain the .txt files \"\n \"for a CoNLL-2003-formatted task.\"}\n )\n labels: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Path to a file containing all labels.\"},\n )\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after \"\n \"tokenization. Sequences longer than this will be truncated, \"\n \"sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and \"\n \"evaluation sets\"}\n )\n blind_test: bool = field(\n default=False, metadata={\"help\": \"Use blind test set\"}\n )\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments,\n DataTrainingArguments,\n TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a\n # json file, let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(\n json_file=os.path.abspath(\n sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists \"\n \"and is not empty. Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=(logging.INFO if training_args.local_rank in [-1, 0]\n else logging.WARN),\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, \"\n \"16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n # Prepare task\n labels = get_labels(data_args.labels)\n label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}\n num_labels = len(labels)\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can\n # concurrently download model & vocab.\n\n config = AutoConfig.from_pretrained(\n (model_args.config_name if model_args.config_name\n else model_args.model_name_or_path),\n num_labels=num_labels,\n id2label=label_map,\n label2id={label: i for i, label in enumerate(labels)},\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n (model_args.tokenizer_name if model_args.tokenizer_name\n else model_args.model_name_or_path),\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast,\n )\n model = AutoModelForTokenClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # Get datasets\n train_dataset = (\n TokenClassificationDataSet(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n 
overwrite_cache=data_args.overwrite_cache,\n mode=Split.train,\n )\n if training_args.do_train\n else None\n )\n eval_dataset = (\n TokenClassificationDataSet(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.dev,\n )\n if training_args.do_eval\n else None\n )\n\n def align_predictions(predictions: np.ndarray,\n label_ids: np.ndarray) -> Tuple[List[int], List[int]]:\n preds = np.argmax(predictions, axis=2)\n\n batch_size, seq_len = preds.shape\n\n out_label_list = [[] for _ in range(batch_size)]\n preds_list = [[] for _ in range(batch_size)]\n\n for i in range(batch_size):\n for j in range(seq_len):\n if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:\n out_label_list[i].append(label_map[label_ids[i][j]])\n preds_list[i].append(label_map[preds[i][j]])\n\n return preds_list, out_label_list\n\n def compute_metrics(p: EvalPrediction) -> Dict:\n preds_list, out_label_list = align_predictions(p.predictions,\n p.label_ids)\n # If task type is NER, use seqeval metrics.\n # Otherwise, use scikit learn\n if model_args.task_type == \"ner\":\n return {\n \"accuracy\": seq_accuracy_score(out_label_list, preds_list),\n \"precision\": seq_precision_score(out_label_list, preds_list),\n \"recall\": seq_recall_score(out_label_list, preds_list),\n \"f1\": seq_f1_score(out_label_list, preds_list),\n }\n else:\n # Flatten the preds_list and out_label_list\n preds_list = [p for sublist in preds_list for p in sublist]\n out_label_list = [p for sublist in out_label_list for p in sublist]\n return {\n \"accuracy\": accuracy_score(out_label_list, preds_list),\n \"precision_micro\": precision_score(out_label_list, preds_list,\n average=\"micro\"),\n \"recall_micro\": recall_score(out_label_list, preds_list,\n average=\"micro\"),\n \"f1_micro\": f1_score(out_label_list, preds_list,\n average=\"micro\"),\n \"precision_macro\": precision_score(out_label_list, preds_list,\n average=\"macro\"),\n \"recall_macro\": recall_score(out_label_list, preds_list,\n average=\"macro\"),\n \"f1_macro\": f1_score(out_label_list, preds_list,\n average=\"macro\"),\n }\n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n trainer.train(\n model_path=(model_args.model_name_or_path \n if os.path.isdir(model_args.model_name_or_path)\n else None)\n )\n trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n # Evaluation\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n result = trainer.evaluate()\n\n output_eval_file = os.path.join(training_args.output_dir,\n \"eval_results.txt\")\n if trainer.is_world_master():\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key, value in result.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n\n results.update(result)\n\n # Predict\n if training_args.do_predict:\n data_split = Split.test\n if data_args.blind_test:\n data_split = Split.blind_test\n test_dataset = TokenClassificationDataSet(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n 
labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=data_split,\n )\n\n predictions, label_ids, metrics = trainer.predict(test_dataset)\n preds_list, _ = align_predictions(predictions, label_ids)\n\n output_test_results_file = os.path.join(training_args.output_dir,\n f\"{data_split.value}_results.txt\")\n if trainer.is_world_master():\n with open(output_test_results_file, \"w\") as writer:\n for key, value in metrics.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n\n # Save predictions\n output_test_predictions_file = os.path.join(training_args.output_dir,\n f\"{data_split.value}_predictions.txt\")\n if trainer.is_world_master():\n with open(output_test_predictions_file, \"w\") as writer:\n with open(os.path.join(data_args.data_dir, f\"{data_split.value}.txt\"), \"r\") as f:\n example_id = 0\n for line in f:\n if (line.startswith(\"-DOCSTART-\") or line == \"\"\n or line == \"\\n\"):\n writer.write(line)\n if not preds_list[example_id]:\n example_id += 1\n elif preds_list[example_id]:\n output_line = (line.split()[0] + \" \" + \n preds_list[example_id].pop(0) + \"\\n\")\n writer.write(output_line)\n else:\n logger.warning(\n \"Maximum sequence length exceeded: \"\n \"No prediction for '%s'.\", line.split()[0])\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"sklearn.metrics.precision_score",
"numpy.argmax",
"sklearn.metrics.f1_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
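`align_predictions` above recovers per-token label strings by taking the argmax over the logits and dropping every position whose gold id equals `nn.CrossEntropyLoss().ignore_index` (-100 by default in PyTorch), which marks padding and subword continuations. A toy NumPy illustration with a hypothetical three-label map:

```python
# Sketch: argmax over logits, then filter out ignored positions so that
# predictions and gold labels line up token-for-token, as align_predictions does.
import numpy as np

label_map = {0: "O", 1: "B-PER", 2: "I-PER"}   # hypothetical label inventory
IGNORE = -100                                  # torch CrossEntropyLoss default

logits = np.array([[[2.0, 0.1, 0.1],           # one sequence, four positions
                    [0.1, 3.0, 0.2],
                    [0.1, 0.2, 2.5],
                    [1.0, 0.0, 0.0]]])
label_ids = np.array([[0, 1, IGNORE, IGNORE]])  # last two positions ignored

preds = np.argmax(logits, axis=2)
preds_list = [[label_map[p] for p, l in zip(ps, ls) if l != IGNORE]
              for ps, ls in zip(preds, label_ids)]
out_label_list = [[label_map[l] for l in ls if l != IGNORE]
                  for ls in label_ids]
print(preds_list, out_label_list)   # [['O', 'B-PER']] [['O', 'B-PER']]
```

This filtering is what lets the script feed the same aligned lists to either seqeval (for NER) or scikit-learn (for POS) metrics.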
chunfuchen/qiskit-acqua-tutorials | [
"74b0bcaac1678fc6c0de5be13e99d7ecd11b3075"
] | [
"artificial_intelligence/qsvm_kernel_multiclass.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nfrom datasets import *\nfrom qiskit_aqua.utils import split_dataset_to_data_and_labels\nfrom qiskit_aqua.input import get_input_instance\nfrom qiskit_aqua import run_algorithm\nimport numpy as np\n\nn = 2 # dimension of each data point\n\nsample_Total, training_input, test_input, class_labels = Wine(training_size=40,\n test_size=10, n=n, PLOT_DATA=False)\n\ntemp = [test_input[k] for k in test_input]\ntotal_array = np.concatenate(temp)\n\nparams = {\n 'problem': {'name': 'svm_classification', 'random_seed': 10598},\n 'algorithm': {\n 'name': 'QSVM.Kernel',\n },\n 'backend': {'name': 'qasm_simulator', 'shots': 1024},\n # 'multiclass_extension': {'name': 'OneAgainstRest'},\n 'multiclass_extension': {'name': 'AllPairs'},\n # 'multiclass_extension': {'name': 'ErrorCorrectingCode', 'code_size': 5},\n 'feature_map': {'name': 'SecondOrderExpansion', 'depth': 2, 'entangler_map': {0: [1]}}\n }\n\nalgo_input = get_input_instance('SVMInput')\nalgo_input.training_dataset = training_input\nalgo_input.test_dataset = test_input\nalgo_input.datapoints = total_array\n\nresult = run_algorithm(params, algo_input)\nprint(result)\n"
] | [
[
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
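The config above selects the 'AllPairs' multiclass extension, which decomposes a k-class problem into k(k-1)/2 binary problems and combines their outputs by voting. A conceptual sketch of that decomposition follows; it is illustrative only and does not reproduce Qiskit Aqua's internal estimator interface.

```python
# Sketch: the one-vs-one ("all pairs") scheme in miniature. One binary
# classifier is trained per unordered class pair; prediction is a majority
# vote over the pairwise winners.
from itertools import combinations
from collections import Counter

classes = ["A", "B", "C"]
pairs = list(combinations(classes, 2))   # [('A','B'), ('A','C'), ('B','C')]

def vote(pair_decisions):
    # pair_decisions: the winning class of each pairwise classifier
    return Counter(pair_decisions).most_common(1)[0][0]

print(pairs)
print(vote(["A", "C", "C"]))   # -> 'C'
```

The commented-out alternatives in the params dict ('OneAgainstRest', 'ErrorCorrectingCode') are different decompositions of the same k-class problem, trading classifier count against robustness.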
danielbee/PracticalIntroDataSci | [
"feecd7d1b18ba44fb3ea59d7709c2ff493c0c79f"
] | [
"scripts/parse_weather.py"
] | [
"# The purpose of this script is to collect all the station data into a single data structure. \n# This will require regular expressions to find things like station changes. \n\n#the hope is that we can simply export this single data structure to a single file is whatever format we want. \n\n# Need to figure out how to deal with 'null' values. \nimport re\n\nimport pandas as pd\n\ndef main(): \n\n dataPath = '../data/weather/'\n dataStationPath = dataPath+'stations/'\n with open(dataPath+'stations.txt') as f:\n stations = f.read().splitlines()\n bigFrame = []\n stationDataRaw = {}\n for station in stations: \n print(station)\n stationDataRaw[station]= open(dataStationPath+station+'.txt').read().splitlines()\n stationFrame = getDataFrame(stationDataRaw[station])\n # Extract things like height above sea level, longitude and latitude and site changes.\n stationFrame = getDataExtras(stationDataRaw[station],stationFrame)\n # add a column for the station\n stationFrame['station'] = station\n # Make station column the most signifiant index in the multiIndex\n stationFrame.set_index(['station', stationFrame.index],inplace=True)\n # Append to list of dataframes\n bigFrame.append(stationFrame)\n # Combine all the dataframes\n stationsData = pd.concat(bigFrame)\n # print(stationsData.reset_index().dtypes)\n # Print out in desired formats\n stationsData.to_excel(dataPath+'stationData.xlsx')\n stationsData.to_csv(dataPath+'stationData.csv')\n \n stationsData.to_string(dataPath+'stationData.txt')\n\n# bit of an assumption\ntableStart = re.compile('\\s{3}yyyy')\nreWord = re.compile('\\w+')\nreNum = re.compile('[0-9.]+')\ndef getDataFrame(raw):\n for ln,line in enumerate(raw):\n if re.search(tableStart,line):\n tableStartLine = ln\n # stop going through lines\n break\n\n table = raw[tableStartLine:]\n # remove empty string lines\n table = list(filter(None, table))\n headers= table[0].split()\n #print(headers)\n prevEnd = 0\n units = {}\n headerCols = [re.search(header,table[0]) for header in headers]\n for colI,col in enumerate(headerCols):\n units[headers[colI]] = reWord.findall(table[1],prevEnd,col.end())\n prevEnd = col.end()\n records = []\n for row in table[2:]:\n \n prevEnd = 0\n record = {}\n for colI,col in enumerate(headerCols):\n res= reNum.findall(row,prevEnd,col.end())\n \n record[headers[colI]] = res[0] if res else None\n prevEnd = col.end()\n if record['yyyy'] != None:\n records.append(record)\n \n df = pd.DataFrame.from_dict(records)\n df[['yyyy','mm']] = df[['yyyy','mm']].astype(int)\n # other columns\n df[['tmax','tmin','af','rain','sun']] = df[['tmax','tmin','af','rain','sun']].astype(float)\n df.set_index(['yyyy', 'mm'],inplace=True)\n #print(df)\n return df\n \nimport math\ndef getDataExtras(raw,df):\n topRaw = '\\n'.join(raw[0:20])\n\n gridRef = re.findall(r'\\d+E \\d+N',topRaw)\n asml=[]\n latlon=[]\n lowerYr=[]\n upperYrMonth=[]\n upperYr=[]\n ## Extract Features\n for line in raw[0:20]:\n if re.search(gridRef[0],line):\n print(line)\n if len(gridRef) > 1 : \n yearSearch = re.search(r'([1-2][7-9,0][0-9]{2})?\\s+(\\bfrom\\b|\\bafter\\b|\\bto\\b|\\buntil\\b)\\s+([a-zA-Z]*)\\s*([1-2][7-9,0][0-9]{2})',line)\n #print(yearSearch)\n if yearSearch:\n lowerYr.append(yearSearch.group(1))\n upperYrMonth.append(yearSearch.group(3))\n upperYr.append(yearSearch.group(4))\n print('from {} to {} {}'.format(lowerYr[0],upperYrMonth[0],upperYr[0]))\n\n asml.append(re.search(r'(\\d+)\\s*m\\w*\\samsl',line).group(1))\n latlonSearch = re.search(r'lat\\s*(-*\\d+\\.\\d+) 
lon\\s*(-*\\d+\\.\\d+)',str.lower(line))\n if latlonSearch:\n latlon.append((latlonSearch.group(1),latlonSearch.group(2)))\n else:\n #print(\"No long lat!!\")\n latlon.append(getLatLong(gridRef[0]))\n if len(gridRef) > 1 :\n # we have site change\n if re.search(gridRef[1],line):\n print(line)\n yearSearch = re.search(r'([1-2][7-9,0][0-9]{2})?\\s+(\\bfrom\\b|\\bafter\\b|\\bto\\b)\\s+([a-zA-Z]*)\\s*([1-2][7-9,0][0-9]{2})',line)\n #print(yearSearch)\n if yearSearch:\n lowerYr.append(yearSearch.group(1))\n upperYrMonth.append(yearSearch.group(3))\n upperYr.append(yearSearch.group(4))\n print('from {} to {} {}'.format(lowerYr[-1],upperYrMonth[-1],upperYr[-1]))\n asml.append(re.search(r'(\\d+)\\s*m\\w*\\samsl',line).group(1))\n latlonSearch = re.search(r'lat\\s*(-*\\d+\\.\\d+) lon\\s*(-*\\d+\\.\\d+)',str.lower(line))\n if latlonSearch:\n latlon.append((latlonSearch.group(1),latlonSearch.group(2)))\n else:\n #print(\"No long lat!!\")\n latlon.append(getLatLong(gridRef[0]))\n #print('asml:{}\\nlatlon:{}'.format(asml,latlon))\n ## Add features to dataframe\n\n # This is wrong, but I just want to get data in there and start classifying.\n # Technically, we should determine site changes, which may have a significant impact on frost days if asml gets higher. \n extra_df = setExtrasInDf(df,\n df_filter= df.index.get_level_values('yyyy') > 0,\n asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n )\n \n with open('dfL.txt','a') as f:\n print(extra_df.to_string(), file=f)\n return extra_df\n # NOTE: the early return above makes everything below in this function unreachable.\n if len(gridRef) >1:\n # Need to apply features using extracted years. \n #print(df.dtypes)\n tempTypeDf = df.reset_index()\n #tempTypeDf[['yyyy','mm']] = tempTypeDf[['yyyy','mm']].astype(int)\n #tempTypeDf[['tmax','tmin','af','rain','sun']] = tempTypeDf[['tmax','tmin','af','rain','sun']].astype(float)\n #defensive\n if len(lowerYr) >0 and len(upperYr) >0:\n # We were able to find SOMETHING we can use.\n print('lower: {} \\t upper: {} \\t month {}'.format(lowerYr,upperYr,upperYrMonth))\n #if upperYr[0] > lowerYr[1]: \n # print('issue')\n if len(lowerYr) == 1:\n # super simple\n #if upperYrMonth[0]:\n # \n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']<int(upperYr[0]) or (tempTypeDf['yyyy']==int(upperYr[0]) and tempTypeDf['mm']<int(upperYrMonth[0])),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[0]) or (tempTypeDf['yyyy']==int(upperYr[0]) and tempTypeDf['mm']>=int(upperYrMonth[0])),\n # asml=asml[1], lat=latlon[1][0],long=latlon[1][1],gridRef=gridRef[1]\n # )\n #else:\n tempTypeDf = setExtrasInDf(tempTypeDf,\n df_filter= tempTypeDf['yyyy']<int(upperYr[0]),\n asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n )\n\n tempTypeDf = setExtrasInDf(tempTypeDf,\n df_filter=tempTypeDf['yyyy']>=int(upperYr[0]),\n asml=asml[1], lat=latlon[1][0],long=latlon[1][1],gridRef=gridRef[1]\n )\n #if lowerYr[0] and upperYr[0]:\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(lowerYr[0]) and tempTypeDf['yyyy']<int(upperYr[0]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[0]),\n # asml=asml[1], lat=latlon[1][0],long=latlon[1][1],gridRef=gridRef[1]\n # )\n #elif upperYr[0] and lowerYr[0] == None:\n \n\n #if lowerYr[0] == None and lowerYr[1] == None:\n # if upperYr[0] and upperYr[1]:\n # # Nice simple case\n # 
if upperYr[0] == upperYr[1]:\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']<int(upperYr[1]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[1]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n ## TODO: \n #if upperYrMonth[0] and upperYrMonth[1] :\n#\n #elif upperYrMonth[0] and upperYrMonth[1] == None:\n #elif upperYrMonth[1]:\n #else:\n else : \n print('unable to acquire site change year. Will dump other grid refs of {} and keep only {}.'.format(gridRef[1:],gridRef[0]))\n if len(upperYr) >0 :\n #tempTypeDf = setExtrasInDf(\n # tempTypeDf,\n # df_filter= tempTypeDf['yyyy']<int(upperYr[-1]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0])\n #tempTypeDf = setExtrasInDf(\n # tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[-1]),\n # asml=asml[-1], lat=latlon[-1][0],long=latlon[-1][1],gridRef=gridRef[-1])\n #tempTypeDf.loc[tempTypeDf['yyyy']<int(upperYr[-1]),'asml'] = int(asml[0])\n #tempTypeDf.loc[tempTypeDf['yyyy']<int(upperYr[-1]),'Lat'] = float(latlon[0][0])\n #tempTypeDf.loc[tempTypeDf['yyyy']<int(upperYr[-1]),'Long'] = float(latlon[0][1])\n #tempTypeDf.loc[tempTypeDf['yyyy']>=int(upperYr[-1]),'asml'] = int(asml[-1])\n #tempTypeDf.loc[tempTypeDf['yyyy']>=int(upperYr[-1]),'Lat'] = float(latlon[-1][0])\n #tempTypeDf.loc[tempTypeDf['yyyy']>=int(upperYr[-1]),'Long'] = float(latlon[-1][1])\n #print(len(tempTypeDf.reset_index()['yyyy'])) \n #print(len([int(x) for x in tempTypeDf.index.get_level_values('yyyy').values if (math.isnan(float(x)) == False)]))\n with open('df.txt','a') as f:\n print(tempTypeDf.to_string(), file=f)\n # print(tempTypeDf.reset_index().dropna(subset=['yyyy']).to_string(), file=f)\n #with open('df_before.txt','w') as f:\n # print(tempTypeDf.reset_index().to_string(), file=f)\n #.loc[:(upperYr[-1],),:])\n #print([int(x) for x in tempTypeDf.index.get_level_values('yyyy').values if (math.isnan(float(x)) == False and x == upperYr[-1])])\n #print([int(x) for x in tempTypeDf.index.get_level_values('yyyy').values if (math.isnan(float(x)) == False and x != upperYr[-1])])\n\ndef setExtrasInDf(df, df_filter, asml, lat, long, gridRef): \n df.loc[df_filter,'asml'] = int(asml)\n df.loc[df_filter,'lat'] = float(lat)\n df.loc[df_filter,'long'] = float(long)\n df.loc[df_filter,'gridRef'] = str(gridRef)\n return df\ndef getLatLong(gridRef):\n import requests\n page = requests.get('http://www.nearby.org.uk/coord.cgi?p='+gridRef+'&f=conv')\n #print(page.text)\n pageSearch = re.search(r'Decimal: <B>(-*\\d+\\.\\d+) (-*\\d+\\.\\d+)</B>',page.text)\n return (pageSearch.group(1),pageSearch.group(2))\nmain()"
] | [
[
"pandas.concat",
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
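A minimal runnable sketch of the station-keyed MultiIndex pattern that parse_weather.py builds with set_index(['station', frame.index]) and pd.concat; the station names and values below are invented for illustration, and concat's dict-keys behaviour is used as an equivalent shortcut:

    import pandas as pd

    # One frame per station, indexed by (yyyy, mm), as getDataFrame produces.
    def station_frame(tmax):
        df = pd.DataFrame({"yyyy": [2000, 2000], "mm": [1, 2], "tmax": tmax})
        return df.set_index(["yyyy", "mm"])

    frames = {"aberporth": station_frame([7.1, 7.3]),
              "armagh": station_frame([6.5, 6.9])}

    # Concatenating a dict prepends its keys as the most significant index
    # level, matching main()'s per-frame set_index(['station', frame.index]).
    stations_data = pd.concat(frames)
    stations_data = stations_data.rename_axis(["station", "yyyy", "mm"])
    print(stations_data.loc["armagh"])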
51N84D/Virtual-Try-On | [
"3b3d4f6066885446e2a6eadb6c2668237e62e03b"
] | [
"data/dataloader.py"
] | [
"# coding=utf-8\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader, Dataset\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom addict import Dict\nimport os.path as osp\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nimport sys\nimport cv2\nimport json\n\nclass CPDataset(data.Dataset):\n def __init__(self, opt):\n super(CPDataset, self).__init__()\n # base setting\n self.opt = opt\n\n self.dataroot = opt.data.files.base\n\n if opt.model.is_train:\n self.datamode = \"train\"\n self.data_list = opt.data.files.train\n else:\n self.datamode = \"test\"\n self.data_list = opt.data.files.test\n\n\n print(self.data_list)\n self.fine_height = opt.data.transforms.height\n self.fine_width = opt.data.transforms.width\n self.radius = opt.data.transforms.radius\n\n self.data_path = osp.join(self.dataroot, self.datamode)\n\n self.transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]\n )\n\n # load data list\n im_names = []\n c_names = []\n\n with open(osp.join(self.dataroot, self.data_list), \"r\") as f:\n print(f)\n for line in f.readlines():\n im_name, c_name = line.strip().split()\n im_names.append(im_name)\n c_names.append(c_name)\n\n self.im_names = im_names\n self.c_names = c_names\n\n def name(self):\n return \"CPDataset\"\n\n def __getitem__(self, index):\n c_name = self.c_names[index]\n im_name = self.im_names[index]\n\n # cloth image & cloth mask\n c = Image.open(osp.join(self.data_path, \"cloth\", c_name))\n #c.show()\n cm = Image.open(osp.join(self.data_path, \"cloth-mask\", c_name))\n\n c = self.transform(c) # [-1,1]\n cm_array = np.array(cm)\n cm_array = (cm_array >= 128).astype(np.float32)\n cm = torch.from_numpy(cm_array) # [0,1]\n cm.unsqueeze_(0)\n\n # person image\n im = Image.open(osp.join(self.data_path, \"image\", im_name))\n im = self.transform(im) # [-1,1]\n\n # load parsing image\n parse_name = im_name.replace(\".jpg\", \".png\")\n im_parse = Image.open(osp.join(self.data_path, \"image-parse\", parse_name))\n parse_array = np.array(im_parse)\n\n # -------Find segmentation class labels manually\n #Image1 = Image.open(osp.join(self.data_path, 'image-parse', parse_name))\n Image2 = Image.open(osp.join(self.data_path, \"image\", im_name))\n\n #plt.imshow(Image1)\n #plt.imshow(parse_array, alpha=0.5)\n #plt.imshow(Image2)\n\n #plt.colorbar()\n #plt.show()\n # shirt = 126, pants = 59\n # hair = 76, face = 29\n # ------End\n\n parse_shape = (parse_array > 0).astype(np.float32)\n\n parse_cloth = (parse_array == 126).astype(np.float32)\n\n # get cropped top img\n source = Image.open(osp.join(self.data_path, \"image\", im_name))\n mask = Image.fromarray(np.uint8(255 * parse_cloth)).convert(\"L\")\n blankImg = Image.new(\"RGB\", (self.fine_height, self.fine_width), (255, 255, 255))\n\n imgCropped = Image.composite(source, blankImg, mask)\n #imgCropped.show()\n #mask.show()\n imgCropped = self.transform(imgCropped) # [-1,1]\n\n # shape downsample\n parse_shape = Image.fromarray((parse_shape * 255).astype(np.uint8))\n parse_shape = parse_shape.resize(\n (self.fine_width // 16, self.fine_height // 16), Image.BILINEAR\n )\n parse_shape = parse_shape.resize((self.fine_width, self.fine_height), Image.BILINEAR)\n shape = self.transform(parse_shape) # [-1,1]\n pcm = torch.from_numpy(parse_cloth) # [0,1]\n #plt.imshow(pcm)\n #plt.show()\n\n # clean up\n im_c = im * pcm + (1 - pcm) # [-1,1], fill 1 for other parts\n\n pcm = pcm.unsqueeze_(0) \n\n 
#-----pose\n pose_name = im_name.replace('.jpg', '_keypoints.json')\n with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:\n pose_label = json.load(f)\n pose_data = pose_label['people'][0]['pose_keypoints']\n pose_data = np.array(pose_data)\n pose_data = pose_data.reshape((-1,3))\n\n point_num = pose_data.shape[0]\n pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)\n r = self.radius\n im_pose = Image.new('L', (self.fine_width, self.fine_height))\n pose_draw = ImageDraw.Draw(im_pose)\n for i in range(point_num):\n one_map = Image.new('L', (self.fine_width, self.fine_height))\n draw = ImageDraw.Draw(one_map)\n pointx = pose_data[i,0]\n pointy = pose_data[i,1]\n if pointx > 1 and pointy > 1:\n draw.ellipse((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')\n pose_draw.ellipse((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')\n #plt.imshow(one_map, cmap='jet', alpha=.9)\n #plt.show()\n one_map = self.transform(one_map) #[-1,1]\n pose_map[i] = one_map[0]\n\n #plt.imshow(im_pose, cmap='jet', alpha=0.5)\n #plt.show()\n\n #for i in range(18):\n # show_ = np.squeeze(pose_map[i])\n # plt.imshow(Image2)\n # plt.imshow(show_, cmap=\"jet\", alpha=.5)\n # plt.show()\n\n #just for visualization\n im_pose = self.transform(im_pose)\n\n\n result = {\n \"c_name\": c_name, # for visualization\n \"im_name\": im_name, # for visualization or ground truth\n \"pose_image\": im_pose, #visualize pose, can overlay with image for better visualization\n \"pose\": pose_map, #for input\n \"cloth\": c, # for input\n \"cloth_mask\": cm, # for input\n \"image\": imgCropped, # for visualization\n \"parse_cloth\": pcm, # was im_c # for ground truth\n \"shape\": shape, # for visualization\n }\n\n return Dict(result)\n\n def __len__(self):\n return len(self.im_names)\n\n\nclass CPDataLoader(object):\n def __init__(self, opt, dataset):\n super(CPDataLoader, self).__init__()\n\n if opt.data.loaders.shuffle:\n train_sampler = torch.utils.data.sampler.RandomSampler(dataset)\n else:\n train_sampler = None\n\n self.data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=opt.data.loaders.batch_size,\n shuffle=(train_sampler is None),\n num_workers=opt.data.loaders.num_workers,\n pin_memory=True,\n sampler=train_sampler,\n )\n self.dataset = dataset\n self.data_iter = self.data_loader.__iter__()\n\n def next_batch(self):\n try:\n batch = self.data_iter.__next__()\n except StopIteration:\n self.data_iter = self.data_loader.__iter__()\n batch = self.data_iter.__next__()\n\n return batch\n\n\ndef get_loader(opts):\n return DataLoader(\n CPDataset(opts),\n batch_size=opts.data.loaders.get(\"batch_size\", 4),\n shuffle=True,\n num_workers=opts.data.loaders.get(\"num_workers\", 8),\n )\n"
] | [
[
"torch.zeros",
"numpy.uint8",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"numpy.array",
"torch.utils.data.sampler.RandomSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
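CPDataLoader.next_batch above wraps a DataLoader so iteration never stops at an epoch boundary; a self-contained sketch of that wrap-around pattern, with a toy TensorDataset standing in for CPDataset:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.arange(10).float())   # stand-in for CPDataset
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    data_iter = iter(loader)

    def next_batch():
        # Re-create the iterator when an epoch ends, as CPDataLoader.next_batch does.
        global data_iter
        try:
            return next(data_iter)
        except StopIteration:
            data_iter = iter(loader)
            return next(data_iter)

    for step in range(5):        # more steps than the 3 batches one epoch holds
        (batch,) = next_batch()  # batches keep coming across the epoch boundary
        print(step, batch.tolist())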
pmohtat/PyBaMM | [
"8f0a6d82e26c19f5735ed81b55671574af29eb16"
] | [
"tests/unit/test_expression_tree/test_operations/test_jac.py"
] | [
"#\n# Tests for the jacobian methods\n#\nimport pybamm\n\nimport numpy as np\nimport unittest\nfrom scipy.sparse import eye\nfrom tests import get_mesh_for_testing\n\n\ndef test_multi_var_function(arg1, arg2):\n return arg1 + arg2\n\n\nclass TestJacobian(unittest.TestCase):\n def test_variable_is_statevector(self):\n a = pybamm.Symbol(\"a\")\n with self.assertRaisesRegex(\n TypeError, \"Jacobian can only be taken with respect to a 'StateVector'\"\n ):\n a.jac(a)\n\n def test_linear(self):\n y = pybamm.StateVector(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n\n y0 = np.ones(4)\n\n func = u\n jacobian = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v\n jacobian = np.array([[0, 0, -1, 0], [0, 0, 0, -1]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 3 * u + 4 * v\n jacobian = np.array([[3, 0, 4, 0], [0, 3, 0, 4]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 7 * u - v * 9\n jacobian = np.array([[7, 0, -9, 0], [0, 7, 0, -9]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n A = pybamm.Matrix(2 * eye(2))\n func = A @ u\n jacobian = np.array([[2, 0, 0, 0], [0, 2, 0, 0]])\n dfunc_dy = func.jac(y).simplify().evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u @ pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n func.jac(y)\n\n # when differentiating by independent part of the state vector\n jacobian = np.array([[0, 0], [0, 0]])\n du_dv = u.jac(v).evaluate().toarray()\n np.testing.assert_array_equal(du_dv, jacobian)\n\n def test_nonlinear(self):\n y = pybamm.StateVector(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n\n y0 = np.array([1, 2, 3, 4])\n\n func = v ** 2\n jacobian = np.array([[0, 0, 6, 0], [0, 0, 0, 8]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 2 ** v\n jacobian = np.array(\n [[0, 0, 2 ** 3 * np.log(2), 0], [0, 0, 0, 2 ** 4 * np.log(2)]]\n )\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = v ** v\n jacobian = [[0, 0, 27 * (1 + np.log(3)), 0], [0, 0, 0, 256 * (1 + np.log(4))]]\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_almost_equal(jacobian, dfunc_dy.toarray())\n\n func = u * v\n jacobian = np.array([[3, 0, 1, 0], [0, 4, 0, 2]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u * (u + v)\n jacobian = np.array([[5, 0, 1, 0], [0, 8, 0, 2]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 1 / u + v / 3\n jacobian = np.array([[-1, 0, 1 / 3, 0], [0, -1 / 4, 0, 1 / 3]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u / v\n jacobian = np.array([[1 / 3, 0, -1 / 9, 0], [0, 1 / 4, 0, -1 / 8]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = v / (1 + v)\n jacobian = np.array([[0, 0, 1 / 16, 0], [0, 0, 0, 1 / 25]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n def 
test_multislice_raises(self):\n y1 = pybamm.StateVector(slice(0, 4), slice(7, 8))\n y_dot1 = pybamm.StateVectorDot(slice(0, 4), slice(7, 8))\n y2 = pybamm.StateVector(slice(4, 7))\n with self.assertRaises(NotImplementedError):\n y1.jac(y1)\n with self.assertRaises(NotImplementedError):\n y2.jac(y1)\n with self.assertRaises(NotImplementedError):\n y_dot1.jac(y1)\n\n def test_linear_ydot(self):\n y = pybamm.StateVector(slice(0, 4))\n y_dot = pybamm.StateVectorDot(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n u_dot = pybamm.StateVectorDot(slice(0, 2))\n v_dot = pybamm.StateVectorDot(slice(2, 4))\n\n y0 = np.ones(4)\n y_dot0 = np.ones(4)\n\n func = u_dot\n jacobian = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v_dot\n jacobian = np.array([[0, 0, -1, 0], [0, 0, 0, -1]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u_dot\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v_dot\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n def test_functions(self):\n y = pybamm.StateVector(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n const = pybamm.Scalar(1)\n\n y0 = np.array([1.0, 2.0, 3.0, 4.0])\n\n func = pybamm.sin(u)\n jacobian = np.array([[np.cos(1), 0, 0, 0], [0, np.cos(2), 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = pybamm.cos(v)\n jacobian = np.array([[0, 0, -np.sin(3), 0], [0, 0, 0, -np.sin(4)]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = pybamm.sin(3 * u * v)\n jacobian = np.array(\n [\n [9 * np.cos(9), 0, 3 * np.cos(9), 0],\n [0, 12 * np.cos(24), 0, 6 * np.cos(24)],\n ]\n )\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = pybamm.cos(5 * pybamm.exp(u + v))\n jacobian = np.array(\n [\n [\n -5 * np.exp(4) * np.sin(5 * np.exp(4)),\n 0,\n -5 * np.exp(4) * np.sin(5 * np.exp(4)),\n 0,\n ],\n [\n 0,\n -5 * np.exp(6) * np.sin(5 * np.exp(6)),\n 0,\n -5 * np.exp(6) * np.sin(5 * np.exp(6)),\n ],\n ]\n )\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n # when child evaluates to number\n func = pybamm.sin(const)\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(0, dfunc_dy)\n\n # several children\n func = pybamm.Function(test_multi_var_function, 2 * y, 3 * y)\n jacobian = np.diag(5 * np.ones(4))\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n def test_index(self):\n vec = pybamm.StateVector(slice(0, 5))\n ind = pybamm.Index(vec, 3)\n jac = 
ind.jac(vec).evaluate(y=np.linspace(0, 2, 5)).toarray()\n np.testing.assert_array_equal(jac, np.array([[0, 0, 0, 1, 0]]))\n\n # jac of ind of something that isn't a StateVector should return zeros\n const_vec = pybamm.Vector(np.ones(3))\n ind = pybamm.Index(const_vec, 2)\n jac = ind.jac(vec).evaluate(y=np.linspace(0, 2, 5)).toarray()\n np.testing.assert_array_equal(jac, np.array([[0, 0, 0, 0, 0]]))\n\n def test_jac_of_number(self):\n \"Jacobian of a number should be zero\"\n a = pybamm.Scalar(1)\n b = pybamm.Scalar(2)\n\n y = pybamm.StateVector(slice(0, 1))\n\n self.assertEqual(a.jac(y).evaluate(), 0)\n\n add = a + b\n self.assertEqual(add.jac(y).evaluate(), 0)\n\n subtract = a - b\n self.assertEqual(subtract.jac(y).evaluate(), 0)\n\n multiply = a * b\n self.assertEqual(multiply.jac(y).evaluate(), 0)\n\n divide = a / b\n self.assertEqual(divide.jac(y).evaluate(), 0)\n\n power = a ** b\n self.assertEqual(power.jac(y).evaluate(), 0)\n\n def test_jac_of_symbol(self):\n a = pybamm.Symbol(\"a\")\n y = pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n a.jac(y)\n\n def test_spatial_operator(self):\n a = pybamm.Variable(\"a\")\n b = pybamm.SpatialOperator(\"Operator\", a)\n y = pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n b.jac(y)\n\n def test_jac_of_unary_operator(self):\n a = pybamm.Scalar(1)\n b = pybamm.UnaryOperator(\"Operator\", a)\n y = pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n b.jac(y)\n\n def test_jac_of_independent_variable(self):\n a = pybamm.IndependentVariable(\"Variable\")\n y = pybamm.StateVector(slice(0, 1))\n self.assertEqual(a.jac(y).evaluate(), 0)\n\n def test_jac_of_inner(self):\n a = pybamm.Scalar(1)\n b = pybamm.Scalar(2)\n y = pybamm.StateVector(slice(0, 1))\n self.assertEqual(pybamm.inner(a, b).jac(y).evaluate(), 0)\n self.assertEqual(pybamm.inner(a, y).jac(y).evaluate(), 1)\n self.assertEqual(pybamm.inner(y, b).jac(y).evaluate(), 2)\n vec = pybamm.StateVector(slice(0, 2))\n jac = pybamm.inner(a * vec, b * vec).jac(vec).evaluate(y=np.ones(2)).toarray()\n np.testing.assert_array_equal(jac, 4 * np.eye(2))\n\n def test_jac_of_heaviside(self):\n a = pybamm.Scalar(1)\n y = pybamm.StateVector(slice(0, 5))\n np.testing.assert_array_equal(\n ((a < y) * y ** 2).jac(y).evaluate(y=5 * np.ones(5)), 10 * np.eye(5)\n )\n np.testing.assert_array_equal(\n ((a < y) * y ** 2).jac(y).evaluate(y=-5 * np.ones(5)), 0\n )\n\n def test_jac_of_minimum_maximum(self):\n y = pybamm.StateVector(slice(0, 10))\n y_test = np.linspace(0, 2, 10)\n np.testing.assert_array_equal(\n np.diag(pybamm.minimum(1, y ** 2).jac(y).evaluate(y=y_test)),\n 2 * y_test * (y_test < 1),\n )\n np.testing.assert_array_equal(\n np.diag(pybamm.maximum(1, y ** 2).jac(y).evaluate(y=y_test)),\n 2 * y_test * (y_test > 1),\n )\n\n def test_jac_of_abs(self):\n y = pybamm.StateVector(slice(0, 10))\n absy = abs(y)\n jac = absy.jac(y)\n y_test = np.linspace(-2, 2, 10)\n np.testing.assert_array_equal(\n np.diag(jac.evaluate(y=y_test).toarray()), np.sign(y_test)\n )\n\n def test_jac_of_sign(self):\n y = pybamm.StateVector(slice(0, 10))\n func = pybamm.sign(y) * y\n jac = func.jac(y)\n y_test = np.linspace(-2, 2, 10)\n np.testing.assert_array_equal(np.diag(jac.evaluate(y=y_test)), np.sign(y_test))\n\n def test_jac_of_domain_concatenation(self):\n # create mesh\n mesh = get_mesh_for_testing()\n y = pybamm.StateVector(slice(0, 100))\n\n # Jacobian of a DomainConcatenation of constants is a zero matrix of the\n # appropriate size\n a_dom = 
[\"negative electrode\"]\n b_dom = [\"separator\"]\n c_dom = [\"positive electrode\"]\n a_npts = mesh[a_dom[0]][0].npts\n b_npts = mesh[b_dom[0]][0].npts\n c_npts = mesh[c_dom[0]][0].npts\n a = 2 * pybamm.Vector(np.ones(a_npts), domain=a_dom)\n b = pybamm.Vector(np.ones(b_npts), domain=b_dom)\n c = 3 * pybamm.Vector(np.ones(c_npts), domain=c_dom)\n\n conc = pybamm.DomainConcatenation([a, b, c], mesh)\n jac = conc.jac(y).evaluate().toarray()\n np.testing.assert_array_equal(jac, np.zeros((100, 100)))\n\n # Jacobian of a DomainConcatenation of StateVectors\n a = 2 * pybamm.StateVector(slice(0, a_npts), domain=a_dom)\n b = pybamm.StateVector(slice(a_npts, a_npts + b_npts), domain=b_dom)\n c = 3 * pybamm.StateVector(\n slice(a_npts + b_npts, a_npts + b_npts + c_npts), domain=c_dom\n )\n conc = pybamm.DomainConcatenation([a, b, c], mesh)\n\n y0 = np.ones(100)\n jac = conc.jac(y).evaluate(y=y0).toarray()\n np.testing.assert_array_equal(\n jac,\n np.diag(\n np.concatenate(\n [2 * np.ones(a_npts), np.ones(b_npts), 3 * np.ones(c_npts)]\n )\n ),\n )\n\n # multi=domain case not implemented\n a = 2 * pybamm.StateVector(slice(0, a_npts), domain=a_dom)\n b = pybamm.StateVector(\n slice(a_npts, a_npts + b_npts + c_npts), domain=b_dom + c_dom\n )\n conc = pybamm.DomainConcatenation([a, b], mesh)\n with self.assertRaisesRegex(\n NotImplementedError, \"jacobian only implemented for when each child has\"\n ):\n conc.jac(y)\n\n\nif __name__ == \"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n pybamm.settings.debug_mode = True\n unittest.main()\n"
] | [
[
"numpy.log",
"numpy.linspace",
"scipy.sparse.eye",
"numpy.eye",
"numpy.cos",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.sign",
"numpy.sin",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
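The tests above compare hand-derived Jacobians against the ones pybamm computes symbolically; the same kind of check can be done numerically with a central finite-difference Jacobian in plain NumPy. A sketch, where f mirrors the u * v case from test_nonlinear:

    import numpy as np

    # f(y) = u * v with u = y[:2], v = y[2:], as in test_nonlinear above.
    def f(y):
        return y[:2] * y[2:]

    def analytic_jac(y):
        u, v = y[:2], y[2:]
        return np.array([[v[0], 0, u[0], 0],
                         [0, v[1], 0, u[1]]])

    def fd_jac(f, y, eps=1e-6):
        # Central finite differences: one column per entry of the state vector.
        cols = [(f(y + e) - f(y - e)) / (2 * eps)
                for e in eps * np.eye(y.size)]
        return np.stack(cols, axis=1)

    y0 = np.array([1.0, 2.0, 3.0, 4.0])
    np.testing.assert_allclose(analytic_jac(y0), fd_jac(f, y0), rtol=1e-6, atol=1e-8)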
KhelmholtzR/ProgLearn | [
"f5177c720e53d2f5936272998b94e0746135a3b9"
] | [
"proglearn/transformers.py"
] | [
"\"\"\"\nMain Author: Will LeVine\nCorresponding Email: [email protected]\n\"\"\"\nfrom tensorflow import keras\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n\nfrom .base import BaseTransformer\n\n\nclass NeuralClassificationTransformer(BaseTransformer):\n \"\"\"\n A class used to transform data from a category to a specialized representation.\n\n Parameters\n ----------\n network : object\n A neural network used in the classification transformer.\n\n euclidean_layer_idx : int\n An integer to represent the final layer of the transformer.\n\n optimizer : str or keras.optimizers instance\n An optimizer used when compiling the neural network.\n\n loss : str, default=\"categorical_crossentropy\"\n A loss function used when compiling the neural network.\n\n pretrained : bool, default=False\n A boolean used to identify if the network is pretrained.\n\n compile_kwargs : dict, default={\"metrics\": [\"acc\"]}\n A dictionary containing metrics for judging network performance.\n\n fit_kwargs : dict, default={\n \"epochs\": 100,\n \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")],\n \"verbose\": False,\n \"validation_split\": 0.33,\n },\n A dictionary to hold epochs, callbacks, verbose, and validation split for the network.\n\n Attributes\n ----------\n encoder_ : object\n A Keras model with inputs and outputs based on the network attribute.\n Output layers are determined by the euclidean_layer_idx parameter.\n\n fitted_ : boolean\n A boolean flag initialized after the model is fitted.\n \"\"\"\n\n def __init__(\n self,\n network,\n euclidean_layer_idx,\n optimizer,\n loss=\"categorical_crossentropy\",\n pretrained=False,\n compile_kwargs={\"metrics\": [\"acc\"]},\n fit_kwargs={\n \"epochs\": 100,\n \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")],\n \"verbose\": False,\n \"validation_split\": 0.33,\n },\n ):\n self.network = keras.models.clone_model(network)\n self.encoder_ = keras.models.Model(\n inputs=self.network.inputs,\n outputs=self.network.layers[euclidean_layer_idx].output,\n )\n self.pretrained = pretrained\n self.optimizer = optimizer\n self.loss = loss\n self.compile_kwargs = compile_kwargs\n self.fit_kwargs = fit_kwargs\n\n def fit(self, X, y):\n \"\"\"\n Fits the transformer to data X with labels y.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n y : ndarray\n Output (i.e. 
response data matrix).\n\n Returns\n -------\n self : NeuralClassificationTransformer\n The object itself.\n \"\"\"\n check_X_y(X, y, ensure_2d=False, allow_nd=True)\n _, y = np.unique(y, return_inverse=True)\n\n self.network.compile(\n loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs\n )\n\n self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)\n self.fitted_ = True\n\n return self\n\n def transform(self, X):\n \"\"\"\n Performs inference using the transformer.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n\n Returns\n -------\n X_transformed : ndarray\n The transformed input.\n\n Raises\n ------\n NotFittedError\n When the model is not fitted.\n \"\"\"\n check_array(X, ensure_2d=False, allow_nd=True)\n check_is_fitted(self, attributes=\"fitted_\")\n return self.encoder_.predict(X)\n\n\nclass TreeClassificationTransformer(BaseTransformer):\n \"\"\"\n A class used to transform data from a category to a specialized representation.\n\n Parameters\n ----------\n kwargs : dict, default={}\n A dictionary to contain parameters of the tree.\n\n Attributes\n ----------\n transformer : sklearn.tree.DecisionTreeClassifier\n an internal sklearn DecisionTreeClassifier\n \"\"\"\n\n def __init__(self, kwargs={}):\n self.kwargs = kwargs\n\n def fit(self, X, y):\n \"\"\"\n Fits the transformer to data X with labels y.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n y : ndarray\n Output (i.e. response data matrix).\n\n Returns\n -------\n self : TreeClassificationTransformer\n The object itself.\n \"\"\"\n X, y = check_X_y(X, y)\n self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)\n return self\n\n def transform(self, X):\n \"\"\"\n Performs inference using the transformer.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n\n Returns\n -------\n X_transformed : ndarray\n The transformed input.\n\n Raises\n ------\n NotFittedError\n When the model is not fitted.\n \"\"\"\n X = check_array(X)\n check_is_fitted(self)\n return self.transformer_.apply(X)\n"
] | [
[
"sklearn.utils.validation.check_is_fitted",
"tensorflow.keras.models.clone_model",
"sklearn.utils.validation.check_array",
"numpy.unique",
"tensorflow.keras.models.Model",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.utils.validation.check_X_y",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.utils.to_categorical"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
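TreeClassificationTransformer.transform above returns DecisionTreeClassifier.apply(X), i.e. the index of the leaf each sample lands in; a small self-contained illustration of that representation on synthetic data:

    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 2))
    y = (X[:, 0] + X[:, 1] > 0).astype(int)

    tree = DecisionTreeClassifier(max_depth=3).fit(X, y)
    leaf_ids = tree.apply(X)    # shape (100,): one leaf index per sample
    print(np.unique(leaf_ids))  # the discrete "specialized representation"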
AndreyBuyanov/ImageProcessing.Lb5.TextureSegmentation | [
"1509817ee2719573b04eba6f49154d7b38af853d"
] | [
"App.py"
] | [
"from PyQt5 import QtWidgets, uic\nfrom PyQt5.QtGui import QImage, QPixmap, QPalette, qRgb, qGray\nimport sys\nimport numpy as np\nfrom typing import Callable\nfrom numbers import Number\n\n\ndef process_image(\n input_image: np.array,\n kernel_size: int,\n kernel_fn: Callable[[np.array], float]) -> np.array:\n padding_width: int = kernel_size // 2\n padding_height: int = kernel_size // 2\n padding = ((padding_height, padding_height), (padding_width, padding_width))\n input_image_padding: np.array = np.pad(\n array=input_image,\n pad_width=padding,\n mode='edge')\n result_image: np.array = np.zeros(input_image.shape, dtype='float')\n image_height, image_width = result_image.shape\n for image_x in range(image_width):\n for image_y in range(image_height):\n x_pos_begin = image_x\n x_pos_end = image_x + kernel_size\n y_pos_begin = image_y\n y_pos_end = image_y + kernel_size\n image_segment: np.array = input_image_padding[y_pos_begin:y_pos_end, x_pos_begin:x_pos_end]\n result_image[image_y][image_x] = kernel_fn(image_segment)\n return result_image\n\n\ndef mean_fn(\n image_segment: np.array) -> float:\n return float(np.mean(image_segment))\n\n\ndef std_fn(\n image_segment: np.array) -> float:\n return float(np.std(image_segment))\n\n\ndef convert_to_binary(\n input_image: np.array,\n threshold: int = 127) -> np.array:\n max_val: int = 255\n min_val: int = 0\n initial_conv: np.array = np.where((input_image <= threshold), input_image, max_val)\n final_conv: np.array = np.where((initial_conv > threshold), initial_conv, min_val)\n return final_conv\n\n\ndef normalize_image(\n input_image: np.array) -> np.array:\n result_image: np.array = np.zeros(input_image.shape)\n input_max = input_image.max()\n input_min = input_image.min()\n input_range = input_max - input_min\n height, width = input_image.shape\n for y in range(height):\n for x in range(width):\n input_value = input_image[y][x]\n scaled_input_value = (input_value - input_min) / input_range if input_range != 0 else 0\n result_image[y][x] = scaled_input_value * 255.0\n return result_image\n\n\ndef fill_image(\n input_image: np.array,\n value: Number,\n replace_value: Number):\n height, width = input_image.shape\n for y in range(height):\n for x in range(width):\n if input_image[y, x] == value:\n input_image[y, x] = replace_value\n\n\ndef mark_objects(\n input_image: np.array) -> np.array:\n result_image: np.array = np.copy(input_image)\n current_object_id = 1\n height, width = input_image.shape\n for y in range(height):\n for x in range(width):\n if y == 0:\n c = 0\n else:\n c = result_image[y - 1, x]\n if x == 0:\n b = 0\n else:\n b = result_image[y, x - 1]\n a = result_image[y, x]\n if a == 0:\n pass\n elif b == 0 and c == 0:\n current_object_id += 1\n result_image[y, x] = current_object_id\n elif b != 0 and c == 0:\n result_image[y, x] = b\n elif b == 0 and c != 0:\n result_image[y, x] = c\n elif b != 0 and c != 0:\n if b == c:\n result_image[y, x] = b\n else:\n result_image[y, x] = b\n fill_image(\n input_image=result_image,\n value=c,\n replace_value=b)\n return result_image\n\n\ndef delete_objects(\n input_image: np.array,\n object_size: int):\n unique_mask, hist = np.unique(input_image, return_counts=True)\n for i in range(1, len(unique_mask)):\n if hist[i] < object_size:\n for (y, x), _ in np.ndenumerate(input_image):\n if input_image[y, x] == unique_mask[i]:\n input_image[y, x] = 0\n\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__()\n uic.loadUi('Main.ui', self)\n\n self.action_open = 
self.findChild(QtWidgets.QAction, 'actionOpen')\n self.action_open.triggered.connect(self.action_open_triggered)\n\n self.action_exit = self.findChild(QtWidgets.QAction, 'actionExit')\n self.action_exit.triggered.connect(self.action_exit_triggered)\n\n self.bt_apply = self.findChild(QtWidgets.QPushButton, 'btApply')\n self.bt_apply.clicked.connect(self.bt_apply_pressed)\n\n self.input_image_canvas = QtWidgets.QLabel()\n self.input_image_canvas.setBackgroundRole(QPalette.Base)\n self.input_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.input_image_canvas.setScaledContents(True)\n self.sa_input_image = self.findChild(QtWidgets.QScrollArea, 'saInputImage')\n self.sa_input_image.setWidget(self.input_image_canvas)\n self.sa_input_image.setWidgetResizable(False)\n\n self.processed_image_canvas = QtWidgets.QLabel()\n self.processed_image_canvas.setBackgroundRole(QPalette.Base)\n self.processed_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.processed_image_canvas.setScaledContents(True)\n self.sa_processed_image = self.findChild(QtWidgets.QScrollArea, 'saProcessedImage')\n self.sa_processed_image.setWidget(self.processed_image_canvas)\n self.sa_processed_image.setWidgetResizable(False)\n\n self.mask_image_canvas = QtWidgets.QLabel()\n self.mask_image_canvas.setBackgroundRole(QPalette.Base)\n self.mask_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.mask_image_canvas.setScaledContents(True)\n self.sa_mask_image = self.findChild(QtWidgets.QScrollArea, 'saMask')\n self.sa_mask_image.setWidget(self.mask_image_canvas)\n self.sa_mask_image.setWidgetResizable(False)\n\n self.segmented_image_canvas = QtWidgets.QLabel()\n self.segmented_image_canvas.setBackgroundRole(QPalette.Base)\n self.segmented_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.segmented_image_canvas.setScaledContents(True)\n self.sa_segmented_image = self.findChild(QtWidgets.QScrollArea, 'saSegmentedImage')\n self.sa_segmented_image.setWidget(self.segmented_image_canvas)\n self.sa_segmented_image.setWidgetResizable(False)\n\n self.cb_method = self.findChild(QtWidgets.QComboBox, 'cbMethod')\n self.cb_method.addItems(['Mean', 'Std'])\n\n self.le_kernel_size = self.findChild(QtWidgets.QLineEdit, 'leKernelSize')\n\n self.le_threshold = self.findChild(QtWidgets.QLineEdit, 'leThreshold')\n\n self.le_delete_objects = self.findChild(QtWidgets.QLineEdit, 'leDeleteObjects')\n\n self.show()\n\n def action_open_triggered(self):\n options = QtWidgets.QFileDialog.Options()\n file_name, _ = QtWidgets.QFileDialog.\\\n getOpenFileName(self,\n 'QFileDialog.getOpenFileName()',\n '',\n 'Images (*.png *.jpeg *.jpg *.bmp *.gif)',\n options=options)\n if file_name:\n image = QImage(file_name).convertToFormat(QImage.Format_Grayscale8)\n if image.isNull():\n QtWidgets.QMessageBox.\\\n information(self,\n \"Texture segmentation\",\n \"Cannot load %s.\" % file_name)\n return\n\n self.input_image_canvas.setPixmap(QPixmap.fromImage(image))\n self.input_image_canvas.adjustSize()\n\n def action_exit_triggered(self):\n self.close()\n\n def bt_apply_pressed(self):\n method = self.cb_method.currentIndex()\n kernel_size = int(self.le_kernel_size.text())\n threshold = int(self.le_threshold.text())\n object_size = int(self.le_delete_objects.text())\n\n input_q_image = self.input_image_canvas.pixmap().toImage().convertToFormat(QImage.Format_Grayscale8)\n 
input_image = np.zeros((input_q_image.height(), input_q_image.width()), dtype='float')\n for (y, x), _ in np.ndenumerate(input_image):\n input_image[y, x] = qGray(input_q_image.pixel(x, y))\n\n if method == 0:\n kernel_fn = mean_fn\n elif method == 1:\n kernel_fn = std_fn\n else:\n return\n processed_image: np.array = process_image(\n input_image=input_image,\n kernel_size=kernel_size,\n kernel_fn=kernel_fn)\n normalized_image: np.array = normalize_image(input_image=processed_image)\n binarized_image: np.array = convert_to_binary(input_image=normalized_image, threshold=threshold)\n marked_image = mark_objects(input_image=binarized_image)\n delete_objects(\n input_image=marked_image,\n object_size=object_size)\n segmented_image = np.copy(input_image)\n for (y, x), _ in np.ndenumerate(segmented_image):\n if marked_image[y, x] == 0:\n segmented_image[y, x] = 0\n self.set_image(\n input_image=normalized_image,\n canvas=self.processed_image_canvas)\n self.set_image(\n input_image=normalize_image(\n input_image=marked_image),\n canvas=self.mask_image_canvas)\n self.set_image(\n input_image=segmented_image,\n canvas=self.segmented_image_canvas)\n\n @staticmethod\n def set_image(input_image: np.array, canvas: QtWidgets.QLabel):\n height, width = input_image.shape\n q_image = QImage(width, height, QImage.Format_RGB32)\n for y in range(height):\n for x in range(width):\n pixel = int(input_image[y, x])\n q_image.setPixel(x, y, qRgb(pixel, pixel, pixel))\n canvas.setPixmap(QPixmap.fromImage(q_image))\n canvas.adjustSize()\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n window = Ui()\n app.exec_()\n"
] | [
[
"numpy.pad",
"numpy.unique",
"numpy.copy",
"numpy.std",
"numpy.mean",
"numpy.ndenumerate",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
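process_image above computes a per-pixel windowed statistic with explicit Python loops; assuming NumPy >= 1.20 is available, the same result can be sketched with sliding_window_view, which avoids the inner loops entirely:

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view  # NumPy >= 1.20

    def local_stat(image, kernel_size, stat=np.std):
        pad = kernel_size // 2
        padded = np.pad(image, pad, mode="edge")  # same 'edge' padding as process_image
        windows = sliding_window_view(padded, (kernel_size, kernel_size))
        return stat(windows, axis=(-2, -1))       # one value per input pixel

    image = np.random.default_rng(0).integers(0, 256, size=(32, 32)).astype(float)
    assert local_stat(image, 5).shape == image.shape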
BUTSpeechFIT/ASR_Transformer | [
"814f720aa8265e9a377869f93dc65b251338e985",
"814f720aa8265e9a377869f93dc65b251338e985",
"814f720aa8265e9a377869f93dc65b251338e985"
] | [
"Transformer_training_V2.py",
"ASR_TransV1/Dataloader_for_AM_v3_dev.py",
"ASR_TransV1/TRANSFORMER_ASR_V1_CTC.py"
] | [
"#!/usr/bin/python\nimport sys\nimport os\nimport subprocess\nfrom os.path import join, isdir\nimport torch\n\n\n#*************************************************************************************************************************\n####### Loading the Parser and default arguments\n#import pdb;pdb.set_trace()\n\n#sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/Gen_V1/ATTNCODE/Trans_V1')\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')\nimport Transformer_arg\nfrom Transformer_arg import parser\nargs = parser.parse_args()\n\n#************************\nimport Set_gpus\nfrom Set_gpus import Set_gpu\nif args.gpu:\n Set_gpu()\n\n#import safe_gpu\n#from safe_gpu import safe_gpu\n#gpu_owner = safe_gpu.GPUOwner()\n#***********************\n\nimport numpy as np\nimport fileinput\nimport json\nimport random\nfrom itertools import chain\nfrom numpy.random import permutation\n##------------------------------------------------------------------\n#import torch\nfrom torch.autograd import Variable\n#----------------------------------------\nimport torch.nn as nn\nfrom torch import autograd, nn, optim\nos.environ['PYTHONUNBUFFERED'] = '0'\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\n\nfrom random import shuffle\nfrom statistics import mean\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nmatplotlib.pyplot.viridis()\nimport glob\n\n###save architecture for decoding\nmodel_path_name=join(args.model_dir,'model_architecture_')\nwith open(model_path_name, 'w') as f:\n json.dump(args.__dict__, f, indent=2)\nprint(args)\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')\n# #####setting the gpus in the gpu cluster\n# #**********************************\n#import Set_gpus\n#from Set_gpus import Set_gpu\n#if args.gpu:\n# Set_gpu()\n \n###----------------------------------------\nfrom Dataloader_for_AM_v2 import DataLoader\nfrom utils__ import weights_init,reduce_learning_rate,read_as_list,gaussian_noise,plotting\n#==============================================================\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')\nfrom TRANSFORMER_ASR_V1 import Transformer\nfrom Initializing_Transformer_ASR import Initialize_Att_model\nfrom Transformer_Training_loop import train_val_model\nfrom Load_sp_model import Load_sp_models\n##==================================\n#==============================================================\nif not isdir(args.model_dir):\n os.makedirs(args.model_dir)\n\npng_dir=args.model_dir+'_png'\nif not isdir(png_dir):\n os.makedirs(png_dir)\n############################################\n#=============================================================\ndef main():\n ##Load setpiece models for Dataloaders\n Word_model=Load_sp_models(args.Word_model_path)\n Char_model=Load_sp_models(args.Char_model_path)\n ###initilize the model\n model,optimizer=Initialize_Att_model(args)\n #============================================================\n #------------------------------------------------------------ \n #\n train_gen = DataLoader(files=glob.glob(args.data_dir + \"train_splits/*\"),\n max_batch_label_len=args.max_batch_label_len,\n max_batch_len=args.max_batch_len,\n max_feat_len=args.max_feat_len,\n max_label_len=args.max_label_len,\n Word_model=Word_model,\n Char_model=Char_model,\n apply_cmvn=int(args.apply_cmvn))\n\n dev_gen = DataLoader(files=glob.glob(args.data_dir + \"dev_splits/*\"),\n max_batch_label_len=2000,\n 
max_batch_len=args.max_batch_len,\n max_feat_len=5000,\n max_label_len=1000,\n Word_model=Word_model,\n Char_model=Char_model,\n apply_cmvn=int(args.apply_cmvn))\n\n\n #Flags that may change while training \n if args.spec_aug_flag==2:\n weight_noise_flag=False\n spec_aug_flag=True\n else:\n weight_noise_flag=False\n spec_aug_flag=False\n val_history=np.zeros(args.nepochs)\n #======================================\n for epoch in range(args.nepochs):\n ##start of the epoch\n tr_CER=[]; tr_BPE_CER=[]; L_train_cost=[]\n model.train();\n validate_interval = int(args.validate_interval * args.accm_grad) if args.accm_grad>0 else args.validate_interval\n for trs_no in range(validate_interval):\n B1 = train_gen.next()\n assert B1 is not None, \"None should never come out of the DataLoader\"\n\n Output_trainval_dict=train_val_model(smp_no=trs_no,\n args = args, \n model = model,\n optimizer = optimizer,\n data_dict = B1,\n weight_noise_flag=weight_noise_flag,\n spec_aug_flag=spec_aug_flag,\n trainflag = True)\n #\n #\n #get the losses from the dict\n L_train_cost.append(Output_trainval_dict.get('cost_cpu'))\n tr_CER.append(Output_trainval_dict.get('Char_cer'))\n tr_BPE_CER.append(Output_trainval_dict.get('Word_cer'))\n #attention_map=Output_trainval_dict.get('attention_record').data.cpu().numpy()\n #NOTE: attention_map is only defined if the line above is re-enabled; the plotting calls below rely on it\n #==========================================\n if (trs_no%args.tr_disp==0):\n print(\"tr ep:==:>\",epoch,\"sampl no:==:>\",trs_no,\"train_cost==:>\",mean(L_train_cost),\"CER:\",mean(tr_CER),'BPE_CER',mean(tr_BPE_CER),flush=True) \n #------------------------\n if args.plot_fig_training:\n plot_name=join(png_dir,'train_epoch'+str(epoch)+'_attention_single_file_'+str(trs_no)+'.png')\n\n plotting(plot_name,attention_map)\n \n ###validate the model\n model.eval()\n #=======================================================\n Vl_CER=[]; Vl_BPE_CER=[];L_val_cost=[]\n val_examples=0\n for vl_smp in range(args.max_val_examples):\n B1 = dev_gen.next()\n smp_feat = B1.get('smp_feat')\n val_examples+=smp_feat.shape[0]\n assert B1 is not None, \"None should never come out of the DataLoader\"\n\n ##break once enough validation examples have been seen\n if (val_examples >= args.max_val_examples):\n break;\n #-------------------------------------- \n Val_Output_trainval_dict=train_val_model(smp_no=trs_no,\n args=args,\n model = model,\n optimizer = optimizer,\n data_dict = B1,\n weight_noise_flag=False,\n spec_aug_flag=False,\n trainflag = False)\n \n L_val_cost.append(Val_Output_trainval_dict.get('cost_cpu'))\n Vl_CER.append(Val_Output_trainval_dict.get('Char_cer'))\n Vl_BPE_CER.append(Val_Output_trainval_dict.get('Word_cer'))\n #attention_map=Val_Output_trainval_dict.get('attention_record').data.cpu().numpy()\n\n #====================================================== \n #======================================================\n if (vl_smp%args.vl_disp==0) or (val_examples==args.max_val_examples-1):\n print(\"val epoch:==:>\",epoch,\"val smp no:==:>\",vl_smp,\"val_cost:==:>\",mean(L_val_cost),\"CER:\",mean(Vl_CER),'BPE_CER',mean(Vl_BPE_CER),flush=True) \n\n if args.plot_fig_validation:\n plot_name=join(png_dir,'val_epoch'+str(epoch)+'_attention_single_file_'+str(vl_smp)+'.png') \n plotting(plot_name,attention_map) \n #----------------------------------------------------\n#==================================================================\n val_history[epoch]=(mean(Vl_CER)*100)\n print(\"val_history:\",val_history[:epoch+1])\n #================================================================== \n ####saving_weights \n
ct=\"model_epoch_\"+str(epoch)+\"_sample_\"+str(trs_no)+\"_\"+str(mean(L_train_cost))+\"___\"+str(mean(L_val_cost))+\"__\"+str(mean(Vl_CER))\n print(ct)\n torch.save(model.state_dict(),join(args.model_dir,str(ct)))\n ####saving otpimizer helped Transformer\n #torch.save(optimizer.state_dict(),join(args.model_dir,str(ct)+'_opt'))\n\n ####################################################### \n #######################################################\n ###open the file write and close it to avoid delays\n with open(args.weight_text_file,'a+') as weight_saving_file:\n print(join(args.model_dir,str(ct)), file=weight_saving_file)\n\n with open(args.Res_text_file,'a+') as Res_saving_file:\n print(float(mean(Vl_CER)), file=Res_saving_file)\n #=================================\n # early_stopping and checkpoint averaging: \n if args.early_stopping:\n A=val_history\n Non_zero_loss=A[A>0]\n min_cpts=np.argmin(Non_zero_loss)\n Non_zero_len=len(Non_zero_loss)\n\n if ((Non_zero_len-min_cpts)>1):\n weight_noise_flag=True\n spec_aug_flag=True\n\n if (Non_zero_len-min_cpts) > args.early_stopping_patience: \n print(\"The model is early stopping........\",\"minimum value of model is:\",min_cpts)\n exit(0)\n\n#=======================================================\n#=============================================================================================\nif __name__ == '__main__':\n main()\n\n\n\n",
"#!/usr/bin/python\nimport kaldi_io\nimport sys\nimport os\nfrom os.path import join, isdir\nfrom numpy.random import permutation\nimport itertools\nimport keras\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\nimport queue\nfrom threading import Thread\nimport random\nimport glob\n\n\nimport sys\nsys.path.insert(0, '/home/vydana/ASR_Transformer/ASR_TransV1')\n\nimport CMVN\nfrom CMVN import CMVN\nfrom Load_sp_model import Load_sp_models\n\nfrom random import sample\nimport copy\n\n#===============================================\n#----------------------------------------------- \nclass DataLoader(object):\n def __init__(self,files, max_batch_label_len, max_batch_len, max_feat_len, max_label_len, Word_model, Char_model, queue_size=500,apply_cmvn=1):\n\n self.files = files\n if self.files==[]:\n print('input to data generator in empty')\n exit(0)\n\n self.text_file_dict ={} \n\n self.Word_model = Word_model\n self.Char_model = Char_model\n\n self.expand_n=40\n self.resamp_batches_start_flag=False\n self.max_batch_len = max_batch_len\n self.max_batch_label_len = max_batch_label_len\n self.max_batch_label_len = self.max_batch_label_len\n\n\n self.max_feat_len = max_feat_len\n self.max_label_len = max_label_len\n self.apply_cmvn = apply_cmvn\n \n\n self.queue = queue.Queue(queue_size)\n self.Word_padding_id = self.Word_model.__len__()\n self.Char_padding_id = self.Char_model.__len__()\n self.word_space_token = self.Word_model.EncodeAsIds('_____')[0]\n \n self._thread = Thread(target=self.__load_data)\n self._thread.daemon = True\n #print(self._thread)\n self._thread.start()\n #--------------------------------\n #self._threads = []\n #for i in range(10):\n # self._thread = Thread(target=self.__load_data)\n # self._thread.daemon = True\n # self._thread.start()\n # self._threads.append(self._thread)\n #for process in self._threads:\n # process.join()\n #-------------------------------\n \n def __reset_the_data_holders(self):\n self.batch_data=[]\n self.batch_labels=[]\n self.batch_names=[]\n self.batch_length=[]\n self.batch_label_length=[]\n \n self.batch_word_labels=[]\n self.batch_word_label_length=[]\n \n self.batch_word_text=[]\n self.batch_word_text_length=[]\n\n self.batch_word_text_tgt=[]\n self.batch_word_text_length_tgt=[]\n \n #---------------------------------------------------------------------\n def make_batching_dict(self):\n #----------------------------------------\n smp_feat = pad_sequences(self.batch_data,maxlen=max(self.batch_length),dtype='float32',padding='post',value=0.0)\n smp_char_labels = pad_sequences(self.batch_word_labels,maxlen=max(self.batch_word_label_length),dtype='int32',padding='post',value=self.Word_padding_id)\n smp_word_label = pad_sequences(self.batch_word_labels,maxlen=max(self.batch_word_label_length),dtype='int32',padding='post',value=self.Word_padding_id)\n\n smp_trans_text = pad_sequences(self.batch_word_text_tgt, maxlen=max(self.batch_word_text_length_tgt),dtype=object,padding='post',value=' ')\n smp_trans_text_tgt = pad_sequences(self.batch_word_text_tgt, maxlen=max(self.batch_word_text_length_tgt),dtype=object,padding='post',value=' ')\n\n batch_data_dict={\n 'smp_names':self.batch_names,\n 'smp_feat':smp_feat,\n 'smp_char_label':smp_char_labels,\n 'smp_word_label':smp_word_label,\n 'smp_trans_text':smp_trans_text,\n 'smp_trans_text_tgt': smp_trans_text_tgt,\n 'smp_feat_length':self.batch_length,\n 'smp_label_length':self.batch_label_length,\n 'smp_word_label_length':self.batch_word_label_length,\n 
'smp_word_text_length':self.batch_word_text_length,\n 'smp_word_text_length_tgt':self.batch_word_text_length_tgt}\n return batch_data_dict\n\n\n #------------------------------------------\n def __load_data(self):\n ###initialize the lists\n while True:\n self.__reset_the_data_holders()\n max_batch_label_len = self.max_batch_label_len\n random.shuffle(self.files)\n for inp_file in self.files:\n #print(inp_file)\n with open(inp_file) as f:\n for line in f:\n #print('---->',line)\n #============================\n split_lines=line.split(' @@@@ ')\n #============================\n ##assigning\n key = split_lines[0]\n #print(key)\n scp_path = split_lines[1]\n #============================\n ### Char labels\n #============================\n src_text = split_lines[3] \n src_tok = split_lines[4] \n\n #=============================\n if len(src_tok)>0:\n src_tok = [int(i) for i in src_tok.split(' ')] \n else:\n continue;\n #============================ \n word_tokens = src_tok\n\n\t\t\t##*********#########*************\n\t\t\t###Changed to match the joint training\n word_labels = src_text.split(' ')\n\n\t\t\t##*********#########*************\n\n #--------------------------\n if not (scp_path == 'None'):\n mat = kaldi_io.read_mat(scp_path)\n if self.apply_cmvn:\n mat = CMVN(mat)\n #print(key,mat.shape)\n else:\n mat=np.zeros((100,249),dtype=np.float32)\n #--------------------------\n\n if (mat.shape[0]>self.max_feat_len) or (mat.shape[0]<len(word_labels)) or (len(word_tokens) > self.max_label_len):\n #print(\"key,mat.shape,char_labels,char_tokens,self.max_label_len\",key,mat.shape,len(char_labels),len(char_tokens),self.max_label_len)\n continue;\n #==============================================================\n ###Add to the list\n ####\n self.batch_data.append(mat) \n self.batch_names.append(key)\n self.batch_length.append(mat.shape[0])\n \n self.batch_word_labels.append(word_tokens)\n self.batch_word_label_length.append(len(word_tokens))\n\n self.batch_word_text_tgt.append(word_labels)\n self.batch_word_text_length_tgt.append(len(word_labels)) \n #==============================================================\n #==============================================================\n # total_labels_in_batch is used to keep track of the length of sequences in a batch, just make sure it does not overflow the gpu\n ##in general lstm training we are not using this because self.max_batch_len will be around 10-20 and self.max_batch_label_len is usually set very high \n expect_len_of_features = max(max(self.batch_length,default=0), mat.shape[0])\n expect_len_of_labels = max(max(self.batch_word_label_length, default=0),len(word_tokens))\n total_labels_in_batch = (expect_len_of_features + expect_len_of_labels)*(len(self.batch_names)+4)\n \n #print(expect_len_of_features,expect_len_of_labels,total_labels_in_batch,self.max_batch_label_len)\n ###check if you have enough labels in the batch and if you have then push it to the queue\n ###else keep adding them to the lists\n ###if the queue has fewer than three batches and the next batch is not ready yet, fill it with resampled batches\n \n if (total_labels_in_batch > self.max_batch_label_len) or (len(self.batch_data)==self.max_batch_len):\n batch_data_dict = self.make_batching_dict()\n self.queue.put(batch_data_dict) \n self.__reset_the_data_holders()\n \n if len(self.batch_names)>0:\n ### Collect the left over stuff as the last batch\n #-----------------------------------------------\n batch_data_dict = self.make_batching_dict()\n self.queue.put(batch_data_dict)\n\n def 
next(self, timeout=3000):\n return self.queue.get(block=True, timeout=timeout)\n#===================================================================\n\n\n# sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/KAT_Attention')\n# import Attention_arg\n# from Attention_arg import parser\n# args = parser.parse_args()\n# print(args)\n\n\n# ###debugger\n# args.Word_model_path='/mnt/matylda3/vydana/benchmarking_datasets/Timit/models/Timit_PHSEQ_100/Timit_PHSEQ__100__word.model'\n# args.Char_model_path='/mnt/matylda3/vydana/benchmarking_datasets/Timit/models/Timit_PHSEQ_100/Timit_PHSEQ__100__word.model'\n# args.text_file = '/mnt/matylda3/vydana/benchmarking_datasets/Timit/All_text'\n# args.train_path='/mnt/matylda3/vydana/benchmarking_datasets/Timit/scp_files/train/'\n# args.dev_path='/mnt/matylda3/vydana/benchmarking_datasets/Timit/scp_files/dev/'\n# Word_model=Load_sp_models(args.Word_model_path)\n# Char_model=Load_sp_models(args.Char_model_path)\n# train_gen = DataLoader(files=glob.glob(args.train_path + \"*\"),max_batch_label_len=20000, max_batch_len=4,max_feat_len=2000,max_label_len=200,Word_model=Word_model,Char_model=Char_model,text_file=args.text_file)\n# for i in range(10):\n# B1 = train_gen.next()\n# print(B1.keys())\n# #breakpoint()\n\n",
"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport pdb \nfrom torch.autograd import Variable\n\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.nn.utils.weight_norm as wtnrm\n\nimport numpy as np\nfrom keras.preprocessing.sequence import pad_sequences\n\n\n\n#from Trans_utilities import get_attn_key_pad_mask, get_subsequent_mask, get_attn_pad_mask_encoder, get_attn_pad_mask,get_encoder_non_pad_mask, get_decoder_non_pad_mask,pad_list\n#from Trans_Decoder import Decoder\n#from Trans_Encoder import Encoder\n#from Trans_conv_layers import Conv_2D_Layers\n\n\nimport sys\nimport kaldi_io\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')\nfrom CMVN import CMVN\nfrom utils__ import weights_init,count_parameters\n\n\n#--------------------------------------------------------------------------\nclass Transformer(nn.Module):\n \"\"\"An encoder-decoder framework only includes attention. \"\"\"\n def __init__(self,args):\n super(Transformer, self).__init__() \n self.label_smoothing = args.label_smoothing\n\n if args.conv_model=='Conv_2D_4Layers':\n print(\"using conv layers\",)\n from Trans_conv_layers import Conv_2D_4Layers as Conv_2D_Layers\n else:\n from Trans_conv_layers import Conv_2D_Layers\n #print(\"using conv layers\",Conv_2D_Layers)\n \n if args.encoder=='conformer':\n from Conf_Encoder import Encoder \n else:\n from Trans_Encoder import Encoder\n\n #----------------------------------\n if args.decoder=='decoder':\n print(\"Nothing special\")\n else:\n from Trans_Decoder_ctc import Decoder\n #----------------------------------\n\n self.conv_layers = Conv_2D_Layers(args=args)\n self.encoder = Encoder(args=args,MT_flag=False)\n self.decoder = Decoder(args=args)\n #----------------------------------\n def forward(self, padded_input,padded_target):\n ###conv layers\n conv_padded_input=self.conv_layers(padded_input)\n \n #General Transformer ASR model\n encoder_padded_outputs, *_ = self.encoder(conv_padded_input)\n\n output_dict = self.decoder(padded_target, encoder_padded_outputs)\n return output_dict\n #=============================================================================================================\n #=============================================================================================================\n def predict(self, feat_path,args):\n print(\"went to the decoder loop\")\n with torch.no_grad():\n #breakpoint()\n #### read feature matrices \n smp_feat=kaldi_io.read_mat(feat_path)\n smp_feat=CMVN(smp_feat)\n input=torch.from_numpy(smp_feat) \n input = Variable(input.float(), requires_grad=False).double().float()\n input=input.unsqueeze(0)\n \n #print(\"args.LM_model,args.Am_weight,args.beam,args.gamma,args.len_pen\",args.LM_model,args.Am_weight,args.beam,args.gamma,args.len_pen)\n ###conv layers\n conv_padded_input=self.conv_layers(input)\n\n #General Transformer ASR model\n encoder_padded_outputs, *_ = self.encoder(conv_padded_input)\n nbest_hyps,scoring_list = self.decoder.recognize_batch_beam_autoreg_LM_multi_hyp(encoder_padded_outputs,args.beam,args.Am_weight,args.gamma,args.LM_model,args.len_pen,args)\n #===================================================================================\n beam_len = nbest_hyps.size(0)\n hyp = {'score': 0.0, 'yseq': None,'state': None, 'alpha_i_list':None, 'Text_seq':None}\n #===============================================\n Output_dict=[]\n for I in range(beam_len): \n 
new_hyp={}\n new_hyp['yseq'] = nbest_hyps[I]\n new_hyp['score'] = scoring_list[I].sum()\n new_hyp['Text_seq'] = self.decoder.get_charecters_for_sequences(nbest_hyps[I].unsqueeze(0))\n\n new_hyp['state'] = hyp['state']\n new_hyp['alpha_i_list'] = hyp['alpha_i_list']\n\n Output_dict.append(new_hyp)\n return Output_dict\n #----------------------------------------------------------------\n#=============================================================================================================\n#=============================================================================================================\n#-------------------------------------------------------------------------------------------------------------\n\n#=============================================================================================================\n#-------------------------------------------------------------------------------------------------------------\nclass TransformerOptimizer(object):\n \"\"\"A simple wrapper class for learning rate scheduling\"\"\"\n\n def __init__(self, optimizer, k, d_model, step_num, warmup_steps=4000):\n self.optimizer = optimizer\n self.k = k\n \n #present_lr=[param_group['lr'] for param_group in self.optimizer.param_groups]\n self.init_lr = d_model ** (-0.5)\n self.warmup_steps = warmup_steps\n self.step_num = step_num\n self.reduction_factor=1\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self):\n self._update_lr()\n self.optimizer.step()\n\n def _update_lr(self):\n self.step_num += 1\n lr = self.k * self.init_lr * min(self.step_num ** (-0.5),\n self.step_num * (self.warmup_steps ** (-1.5)))\n \n #print(lr,self.step_num ** (-0.5),self.step_num * self.warmup_steps ** (-1.5),self.reduction_factor)\n\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n def load_state_dict(self, state_dict):\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def set_k(self, k):\n self.k = k\n\n def set_step_num(self, step_num):\n self.step_num=step_num\n\n def reduce_learning_rate(self, k):\n self.reduction_factor = self.reduction_factor*k\n #print(self.reduction_factor)\n \n def print_lr(self):\n present_lr=[param_group['lr'] for param_group in self.optimizer.param_groups]\n return present_lr[0]\n\n\n#==============================================================================================================\n#---------------------------------------------------------------------------------------------------------------\n"
] | [
[
"matplotlib.pyplot.switch_backend",
"numpy.zeros",
"numpy.argmin",
"matplotlib.pyplot.viridis"
],
[
"numpy.zeros"
],
[
"torch.no_grad",
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
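Note on the record above: TransformerOptimizer._update_lr implements the standard Transformer ("Noam") warmup schedule, lr = k * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5). A minimal, self-contained sketch of that formula follows; the helper name noam_lr is illustrative, not from the source.

def noam_lr(step, d_model=512, k=1.0, warmup_steps=4000):
    # Rises roughly linearly for the first warmup_steps, then decays as
    # step**-0.5; the two min() terms are equal exactly at step == warmup_steps.
    return k * d_model ** (-0.5) * min(step ** (-0.5), step * warmup_steps ** (-1.5))

# Usage: noam_lr(2000) < noam_lr(4000) and noam_lr(16000) < noam_lr(4000)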
petabricks/petabricks | [
"b498b93880b0c4ac3924ddb82cff2e6541e60bd1"
] | [
"scripts/misc/csvavg.py"
] | [
"#!/usr/bin/python\n\nimport csv, sys\nimport numpy\n\ndialect = csv.excel_tab\nmulti_file=len(sys.argv[1:])>1\n\ninputs = map(lambda x: csv.DictReader(x, dialect=dialect), map(open, sys.argv[1:]))\nrows = map(csv.DictReader.next, inputs)\nheaders = inputs[0].fieldnames\noutput = csv.writer(sys.stdout, dialect=dialect)\noutput.writerow(headers)\n\ndef mkavg(k):\n try:\n values = map(lambda x: float(x[k]), rows)\n return \"%f +- %f\" % (numpy.mean(values), numpy.std(values))\n except:\n return 'error'\n\nif multi_file:\n try:\n while True:\n output.writerow(map(mkavg, headers))\n rows = map(csv.DictReader.next, inputs)\n except StopIteration:\n pass\nelse:\n counts=dict()\n sums=dict()\n for k in headers:\n try:\n sums[k]=float(rows[0][k])\n except:\n sums[k]=0.0\n counts[k]=1.0\n for row in inputs[0]:\n for k in headers:\n try:\n sums[k]+=float(row[k])\n except:\n sums[k]=0.0\n counts[k]+=1.0\n\n\n output.writerow(map(lambda k: sums[k]/counts[k], headers))\n\n\n"
] | [
[
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
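The csvavg.py record above averages each CSV column across parallel input files and prints "mean +- std" computed with numpy. A hedged Python 3 restatement of its mkavg helper (argument names are illustrative; the original reads the rows from csv.DictReader objects):

import numpy as np

def mkavg(rows, key):
    # rows: one dict per input file, all sharing the same CSV fieldnames.
    try:
        values = [float(row[key]) for row in rows]
        return "%f +- %f" % (np.mean(values), np.std(values))
    except (KeyError, TypeError, ValueError):
        return 'error'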
MaxCodeXTC/panel | [
"1d34e8ce4734eec10f8e64af11c5a3fecaab5bac"
] | [
"panel/widgets/indicators.py"
] | [
"import os\nimport sys\n\nfrom math import pi\n\nimport numpy as np\nimport param\n\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource\nfrom tqdm.asyncio import tqdm as _tqdm\n\nfrom ..layout import Column, Row\nfrom ..models import (\n HTML, Progress as _BkProgress, TrendIndicator as _BkTrendIndicator\n)\nfrom ..pane.markup import Str\nfrom ..reactive import SyncableData\nfrom ..util import escape, updating\nfrom ..viewable import Viewable\nfrom .base import Widget\n\nRED = \"#d9534f\"\nGREEN = \"#5cb85c\"\nBLUE = \"#428bca\"\n\nclass Indicator(Widget):\n \"\"\"\n Indicator is a baseclass for widgets which indicate some state.\n \"\"\"\n\n sizing_mode = param.ObjectSelector(default='fixed', objects=[\n 'fixed', 'stretch_width', 'stretch_height', 'stretch_both',\n 'scale_width', 'scale_height', 'scale_both', None])\n\n __abstract = True\n\n\nclass BooleanIndicator(Indicator):\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n __abstract = True\n\n\nclass BooleanStatus(BooleanIndicator):\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=20, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=20, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n msg['css_classes'] = ['dot-filled', self.color] if value else ['dot']\n return msg\n\n\nclass LoadingSpinner(BooleanIndicator):\n\n bgcolor = param.ObjectSelector(default='light', objects=['dark', 'light'])\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=125, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=125, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None, 'bgcolor': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n color_cls = f'{self.color}-{self.bgcolor}'\n msg['css_classes'] = ['loader', 'spin', color_cls] if value else ['loader', self.bgcolor]\n return msg\n\n\nclass ValueIndicator(Indicator):\n \"\"\"\n A ValueIndicator provides a visual representation for a numeric\n value.\n \"\"\"\n\n value = param.Number(default=None, allow_None=True)\n\n __abstract = True\n\n\nclass Progress(ValueIndicator):\n\n active = param.Boolean(default=True, doc=\"\"\"\n If no value is set the active property toggles animation of the\n progress bar on and off.\"\"\")\n\n bar_color = param.ObjectSelector(default='success', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n max = param.Integer(default=100, doc=\"The maximum value of the progress bar.\")\n\n value = param.Integer(default=None, bounds=(-1, None), doc=\"\"\"\n The current value of the progress bar. 
If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter. If set to -1 the progress bar will be empty.\"\"\")\n\n _rename = {'name': None}\n\n _widget_type = _BkProgress\n\n @param.depends('max', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = (-1, self.max)\n\n def __init__(self,**params):\n super().__init__(**params)\n self._update_value_bounds()\n\n\nclass Number(ValueIndicator):\n \"\"\"\n The Number indicator renders the value as text optionally colored\n according to the color thresholds.\n \"\"\"\n\n default_color = param.String(default='black')\n\n colors = param.List(default=None)\n\n format = param.String(default='{value}')\n\n font_size = param.String(default='54pt')\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n title_size = param.String(default='18pt')\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'colors': None, 'default_color': None,\n 'font_size': None, 'format': None, 'nan_format': None,\n 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n format = msg.pop('format', self.format)\n value = msg.pop('value', self.value)\n nan_format = msg.pop('nan_format', self.nan_format)\n color = msg.pop('default_color', self.default_color)\n colors = msg.pop('colors', self.colors)\n for val, clr in (colors or [])[::-1]:\n if value is not None and value <= val:\n color = clr\n if value is None:\n value = float('nan')\n value = format.format(value=value).replace('nan', nan_format)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass String(ValueIndicator):\n \"\"\"\n The String indicator renders a string with a title.\n \"\"\"\n\n default_color = param.String(default='black')\n\n font_size = param.String(default='54pt')\n\n title_size = param.String(default='18pt')\n\n value = param.String(default=None, allow_None=True)\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'default_color': None, 'font_size': None, 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n value = msg.pop('value', self.value)\n color = msg.pop('default_color', self.default_color)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass Gauge(ValueIndicator):\n \"\"\"\n A Gauge represents a value in some range as a position on\n speedometer or gauge. 
It is similar to a Dial but visually a lot\n busier.\n \"\"\"\n\n annulus_width = param.Integer(default=10, doc=\"\"\"\n Width of the gauge annulus.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Gauge, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n custom_opts = param.Dict(doc=\"\"\"\n Additional options to pass to the ECharts Gauge definition.\"\"\")\n\n height = param.Integer(default=300, bounds=(0, None))\n\n end_angle = param.Number(default=-45, doc=\"\"\"\n Angle at which the gauge ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator.\"\"\")\n\n num_splits = param.Integer(default=10, doc=\"\"\"\n Number of splits along the gauge.\"\"\")\n\n show_ticks = param.Boolean(default=True, doc=\"\"\"\n Whether to show ticks along the dials.\"\"\")\n\n show_labels = param.Boolean(default=True, doc=\"\"\"\n Whether to show tick labels along the dials.\"\"\")\n\n start_angle = param.Number(default=225, doc=\"\"\"\n Angle at which the gauge starts.\"\"\")\n\n tooltip_format = param.String(default='{b} : {c}%', doc=\"\"\"\n Formatting string for the hover tooltip.\"\"\")\n\n title_size = param.Integer(default=18, doc=\"\"\"\n Size of title font.\"\"\")\n\n value = param.Number(default=25, doc=\"\"\"\n Value to indicate on the gauge a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=300, bounds=(0, None))\n\n _rename = {}\n\n _source_transforms = {\n 'annulus_width': None, 'bounds': None, 'colors': None,\n 'custom_opts': None, 'end_angle': None, 'format': None,\n 'num_splits': None, 'show_ticks': None, 'show_labels': None,\n 'start_angle': None, 'tooltip_format': None, 'title_size': None,\n 'value': None\n }\n\n @property\n def _widget_type(self):\n if 'panel.models.echarts' not in sys.modules:\n from ..models.echarts import ECharts\n else:\n ECharts = getattr(sys.modules['panel.models.echarts'], 'ECharts')\n return ECharts\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n vmin, vmax = msg.pop('bounds', self.bounds)\n msg['data'] = {\n 'tooltip': {\n 'formatter': msg.pop('tooltip_format', self.tooltip_format)\n },\n 'series': [{\n 'name': 'Gauge',\n 'type': 'gauge',\n 'axisTick': {'show': msg.pop('show_ticks', self.show_ticks)},\n 'axisLabel': {'show': msg.pop('show_labels', self.show_labels)},\n 'title': {'fontWeight': 'bold', 'fontSize': msg.pop('title_size', self.title_size)},\n 'splitLine': {'show': True},\n 'radius': '100%',\n 'detail': {'formatter': msg.pop('format', self.format)},\n 'min': vmin,\n 'max': vmax,\n 'startAngle': msg.pop('start_angle', self.start_angle),\n 'endAngle': msg.pop('end_angle', self.end_angle),\n 'splitNumber': msg.pop('num_splits', self.num_splits),\n 'data': [{'value': msg.pop('value', self.value), 'name': self.name}],\n 'axisLine': {\n 'lineStyle': {\n 'width': msg.pop('annulus_width', self.annulus_width),\n }\n }\n }]\n }\n colors = msg.pop('colors', self.colors)\n if colors:\n msg['data']['series'][0]['axisLine']['lineStyle']['color'] = colors\n custom_opts = msg.pop('custom_opts', self.custom_opts)\n if custom_opts:\n gauge = 
msg['data']['series'][0]\n for k, v in custom_opts.items():\n if k not in gauge or not isinstance(gauge[k], dict):\n gauge[k] = v\n else:\n gauge[k].update(v)\n return msg\n\n\nclass Dial(ValueIndicator):\n \"\"\"\n A Dial represents a value in some range as a position on an\n annular dial. It is similar to a Gauge but more minimal visually.\n \"\"\"\n\n annulus_width = param.Number(default=0.2, doc=\"\"\"\n Width of the radial annulus as a fraction of the total.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Dial, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n default_color = param.String(default='lightblue', doc=\"\"\"\n Color of the radial annulus if not color thresholds are supplied.\"\"\")\n\n end_angle = param.Number(default=25, doc=\"\"\"\n Angle at which the dial ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator and lower/upper bounds.\"\"\")\n\n height = param.Integer(default=250, bounds=(1, None))\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n needle_color = param.String(default='black', doc=\"\"\"\n Color of the Dial needle.\"\"\")\n\n needle_width = param.Number(default=0.1, doc=\"\"\"\n Radial width of the needle.\"\"\")\n\n start_angle = param.Number(default=-205, doc=\"\"\"\n Angle at which the dial starts.\"\"\")\n\n tick_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial min/max labels.\"\"\")\n\n title_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial title.\"\"\")\n\n unfilled_color = param.String(default='whitesmoke', doc=\"\"\"\n Color of the unfilled region of the Dial.\"\"\")\n\n value_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial value label.\"\"\")\n\n value = param.Number(default=25, allow_None=True, doc=\"\"\"\n Value to indicate on the dial a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=250, bounds=(1, None))\n\n _manual_params = [\n 'value', 'start_angle', 'end_angle', 'bounds',\n 'annulus_width', 'format', 'background', 'needle_width',\n 'tick_size', 'title_size', 'value_size', 'colors',\n 'default_color', 'unfilled_color', 'height',\n 'width', 'nan_format', 'needle_color'\n ]\n\n _data_params = _manual_params\n\n _rename = {'background': 'background_fill_color'}\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _get_data(self):\n vmin, vmax = self.bounds\n value = self.value\n if value is None:\n value = float('nan')\n fraction = (value-vmin)/(vmax-vmin)\n start = (np.radians(360-self.start_angle) - pi % (2*pi)) + pi\n end = (np.radians(360-self.end_angle) - pi % (2*pi)) + pi\n distance = (abs(end-start) % (pi*2))\n if end>start:\n distance = (pi*2)-distance\n radial_fraction = distance*fraction\n angle = start if np.isnan(fraction) else (start-radial_fraction)\n inner_radius = 1-self.annulus_width\n\n color = self.default_color\n for val, clr in (self.colors or [])[::-1]:\n if fraction <= val:\n color = clr\n\n annulus_data = {\n 'starts': np.array([start, angle]),\n 'ends' : np.array([angle, end]),\n 'color': [color, self.unfilled_color],\n 'radius': np.array([inner_radius, inner_radius])\n }\n\n x0s, y0s, x1s, 
y1s, clrs = [], [], [], [], []\n colors = self.colors or []\n for (val, _), (_, clr) in zip(colors[:-1], colors[1:]):\n tangle = start-(distance*val)\n if (vmin + val * (vmax-vmin)) <= value:\n continue\n x0, y0 = np.cos(tangle), np.sin(tangle)\n x1, y1 = x0*inner_radius, y0*inner_radius\n x0s.append(x0)\n y0s.append(y0)\n x1s.append(x1)\n y1s.append(y1)\n clrs.append(clr)\n\n threshold_data = {\n 'x0': x0s, 'y0': y0s, 'x1': x1s, 'y1': y1s, 'color': clrs\n }\n\n center_radius = 1-self.annulus_width/2.\n x, y = np.cos(angle)*center_radius, np.sin(angle)*center_radius\n needle_start = pi+angle-(self.needle_width/2.)\n needle_end = pi+angle+(self.needle_width/2.)\n needle_data = {\n 'x': np.array([x]),\n 'y': np.array([y]),\n 'start': np.array([needle_start]),\n 'end': np.array([needle_end]),\n 'radius': np.array([center_radius])\n }\n\n value = self.format.format(value=value).replace('nan', self.nan_format)\n min_value = self.format.format(value=vmin)\n max_value = self.format.format(value=vmax)\n tminx, tminy = np.cos(start)*center_radius, np.sin(start)*center_radius\n tmaxx, tmaxy = np.cos(end)*center_radius, np.sin(end)*center_radius\n tmin_angle, tmax_angle = start+pi, end+pi % pi\n scale = (self.height/400)\n title_size = self.title_size if self.title_size else '%spt' % (scale*32)\n value_size = self.value_size if self.value_size else '%spt' % (scale*48)\n tick_size = self.tick_size if self.tick_size else '%spt' % (scale*18)\n\n text_data= {\n 'x': np.array([0, 0, tminx, tmaxx]),\n 'y': np.array([-.2, -.5, tminy, tmaxy]),\n 'text': [self.name, value, min_value, max_value],\n 'rot': np.array([0, 0, tmin_angle, tmax_angle]),\n 'size': [title_size, value_size, tick_size, tick_size],\n 'color': ['black', color, 'black', 'black']\n }\n return annulus_data, needle_data, threshold_data, text_data\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n params = self._process_param_change(self._init_params())\n model = figure(\n x_range=(-1,1), y_range=(-1,1), tools=[],\n outline_line_color=None, toolbar_location=None,\n width=self.width, height=self.height, **params\n )\n model.xaxis.visible = False\n model.yaxis.visible = False\n model.grid.visible = False\n\n annulus, needle, threshold, text = self._get_data()\n\n # Draw annulus\n annulus_source = ColumnDataSource(data=annulus, name='annulus_source')\n model.annular_wedge(\n x=0, y=0, inner_radius='radius', outer_radius=1, start_angle='starts',\n end_angle='ends', line_color='gray', color='color', direction='clock',\n source=annulus_source\n )\n\n # Draw needle\n needle_source = ColumnDataSource(data=needle, name='needle_source')\n model.wedge(\n x='x', y='y', radius='radius', start_angle='start', end_angle='end',\n fill_color=self.needle_color, line_color=self.needle_color,\n source=needle_source, name='needle_renderer'\n )\n\n # Draw thresholds\n threshold_source = ColumnDataSource(data=threshold, name='threshold_source')\n model.segment(\n x0='x0', x1='x1', y0='y0', y1='y1', line_color='color', source=threshold_source,\n line_width=2\n )\n\n # Draw labels\n text_source = ColumnDataSource(data=text, name='label_source')\n model.text(\n x='x', y='y', text='text', font_size='size', text_align='center',\n text_color='color', source=text_source, text_baseline='top',\n angle='rot'\n )\n\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n update_data = False\n for event in events:\n if event.name in ('width', 
'height'):\n                model.update(**{event.name: event.new})\n            if event.name in self._data_params:\n                update_data = True\n            elif event.name == 'needle_color':\n                needle_r = model.select(name='needle_renderer')\n                needle_r.glyph.line_color = event.new\n                needle_r.glyph.fill_color = event.new\n        if not update_data:\n            return\n        annulus, needle, threshold, labels = self._get_data()\n        model.select(name='annulus_source').data.update(annulus)\n        model.select(name='needle_source').data.update(needle)\n        model.select(name='threshold_source').data.update(threshold)\n        model.select(name='label_source').data.update(labels)\n\n\nclass Trend(SyncableData, Indicator):\n    \"\"\"\n    The Trend indicator enables the user to display a Dashboard KPI Card.\n\n    The card can be laid out as:\n\n    * a column (text and plot on top of each other) or\n    * a row (text and plot after each other)\n\n    The text section is responsive and resizes on window resize.\n    \"\"\"\n\n    data = param.Parameter(doc=\"\"\"\n      The plot data declared as a dictionary of arrays or a DataFrame.\"\"\")\n\n    layout = param.ObjectSelector(default=\"column\", objects=[\"column\", \"row\"])\n\n    plot_x = param.String(default=\"x\", doc=\"\"\"\n      The name of the key in the plot_data to use on the x-axis.\"\"\")\n\n    plot_y = param.String(default=\"y\", doc=\"\"\"\n      The name of the key in the plot_data to use on the y-axis.\"\"\")\n\n    plot_color = param.String(default=BLUE, doc=\"\"\"\n      The color to use in the plot.\"\"\")\n\n    plot_type = param.ObjectSelector(default=\"bar\", objects=[\"line\", \"step\", \"area\", \"bar\"], doc=\"\"\"\n      The plot type to render the plot data as.\"\"\")\n\n    pos_color = param.String(GREEN, doc=\"\"\"\n      The color used to indicate a positive change.\"\"\")\n\n    neg_color = param.String(RED, doc=\"\"\"\n      The color used to indicate a negative change.\"\"\")\n\n    title = param.String(doc=\"\"\"The title or a short description of the card\"\"\")\n\n    value = param.Parameter(default='auto', doc=\"\"\"\n      The primary value to be displayed.\"\"\")\n\n    value_change = param.Parameter(default='auto', doc=\"\"\"\n      A secondary value. 
For example the change in percent.\"\"\")\n\n _data_params = ['data']\n\n _manual_params = ['data']\n\n _rename = {'data': None, 'selection': None}\n\n _widget_type = _BkTrendIndicator\n\n def _get_data(self):\n if self.data is None:\n return None, {self.plot_x: [], self.plot_y: []}\n elif isinstance(self.data, dict):\n return self.data, self.data\n return self.data, ColumnDataSource.from_df(self.data)\n\n def _init_params(self):\n props = super()._init_params()\n self._processed, self._data = self._get_data()\n props['source'] = ColumnDataSource(data=self._data)\n return props\n\n def _trigger_auto_values(self):\n trigger = []\n if self.value == 'auto':\n trigger.append('value')\n if self.value_change == 'auto':\n trigger.append('value_change')\n if trigger:\n self.param.trigger(*trigger)\n\n @updating\n def _stream(self, stream, rollover=None):\n self._trigger_auto_values()\n super()._stream(stream, rollover)\n\n def _update_cds(self, *events):\n super()._update_cds(*events)\n self._trigger_auto_values()\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n ys = self._data.get(self.plot_y, [])\n if 'value' in msg and msg['value'] == 'auto':\n if len(ys):\n msg['value'] = ys[-1]\n else:\n msg['value'] = 0\n if 'value_change' in msg and msg['value_change'] == 'auto':\n if len(ys) > 1:\n y1, y2 = self._data.get(self.plot_y)[-2:]\n msg['value_change'] = 0 if y1 == 0 else (y2/y1 - 1)\n else:\n msg['value_change'] = 0\n return msg\n\n\nMARGIN = {\n \"text_pane\": {\"column\": (5, 10, 0, 10), \"row\": (0, 10, 0, 10)},\n \"progress\": {\"column\": (0, 10, 5, 10), \"row\": (12, 10, 0, 10)},\n}\n\n\n\nclass ptqdm(_tqdm):\n\n def __init__(self, *args, **kwargs):\n self._indicator = kwargs.pop('indicator')\n super().__init__(*args, **kwargs)\n\n def display(self, msg=None, pos=None, bar_style=None):\n super().display(msg, pos)\n style = self._indicator.text_pane.style or {}\n color = self.colour or 'black'\n self._indicator.text_pane.style = dict(style, color=color)\n if self.total is not None and self.n is not None:\n self._indicator.max = int(self.total) # Can be numpy.int64\n self._indicator.value = int(self.n)\n self._indicator.text = self._to_text(**self.format_dict)\n return True\n\n def _to_text(self, n, total, **kwargs):\n return self.format_meter(n, total, **{**kwargs, \"ncols\": 0})\n\n def close(self):\n super().close()\n if not self.leave:\n self._indicator.reset()\n return _tqdm\n\n\nclass Tqdm(Indicator):\n\n layout = param.ClassSelector(class_=(Column, Row), precedence=-1, constant=True, doc=\"\"\"\n The layout for the text and progress indicator.\"\"\",)\n\n max = param.Integer(default=100, doc=\"\"\"\n The maximum value of the progress indicator.\"\"\")\n\n progress = param.ClassSelector(class_=Progress, precedence=-1, doc=\"\"\"\n The Progress indicator used to display the progress.\"\"\",)\n\n text = param.String(default='', doc=\"\"\"\n The current tqdm style progress text.\"\"\")\n\n text_pane = param.ClassSelector(class_=Str, precedence=-1, doc=\"\"\"\n The pane to display the text to.\"\"\")\n\n value = param.Integer(default=0, bounds=(0, None), doc=\"\"\"\n The current value of the progress bar. If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter.\"\"\")\n\n margin = param.Parameter(default=0, doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n width = param.Integer(default=400, bounds=(0, None), doc=\"\"\"\n The width of the component (in pixels). This can be either\n fixed or preferred width, depending on width sizing policy.\"\"\")\n\n write_to_console = param.Boolean(default=False, doc=\"\"\"\n Whether or not to also write to the console.\"\"\")\n\n _layouts = {Row: 'row', Column: 'column'}\n\n _rename = {'value': None, 'min': None, 'max': None, 'text': None}\n\n def __init__(self, **params):\n layout = params.pop('layout', 'column')\n layout = self._layouts.get(layout, layout) \n if \"text_pane\" not in params:\n sizing_mode = 'stretch_width' if layout == 'column' else 'fixed'\n params[\"text_pane\"] = Str(\n None, min_height=20, min_width=280, sizing_mode=sizing_mode,\n margin=MARGIN[\"text_pane\"][layout],\n )\n if \"progress\" not in params:\n params[\"progress\"] = Progress(\n active=False,\n sizing_mode=\"stretch_width\",\n min_width=100,\n margin=MARGIN[\"progress\"][layout],\n )\n\n layout_params = {p: params.get(p, getattr(self, p)) for p in Viewable.param}\n if layout == 'row' or layout is Row:\n params['layout'] = Row(\n params['progress'], params['text_pane'], **layout_params\n )\n else:\n params['layout'] = Column(\n params['text_pane'], params['progress'], **layout_params\n )\n super().__init__(**params)\n\n self.param.watch(self._update_layout, list(Viewable.param))\n\n if self.value == 0:\n # Hack: to give progress the initial look\n self.progress.max = 100000\n self.progress.value = 1\n else:\n self.progress.max = self.max\n self.progress.value = self.value\n self.text_pane.object = self.text\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = self.layout._get_model(doc, root, parent, comm)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _cleanup(self, root):\n super()._cleanup(root)\n self.layout._cleanup(root)\n\n def _update_layout(self, *events):\n self.layout.param.set_param(**{event.name: event.new for event in events})\n\n @param.depends(\"text\", watch=True)\n def _update_text(self):\n if self.text_pane:\n self.text_pane.object = self.text\n\n @param.depends(\"value\", watch=True)\n def _update_value(self):\n if self.progress:\n self.progress.value = self.value\n\n @param.depends(\"max\", watch=True)\n def _update_max(self):\n if self.progress:\n self.progress.max = self.max\n\n def __call__(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm(*args, **kwargs)\n\n __call__.__doc__ = ptqdm.__doc__\n\n def pandas(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console and 'file' not in kwargs:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm.pandas(*args, **kwargs)\n\n def reset(self):\n \"\"\"Resets the parameters\"\"\"\n self.value = self.param.value.default\n self.text = self.param.text.default\n"
] | [
[
"numpy.radians",
"numpy.isnan",
"numpy.cos",
"numpy.sin",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
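In the panel/widgets/indicators.py record above, Dial._get_data maps the value's fraction of the bounds onto the dial arc. Note that the sub-expression "- pi % (2*pi) + pi" in the source is a no-op, since pi % (2*pi) == pi, so the start/end angles reduce to radians(360 - angle). A simplified sketch of the needle-angle computation under that observation (the function name is illustrative, not from the source):

import math

def dial_angle(value, vmin=0.0, vmax=100.0, start_angle=-205.0, end_angle=25.0):
    # Fraction of the [vmin, vmax] bounds covered by the value.
    fraction = (value - vmin) / (vmax - vmin)
    start = math.radians(360.0 - start_angle)
    end = math.radians(360.0 - end_angle)
    # Arc length swept by the dial, mirroring the branch in Dial._get_data.
    distance = abs(end - start) % (2 * math.pi)
    if end > start:
        distance = 2 * math.pi - distance
    return start - distance * fraction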
rubenlozanoaht3m/DataDogm | [
"cd605e8072cca31e8418830c3300657ae2fa5b16",
"cd605e8072cca31e8418830c3300657ae2fa5b16",
"cd605e8072cca31e8418830c3300657ae2fa5b16",
"cd605e8072cca31e8418830c3300657ae2fa5b16"
] | [
"examples/pipeline/hetero_ftl/pipeline-hetero-ftl-with-predict.py",
"examples/benchmark_quality/hetero_sbt/gbdt-binary.py",
"python/federatedml/util/test/early_stop_test.py",
"python/federatedml/feature/feature_selection/correlation_filter.py"
] | [
"#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\n\nfrom pipeline.backend.pipeline import PipeLine\nfrom pipeline.component import DataTransform\nfrom pipeline.component.hetero_ftl import HeteroFTL\nfrom pipeline.component.reader import Reader\nfrom pipeline.interface.data import Data\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras import initializers\nfrom pipeline.component.evaluation import Evaluation\n\nfrom pipeline.utils.tools import load_job_config\n\n\ndef main(config=\"../../config.yaml\", namespace=\"\"):\n # obtain config\n if isinstance(config, str):\n config = load_job_config(config)\n parties = config.parties\n guest = parties.guest[0]\n host = parties.host[0]\n\n guest_train_data = {\"name\": \"nus_wide_guest\", \"namespace\": f\"experiment{namespace}\"}\n host_train_data = {\"name\": \"nus_wide_host\", \"namespace\": f\"experiment{namespace}\"}\n pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)\n\n reader_0 = Reader(name=\"reader_0\")\n reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)\n reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)\n\n data_transform_0 = DataTransform(name=\"data_transform_0\")\n data_transform_0.get_party_instance(\n role='guest', party_id=guest).component_param(\n with_label=True, output_format=\"dense\")\n data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)\n\n hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',\n epochs=10, alpha=1, batch_size=-1, mode='plain')\n\n hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',\n kernel_initializer=initializers.RandomNormal(stddev=1.0),\n bias_initializer=initializers.Zeros()))\n\n hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))\n evaluation_0 = Evaluation(name='evaluation_0', eval_type=\"binary\")\n\n pipeline.add_component(reader_0)\n pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))\n pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))\n pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))\n\n pipeline.compile()\n\n pipeline.fit()\n\n # predict\n # deploy required components\n pipeline.deploy_component([data_transform_0, hetero_ftl_0])\n\n predict_pipeline = PipeLine()\n # add data reader onto predict pipeline\n predict_pipeline.add_component(reader_0)\n # add selected components from train pipeline onto predict pipeline\n # specify data source\n predict_pipeline.add_component(\n pipeline, data=Data(\n predict_input={\n pipeline.data_transform_0.input.data: reader_0.output.data}))\n # run predict model\n predict_pipeline.predict()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"PIPELINE DEMO\")\n parser.add_argument(\"-config\", type=str,\n help=\"config 
file\")\n args = parser.parse_args()\n if args.config is not None:\n main(args.config)\n else:\n main()\n",
"#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\nimport os\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score, precision_score, accuracy_score, recall_score\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom pipeline.utils.tools import JobConfig\n\n\ndef main(config=\"../../config.yaml\", param=\"./gbdt_config_binary.yaml\"):\n # obtain config\n\n if isinstance(param, str):\n param = JobConfig.load_from_file(param)\n\n data_guest = param[\"data_guest\"]\n data_host = param[\"data_host\"]\n idx = param[\"idx\"]\n label_name = param[\"label_name\"]\n\n print('config is {}'.format(config))\n if isinstance(config, str):\n config = JobConfig.load_from_file(config)\n data_base_dir = config[\"data_base_dir\"]\n print('data base dir is', data_base_dir)\n else:\n data_base_dir = config.data_base_dir\n\n # prepare data\n df_guest = pd.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)\n df_host = pd.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)\n df = df_guest.join(df_host, rsuffix='host')\n y = df[label_name]\n X = df.drop(label_name, axis=1)\n clf = GradientBoostingClassifier(random_state=0, n_estimators=120 if 'epsilon' in data_guest else 50)\n clf.fit(X, y)\n y_prob = clf.predict(X)\n\n try:\n auc_score = roc_auc_score(y, y_prob)\n except BaseException:\n print(f\"no auc score available\")\n return\n\n result = {\"auc\": auc_score}\n print(result)\n return {}, result\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"BENCHMARK-QUALITY SKLEARN JOB\")\n parser.add_argument(\"-param\", type=str,\n help=\"config file for params\")\n args = parser.parse_args()\n if args.config is not None:\n main(args.param)\n main()\n",
"import unittest\nfrom federatedml.callbacks.validation_strategy import ValidationStrategy\nimport numpy as np\nfrom federatedml.util import consts\nfrom federatedml.param.evaluation_param import EvaluateParam\n\n\nclass TestValidationStrategy(unittest.TestCase):\n\n def setUp(self) -> None:\n self.role = 'guest'\n self.mode = 'hetero'\n self.early_stopping_round = 1\n self.use_first_metric_only = False\n\n @staticmethod\n def generate_fake_eval_metrics(total_rounds, decrease_round, metrics=['ks', 'auc'], start_val=0.8):\n assert total_rounds >= decrease_round\n eval_result_list = []\n start_decrease_round = total_rounds - decrease_round\n for i in range(total_rounds):\n if i < start_decrease_round:\n start_val += 0.01\n else:\n start_val -= 0.01\n\n eval_dict = {metric: start_val for metric in metrics}\n eval_result_list.append(eval_dict)\n return eval_result_list\n\n def test_early_stopping(self):\n\n test_rounds = [i for i in range(10, 100)]\n decrease_rounds = [np.random.randint(i) for i in test_rounds]\n\n for test_round, decrease_round in zip(test_rounds, decrease_rounds):\n\n eval_dicts = self.generate_fake_eval_metrics(test_round, decrease_round, )\n self.early_stopping_round = decrease_round - 1\n\n if self.early_stopping_round <= 0:\n continue\n\n validation_strategy = ValidationStrategy(\n self.role,\n self.mode,\n early_stopping_rounds=self.early_stopping_round,\n use_first_metric_only=self.use_first_metric_only)\n\n for idx, eval_res in enumerate(eval_dicts):\n validation_strategy.performance_recorder.update(eval_res)\n check_rs = validation_strategy.check_early_stopping()\n if check_rs:\n self.assertTrue((test_round - decrease_round + self.early_stopping_round - 1) == idx)\n print('test checking passed')\n break\n\n def test_use_first_metric_only(self):\n\n def evaluate(param, early_stopping_rounds, use_first_metric_only):\n\n eval_type = param.eval_type\n metric_list = param.metrics\n first_metric = None\n\n if early_stopping_rounds and use_first_metric_only and len(metric_list) != 0:\n\n single_metric_list = None\n if eval_type == consts.BINARY:\n single_metric_list = consts.BINARY_SINGLE_VALUE_METRIC\n elif eval_type == consts.REGRESSION:\n single_metric_list = consts.REGRESSION_SINGLE_VALUE_METRICS\n elif eval_type == consts.MULTY:\n single_metric_list = consts.MULTI_SINGLE_VALUE_METRIC\n\n for metric in metric_list:\n if metric in single_metric_list:\n first_metric = metric\n break\n\n return first_metric\n\n param_0 = EvaluateParam(metrics=['roc', 'lift', 'ks', 'auc', 'gain'], eval_type='binary')\n param_1 = EvaluateParam(metrics=['acc', 'precision', 'auc'], eval_type='binary')\n param_2 = EvaluateParam(metrics=['acc', 'precision', 'gain', 'recall', 'lift'], eval_type='binary')\n param_3 = EvaluateParam(metrics=['acc', 'precision', 'gain', 'auc', 'recall'], eval_type='multi')\n\n print(evaluate(param_0, 10, True))\n print(evaluate(param_1, 10, True))\n print(evaluate(param_2, 10, True))\n print(evaluate(param_3, 10, True))\n\n def test_best_iter(self):\n\n test_rounds = [i for i in range(10, 100)]\n decrease_rounds = [np.random.randint(i) for i in test_rounds]\n\n for test_round, decrease_round in zip(test_rounds, decrease_rounds):\n\n eval_dicts = self.generate_fake_eval_metrics(test_round, decrease_round, )\n self.early_stopping_round = decrease_round - 1\n\n if self.early_stopping_round <= 0:\n continue\n\n validation_strategy = ValidationStrategy(self.role, self.mode,\n early_stopping_rounds=self.early_stopping_round,\n 
use_first_metric_only=self.use_first_metric_only)\n\n for idx, eval_res in enumerate(eval_dicts):\n validation_strategy.performance_recorder.update(eval_res)\n check_rs = validation_strategy.check_early_stopping()\n if check_rs:\n best_perform = validation_strategy.performance_recorder.cur_best_performance\n self.assertDictEqual(best_perform, eval_dicts[test_round - decrease_round - 1])\n print('best iter checking passed')\n break\n\n def test_homo_checking(self):\n try:\n validation_strategy = ValidationStrategy(self.role, mode='homo',\n early_stopping_rounds=1)\n except Exception as e:\n # throwing an error is expected\n print(e)\n print('error detected {}, homo checking passed'.format(e))\n\n\nif __name__ == '__main__':\n tvs = TestValidationStrategy()\n tvs.setUp()\n tvs.test_use_first_metric_only()\n # tvs.test_early_stopping()\n # tvs.test_best_iter()\n # tvs.test_homo_checking() # expect checking error !!!\n",
"#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom federatedml.feature.feature_selection.iso_model_filter import FederatedIsoModelFilter\nfrom federatedml.param.feature_selection_param import CorrelationFilterParam\nfrom federatedml.protobuf.generated import feature_selection_meta_pb2\nfrom federatedml.util.component_properties import ComponentProperties\nfrom federatedml.util import LOGGER\n\n\nclass CorrelationFilter(FederatedIsoModelFilter):\n \"\"\"\n filter the columns if all values in this feature is the same\n\n \"\"\"\n\n def __init__(self, filter_param: CorrelationFilterParam, external_model, correlation_model,\n role, cpp: ComponentProperties):\n super().__init__(filter_param, iso_model=external_model, role=role, cpp=cpp)\n self.correlation_model = correlation_model\n self.host_party_id = int(self.correlation_model.parties[1][1:-1].split(\",\")[1])\n self.take_high = False\n\n def _parse_filter_param(self, filter_param: CorrelationFilterParam):\n self.sort_metric = filter_param.sort_metric\n self.threshold = filter_param.threshold\n self.select_federated = filter_param.select_federated\n\n def get_meta_obj(self):\n result = feature_selection_meta_pb2.FilterMeta(\n metrics=\"correlation\",\n filter_type=\"Sort and filter by threshold\",\n threshold=self.threshold,\n select_federated=self.select_federated\n )\n return result\n\n def _guest_fit(self, suffix):\n sorted_idx, col_names = self.__sort_features()\n filtered_name, host_filtered_name = self.__select_corr(sorted_idx, col_names)\n # LOGGER.debug(f\"select_col_name: {self.selection_properties.select_col_names}\")\n for name in self.selection_properties.select_col_names:\n if name not in filtered_name:\n self.selection_properties.add_left_col_name(name)\n self.selection_properties.add_feature_value(name, 0.0)\n else:\n self.selection_properties.add_feature_value(name, filtered_name[name])\n\n if self.select_federated:\n host_id = self.cpp.host_party_idlist.index(self.host_party_id)\n host_prop = self.host_selection_properties[host_id]\n for name in host_prop.select_col_names:\n if name not in host_filtered_name:\n host_prop.add_left_col_name(name)\n host_prop.add_feature_value(name, 0.0)\n else:\n host_prop.add_feature_value(name, host_filtered_name[name])\n self._keep_one_feature(pick_high=self.take_high, selection_properties=host_prop,\n feature_values=[])\n if self.select_federated:\n self.sync_obj.sync_select_results(self.host_selection_properties, suffix=suffix)\n\n def __select_corr(self, sorted_idx, col_names):\n guest_col_names = self.correlation_model.col_names\n host_col_names = self.correlation_model.host_col_names\n filtered_name = {}\n host_filtered_name = {}\n for idx in sorted_idx:\n party, name = col_names[idx]\n if name in filtered_name:\n continue\n if party == 'guest':\n row = guest_col_names.index(name)\n corr = self.correlation_model.local_corr[row, :]\n filtered_name = self.__get_filtered_column(corr, filtered_name, 
guest_col_names, name, True)\n corr = self.correlation_model.corr[row, :]\n host_filtered_name = self.__get_filtered_column(corr, host_filtered_name,\n host_col_names, name, False)\n # LOGGER.debug(f\"guest_col_name: {name}, filtered_name: {filtered_name}, \"\n # f\"host_filtered_name: {host_filtered_name}\")\n else:\n column = host_col_names.index(name)\n corr = self.correlation_model.corr[:, column]\n filtered_name = self.__get_filtered_column(corr, filtered_name, guest_col_names, name, False)\n # LOGGER.debug(f\"host_col_name: {name}, filtered_name: {filtered_name}, \"\n # f\"host_filtered_name: {host_filtered_name}\")\n return filtered_name, host_filtered_name\n\n def __get_filtered_column(self, corr, filtered_name, all_names, curt_name, is_local=True):\n for idx, v in enumerate(corr):\n if np.abs(v) > self.threshold:\n _name = all_names[idx]\n if is_local and _name == curt_name:\n continue\n if _name in filtered_name:\n continue\n else:\n filtered_name[_name] = v\n return filtered_name\n\n def __sort_features(self):\n metric_info = self.iso_model.get_metric_info(self.sort_metric)\n all_feature_values = metric_info.get_partial_values(self.selection_properties.select_col_names)\n col_names = [(\"guest\", x) for x in self.selection_properties.select_col_names]\n\n if self.select_federated:\n assert len(self.correlation_model.parties) == 2, \"Correlation Model should contain host info\" \\\n \"for select_federated in correlation_filter\"\n LOGGER.debug(f\"correlation_parties: {self.correlation_model.parties}\")\n host_id = self.cpp.host_party_idlist.index(self.host_party_id)\n host_property = self.host_selection_properties[host_id]\n all_feature_values.extend(metric_info.get_partial_values(\n host_property.select_col_names, self.host_party_id\n ))\n col_names.extend([(self.host_party_id, x) for x in host_property.select_col_names])\n sorted_idx = np.argsort(all_feature_values)[::-1]\n return sorted_idx, col_names\n"
] | [
[
"tensorflow.keras.initializers.Zeros",
"tensorflow.keras.initializers.RandomNormal",
"tensorflow.keras.optimizers.Adam"
],
[
"sklearn.metrics.roc_auc_score",
"sklearn.ensemble.GradientBoostingClassifier"
],
[
"numpy.random.randint"
],
[
"numpy.argsort",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
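The early_stop_test.py record above exercises FATE's ValidationStrategy: metrics rise, peak, then fall, and stopping must trigger early_stopping_rounds rounds after the peak. The bookkeeping it relies on can be sketched as a recorder that tracks the best value per metric and how many consecutive rounds have passed without improvement. This is an illustrative class, not FATE's actual implementation:

class PerformanceRecorder:
    def __init__(self):
        self.cur_best_performance = {}   # metric name -> best value seen so far
        self.no_improvement_round = {}   # metric name -> rounds since last improvement

    def update(self, eval_dict):
        for metric, val in eval_dict.items():
            best = self.cur_best_performance.get(metric)
            if best is None or val > best:  # assumes higher-is-better metrics (auc, ks)
                self.cur_best_performance[metric] = val
                self.no_improvement_round[metric] = 0
            else:
                self.no_improvement_round[metric] += 1

def check_early_stopping(recorder, early_stopping_rounds):
    # Stop once every tracked metric has stalled for early_stopping_rounds rounds.
    stalls = recorder.no_improvement_round.values()
    return bool(stalls) and min(stalls) >= early_stopping_rounds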
Surya97/MWDB-project | [
"508562913624416415cd143cef9b7689066037ef"
] | [
"Phase3/Feedback.py"
] | [
"import os\nimport sys\nfrom pathlib import Path\nsys.path.insert(1, '../Phase1')\nsys.path.insert(2, '../Phase2')\nimport misc\nimport numpy as np\n\nclass Feedback:\n def __init__(self):\n self.task5_result = None\n self.reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,\n 'Phase2', 'pickle_files')\n self.set_task5_result()\n self.dataset = list()\n self.X = None\n self.y = None\n self.dataset=list()\n\n def generate_input_data_set(self, rorir_map, dataset_features):\n for image_id, label in rorir_map.items():\n image_id = os.path.basename(image_id)\n if label==0 or label==1:\n feat = dataset_features[image_id].tolist()\n feat+=[label]\n self.dataset.append(np.array(feat))\n return\n\n def set_task5_result(self):\n self.task5_result = misc.load_from_pickle(self.reduced_pickle_file_folder, 'Task_5_Result')\n\n def generate_input_data(self, rorir_map, dataset_features):\n X = []\n y = []\n\n for image_id, label in rorir_map.items():\n image_id = os.path.basename(image_id)\n if label == 0 or label == 1:\n X.append(dataset_features[image_id])\n y+=[rorir_map[image_id]]\n X = np.array(X)\n y = np.array(y)\n self.X=X\n self.y=y\n\n return\n\n def euclidean_distance(self, dist1, dist2):\n return (sum([(a - b) ** 2 for a, b in zip(dist1, dist2)])) ** 0.5\n\n def save_result(self, result):\n reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,\n 'Phase2', 'pickle_files')\n misc.save2pickle(result, reduced_pickle_file_folder, 'Task_5_Result')"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
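Feedback.euclidean_distance in the record above computes sum((a - b)**2) ** 0.5 with a pure-Python zip/sum loop. A vectorized numpy equivalent, offered as a hedged sketch (same result, single norm call):

import numpy as np

def euclidean_distance(dist1, dist2):
    # Equivalent to (sum((a - b)**2 for a, b in zip(dist1, dist2))) ** 0.5
    return float(np.linalg.norm(np.asarray(dist1, dtype=float) - np.asarray(dist2, dtype=float)))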
XiaoshengLin/shadow3 | [
"d007ae59a2038db4f9275f7bb026bd1b11549e5f"
] | [
"tests/test_lens.py"
] | [
"import Shadow\nimport numpy\n\n# using mac oasys, for plots\n# from srxraylib.plot.gol import set_qt\n# set_qt()\n\n#\n# runs an absorber of 10 um thickness for a source at 10 keV\n#\n#\n\ndef run_example_lens(user_units_to_cm=1.0,npoint=5000,use_prerefl=0):\n #\n # Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().\n #\n #\n # initialize shadow3 source (oe0) and beam\n #\n beam = Shadow.Beam()\n oe0 = Shadow.Source()\n oe1 = Shadow.OE()\n oe2 = Shadow.OE()\n\n #\n # Define variables. See meaning of variables in:\n # https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml\n # https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\n #\n\n oe0.FDISTR = 3\n oe0.FSOURCE_DEPTH = 0\n oe0.F_PHOT = 0\n oe0.HDIV1 = 1.0\n oe0.HDIV2 = 1.0\n oe0.ISTAR1 = 0\n oe0.NPOINT = 500000\n oe0.PH1 = 8000.0\n oe0.SIGDIX = 2.49999994e-05\n oe0.SIGDIZ = 8.00000089e-06\n oe0.SIGMAX = 0.0122999996 / user_units_to_cm\n oe0.SIGMAZ = 0.000699999975 / user_units_to_cm\n oe0.VDIV1 = 1.0\n oe0.VDIV2 = 1.0\n\n\n\n oe1.CCC = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0])\n oe1.FCYL = 1\n if use_prerefl:\n oe1.F_R_IND = 2\n oe1.R_ATTENUATION_OBJ = 0.0\n oe1.R_IND_OBJ = 1.0\n oe1.FILE_R_IND_IMA = b'prerefl.dat'\n else:\n oe1.F_R_IND = 0\n oe1.R_IND_OBJ = 1.0\n oe1.R_IND_IMA = 0.9999923264754235\n oe1.R_ATTENUATION_OBJ = 0.0\n oe1.R_ATTENUATION_IMA = 150.727\n\n oe1.FMIRR = 10\n oe1.FWRITE = 3\n oe1.F_EXT = 1\n oe1.F_REFRAC = 1\n oe1.T_INCIDENCE = 0.0\n oe1.T_REFLECTION = 180.0\n oe1.T_SOURCE = 4700.9 / user_units_to_cm\n oe1.T_IMAGE = 0.01 / user_units_to_cm\n oe1.DUMMY = user_units_to_cm\n\n oe2.CCC = numpy.array([0.0, 292.67523*user_units_to_cm**2, 0.0045013279*user_units_to_cm**2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13418387*user_units_to_cm, 0.0])\n oe2.FCYL = 1\n if use_prerefl:\n oe2.F_R_IND = 1\n oe2.FILE_R_IND_OBJ = b'prerefl.dat'\n oe2.R_ATTENUATION_IMA = 0.0\n oe2.R_IND_IMA = 1.0\n else:\n oe2.F_R_IND = 0\n oe2.R_IND_OBJ = 0.9999923264754235\n oe2.R_IND_IMA = 1.0\n oe2.R_ATTENUATION_OBJ = 150.727\n oe2.R_ATTENUATION_IMA = 0.0\n\n oe2.FMIRR = 10\n oe2.FWRITE = 3\n oe2.F_EXT = 1\n oe2.F_REFRAC = 1\n oe2.T_INCIDENCE = 0.0\n oe2.T_REFLECTION = 180.0\n oe2.T_SOURCE = 0.0 / user_units_to_cm\n oe2.T_IMAGE = 30.065 / user_units_to_cm\n oe2.DUMMY = user_units_to_cm\n\n\n\n beam.genSource(oe0)\n\n\n #\n #run optical element 1\n #\n print(\" Running optical element: %d\"%(1))\n\n beam.traceOE(oe1,1)\n\n\n #\n #run optical element 2\n #\n print(\" Running optical element: %d\"%(2))\n\n beam.traceOE(oe2,2)\n\n\n # print(oe0.sourcinfo())\n # print(oe1.mirinfo())\n # print(oe2.mirinfo())\n\n return beam\n\n\ndef test_lens():\n\n\n #\n # inputs\n #\n cm_or_mm = 1 # 0=using cm, 1=using mm\n use_prerefl = 0 # 0=No, 1=Yes\n\n\n if cm_or_mm == 0:\n user_units_to_cm = 1.0\n title = \"Units are cm\"\n elif cm_or_mm == 1:\n user_units_to_cm = 0.1\n title = \"Units are mm\"\n else:\n print(\"No way...\")\n\n\n #\n # run prerefl\n #\n if use_prerefl:\n import xraylib\n symbol = \"Si\"\n density = xraylib.ElementDensity(xraylib.SymbolToAtomicNumber(symbol))\n Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=0,SYMBOL=symbol,DENSITY=density,FILE=\"prerefl.dat\",E_MIN=5000.0,E_MAX=15000.0,E_STEP=100.0)\n\n #\n # run SHADOW\n #\n beam = run_example_lens(user_units_to_cm=user_units_to_cm)\n\n\n tkt = Shadow.ShadowTools.plotxy(beam,3,6,ref=0,nolost=1,nbins=301,title=\"Z,Z' \"+title)\n print(\"Intensity: %f \"%tkt[\"intensity\"])\n print(\"Number of rays: %d, 
number of GOOD rays: %d \"%(beam.nrays(nolost=0),beam.nrays(nolost=1)))\n\n\n #numpy.testing.assert_almost_equal(sh100,xrl100,2)\n\n\nif __name__ == \"__main__\":\n test_lens()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | [
"08532970f9d2b163f1223599e3ac80f6c51533e4"
] | [
"tests/test_average_true_range_percent.py"
] | [
"from __future__ import absolute_import\nimport unittest\nimport numpy as np\n\nfrom tests.sample_data import SampleData\nfrom pyti import average_true_range_percent\n\n\nclass TestAverageTrueRangePercent(unittest.TestCase):\n def setUp(self):\n \"\"\"Create data to use for testing.\"\"\"\n self.close_data = SampleData().get_sample_close_data()\n\n self.atr_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, 1.189235578986088,\n 1.1523554512220486, 1.1278525931922065, 1.1546224092640733,\n 1.1660380962839427, 1.465357027466913, 1.7831881894142803,\n 2.3561329806184581, 2.6708206943371162, 3.2466120755686263,\n 3.3784546239726194, 3.3564621491521369, 3.2791301980772869,\n 3.2778865256303997, 3.2875442760137483, 3.2810676552694984,\n 3.0012226151326331, 2.7233687488098011, 2.5062178027349966,\n 2.2774730211707057, 2.1306723573292059, 2.0231111698118602,\n 2.4639048069082961, 2.7153248878733027, 2.9415900735797162,\n 3.457810754140358, 4.0649377298167551, 4.6505410623216603,\n 4.8377005165939497, 4.7010401069556149, 4.5393599025684406,\n 4.3416370097985153, 4.1909513300536148, 4.2334214723046726,\n 4.2994054993189517, 4.244940888039114, 3.9739765293353395,\n 3.7984682769968288, 3.5821945386433534, 3.3670297979975179,\n 3.0716656116914933, 2.8662794746678979, 3.0289151976072608,\n 2.9969860158644486, 2.9760460695914741, 2.9289691288143112,\n 2.8058612079021295, 2.531556736800797, 2.4252616931651314,\n 2.2944282121480746, 2.1964244646895756, 2.1062390474088564,\n 2.0476395013091233, 1.7748361482743773, 1.558061265928161,\n 1.4856536290363038, 1.4497927574913438, 1.4352358669002241,\n 1.4299189209362686, 1.4620245560453282, 1.5102324721906708,\n 1.6037560819721852, 1.7746556607866535, 1.9035211913074188,\n 2.0074893237351557, 2.0029061884391339, 1.9371230450535861,\n 1.8548689401186171, 1.8355003791530897, 1.8003331288038178,\n 1.8931540501005137, 1.9806126301955329, 2.0822871750835494,\n 2.1587399768435973, 2.1858863683758751, 2.1992145124735707,\n 2.2042274600601361, 1.9903770888121171, 1.7884145439862129,\n 1.6114041799566228, 1.4484765868823961, 1.3246773786986321,\n 1.2742050031825125, 1.2954614666198452, 1.3205653492681662,\n 1.2899663246832471, 1.2549300623614186, 1.197182571361552,\n 1.1407924958934879, 1.1008057151615109, 1.0691600335312013,\n 0.96093180817465618, 0.8664228618513774, 0.96576000827190556,\n 1.0376009347982038, 1.0764636750622629, 1.0975646487156931,\n 1.2540789775805865, 1.8437302592780713, 2.3966411426581957,\n 2.9608753508340118, 3.423129872873973, 3.5883658875288575,\n 3.2621236585354922, 2.8752781621886734, 2.5375908547247414,\n 2.2497857207671332, 2.4554221153770741, 2.5315780677888444,\n 2.7585119334766222, 2.8337261439349244, 2.9745745527293854,\n 2.9297633150649793, 3.1503331074467429, 3.212529671651343,\n 3.3456605064982394, 3.2905345939522999]\n\n self.atr_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n 1.4180925319661521, 1.6202706653923087, 1.841626084216712,\n 2.3148854933979575, 2.6901735299560841, 3.2282310244121613,\n 3.563083750221574, 3.7982037524565646, 3.7546634785721498,\n 3.7323220510040827, 3.6914812566023922, 3.6379421910796386,\n 3.5587099948976539, 3.5146074512555128, 3.3287123687477114,\n 3.0890446215528855, 2.8876354582425368, 2.7197748421358332,\n 2.9957755812395579, 3.0918928706039539, 3.2034849639589456,\n 3.5276212120453141, 3.966956483762083, 4.4299504506994678,\n 4.9204122583250323, 
5.2383912644056707, 5.1851996464032979,\n 4.932742857755783, 4.7691968174243575, 4.7104555366635612,\n 4.7209742731687623, 4.7303883735587853, 4.8062829601892965,\n 4.8770730470382375, 4.786932261959409, 4.5940745527992979,\n 4.2712108603228502, 4.0426131987685459, 4.0492483355737523,\n 4.0367369950162013, 4.0266065104420958, 3.8442225538659289,\n 3.7281167468319927, 3.5969454050028618, 3.5246336505629778,\n 3.284458875889694, 3.0905268522063674, 2.9085376948755512,\n 2.7680000175672808, 2.6252958389957679, 2.5159579023784877,\n 2.3306698200373246, 2.197229157817036, 2.1031142412351342,\n 2.0360589047455808, 2.0179156129481299, 1.9962963663924316,\n 2.010951331437755, 2.0924060195314591, 2.1470029836845206,\n 2.1917407916945137, 2.3469908853240153, 2.4897011782528256,\n 2.4061646855957806, 2.3351333133106342, 2.230276487867163,\n 2.2408826576806198, 2.2629816480494824, 2.3143268379407238,\n 2.3476629061550369, 2.3674721414695301, 2.374550948419341,\n 2.352947385951865, 2.351910270923812, 2.3499424768917128,\n 2.1608124958654997, 1.9893774678680414, 1.851281037063653,\n 1.7449273052921825, 1.6992086324724789, 1.7010190503124114,\n 1.7165471586824528, 1.6847862993283729, 1.634206765480277,\n 1.6018940222973894, 1.5378290457744153, 1.4602893936269465,\n 1.3946452189065861, 1.3308265877060355, 1.3548427859710599,\n 1.3588718015896448, 1.3628282348400853, 1.366672736225832,\n 1.4652930518912579, 1.9954910663104219, 2.4924846364624273,\n 2.9075743465183366, 3.261046890754919, 3.5616237891147984,\n 3.90750519846529, 4.0167530144468557, 3.7380084557614692,\n 3.4172149917994443, 3.4633450788377629, 3.4315003707559737,\n 3.5392138594642271, 3.5549856117004808, 3.6469399018473312,\n 3.8534409701377266, 4.2338496480817174, 4.1988778641402176,\n 4.2434190220063472, 4.1710674834485006]\n\n self.atr_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, 2.7186399732723401, 3.1897417785103217,\n 3.5720003140004968, 3.8243907563025203, 3.9543745819821638,\n 4.14966762493796, 4.1567426181748255, 4.0952266131322199,\n 3.9774055776649679, 3.9005313973245412, 3.8740200478862201,\n 3.7608260441925863, 3.5854123216136311, 3.4311560353359063,\n 3.6153121546372664, 3.6024786252080254, 3.6288964245394193,\n 3.8135185789216313, 4.1288199078722743, 4.4946788398373076,\n 4.9058770233302322, 5.1603606395261004, 5.3529137258920727,\n 5.445088414102206, 5.3705628277712574, 5.2556944589666976,\n 5.2134596631940662, 5.1637259824115125, 5.1838624309051387,\n 5.2791302264027182, 5.4683909738033964, 5.5120358790272803,\n 5.2939445953362574, 5.1115798997200201, 5.0421404425151195,\n 4.9375845577616317, 4.8404418486438017, 4.7051409062810361,\n 4.6372061837056462, 4.432524791048948, 4.3857899092249255,\n 4.2640474851745038, 4.1058549092701808, 3.8557539531622109,\n 3.6529206682490591, 3.4440864860929081, 3.2769697024906264,\n 3.0924922423109011, 2.9510365216850634, 2.8116170875265167,\n 2.6711372457210696, 2.5919272122011878, 2.5577863035116755,\n 2.5192993537277801, 2.5383732677201931, 2.5348033661300149,\n 2.5286463785226361, 2.6292596767417997, 2.7167970175342209,\n 2.7550799044874084, 2.8604804641492096, 2.7345280164604793,\n 2.6868520965130984, 2.6599028030438059, 2.6726181141515055,\n 2.6688858692968398, 2.6540997015450611, 2.6291880295217065,\n 2.6002059836457319, 2.5941937083210851, 2.5700615442727237,\n 2.535308379517017, 2.5013101557717698, 2.339251886928428,\n 2.220771727829487, 2.1516737476710861, 
2.1053990350980456,\n 2.0811011642229702, 2.0438008059797284, 2.0320916025384799,\n 2.0069284521724975, 1.9527756899172914, 1.9026101646939952,\n 1.8269161484400693, 1.7312268984763945, 1.7180069351756679,\n 1.6836641042431297, 1.6535040163297123, 1.6269651536833674,\n 1.6987531568883674, 2.136689374875739, 2.5300549519920104,\n 2.9404650888832795, 3.2991327451858363, 3.5341944123641578,\n 3.8180814412294013, 4.0387527580573863, 4.2834486458410144,\n 4.3625514336653879, 4.4307414948453614, 4.3447237418697808,\n 4.3526388061920898, 4.3079912365795749, 4.3466474073655306,\n 4.510844263106514, 4.8245544999792642, 4.9825704530372255,\n 5.1956243905869313, 5.0759722759771062]\n\n def test_atrp_period_6(self):\n period = 6\n atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)\n np.testing.assert_array_equal(atrp, self.atr_period_6_expected)\n\n def test_atrp_period_8(self):\n period = 8\n atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)\n np.testing.assert_array_equal(atrp, self.atr_period_8_expected)\n\n def test_atrp_period_10(self):\n period = 10\n atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)\n np.testing.assert_array_equal(atrp, self.atr_period_10_expected)\n\n def test_atrp_invalid_period(self):\n period = 128\n with self.assertRaises(Exception) as cm:\n average_true_range_percent.average_true_range_percent(self.close_data, period)\n expected = \"Error: data_len < period\"\n self.assertEqual(str(cm.exception), expected)\n"
] | [
[
"numpy.testing.assert_array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JaakkoAhola/LES-scripting | [
"1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2"
] | [
"iceScripts/isdacProfileModifications_dry_above.py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 13:38:05 2018\n\n@author: aholaj\n\"\"\"\n\nimport numpy as np\n\nimport sound_in_profiles as sp\n\nimport PythonMethods as pm\n\nimport ModDataPros as mdp\n\nfrom copy import deepcopy\n\nfrom FindCloudBase import calc_rh_profile\nfrom ECLAIR_calcs import calc_rw\n\nimport time\n\nstart = time.time()\n\n\nisdac = sp.Profiles(\"sound_in3.5\",\"bin/case_isdac\")\n\nrh = isdac.getRH()\n\nq = isdac.getQ()\n\nz = isdac.getZ()\n\nt = isdac.getT()\n\np = isdac.getPS()\n\nu = isdac.getU()\n\nv = isdac.getV()\n\n\nosc=rh-100.*np.ones(np.shape(rh))\nab=osc[0];\nfor s in range(len(osc)):\n if np.sign(ab)*np.sign(osc[s]) == -1:\n print(s)\n ab = osc[s]\n \n\ndry0 = 236\ndryL =364\n#size = 40\nrh_target = 20\nrh_mod = deepcopy(rh)\n\nrh_mod[dry0:(dryL+1)] = rh_target\n#\n#\n\n#for kk in xrange(dry0,(dryL+1)):\n# \n# q_mod[kk] = 1000.*calc_rw( rh[kk], t[kk], p[kk] ) \n\n\nz[0] = isdac.getP0()\n#\nq_mod = deepcopy(q)\nq_mod = np.multiply(q_mod, 1.e-3)\nrh_temp = 100000.*np.ones(np.shape(rh))\n\n\n\nfor i in range(dry0,dryL+1): #size\n k = 1\n incr = 1. #1.e-6\n incr = incr*1.e-3\n etumerkki = 1.\n print('z', i)\n vanha = np.abs( rh_temp[i] - rh_mod[i] )\n switchCount = 0\n while (( vanha > 0.01 ) and (switchCount < 300)): #and (k < 10000)\n \n q_mod[i] = np.max(q_mod[i]-etumerkki*k*incr,0)\n \n rh_temp, b = calc_rh_profile( t, q_mod, z )\n uusi = np.abs( rh_temp[i] - rh_mod[i] )\n \n if uusi-vanha > 0:\n print('switch')\n etumerkki = -1*etumerkki\n incr = incr*1.e-1\n switchCount += 1\n incr = max(incr, 1.e-9) \n vanha = uusi \n k += 1\n print(uusi, rh_temp[i], rh_mod[i])\n \n \n print('q[i]', q[i], 'q_mod[i]', q_mod[i]*1.e+3)\n print(' ') \n\nrh_iter, ps_iter = calc_rh_profile( t, q_mod, z )\n\n\nq_mod = np.multiply(q_mod, 1.e3)\n#isdac.writeNewSoundIn(\"sound_in3.5_rh_dry_above_\"+str(rh_target), z, t, q_mod, u, v)\n \n\n#####################\n### plotting ########\n####################\nz[0] = 0.\n\nfig, ax = mdp.plottaa( rh, z, tit = 'Relative humidity', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = True, LEGEND = True, omavari = 'k' )\n\nfig, ax = mdp.plottaa( rh_mod[dry0-1:(dryL+1)+1], z[dry0-1:(dryL+1)+1], tit = 'Relative humidity dry-above', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'r' )\n\n#mdp.plottaa( rh_iter[dry0:(dryL+1)], z[dry0:(dryL+1)], tit = 'Relative humidity dry-above iterated', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'b' )\nxticks = list(range(0, 111, 10))\nxlabels = list(map(str, xticks))\nax.set_xticks( xticks )\nax.set_xticklabels( xlabels )\n\n####################\nmdp.plottaa( q, z, tit = 'Total water mixing ratio', xl = 'mix. rat. [g/kg]', yl = 'height [m]', markers=False, uusikuva = True, LEGEND = True, omavari = 'k' )\n\nmdp.plottaa( q_mod[dry0:(dryL+1)], z[dry0:(dryL+1)], tit = 'Total water mixing ratio dry-below', xl = 'mix. rat. [g/kg]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'b' )\n\n\n\nmdp.plot_lopetus()\n#\n\n\n\n\nend = time.time()\n\nprint('suoritusaika', end-start)"
] | [
[
"numpy.abs",
"numpy.multiply",
"numpy.sign",
"numpy.max",
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
steerapi/webdnn | [
"1df51cc094e5a528cfd3452c264905708eadb491",
"1df51cc094e5a528cfd3452c264905708eadb491",
"1df51cc094e5a528cfd3452c264905708eadb491",
"1df51cc094e5a528cfd3452c264905708eadb491"
] | [
"test/runtime/frontend_test/chainer_test/functions_test/connection_test/convolution_2d_function_test.py",
"test/runtime/operators_test/sigmoid_test.py",
"src/graph_transpiler/webdnn/graph/operators/tile.py",
"test/runtime/frontend_test/onnx_test/defs_test/reduction_test/reduce_mean_test.py"
] | [
"import chainer\nimport numpy as np\n\nfrom test.util import generate_kernel_test_case, wrap_template\nfrom webdnn.graph.placeholder import Placeholder\nfrom webdnn.frontend.chainer.converter import ChainerConverter\nfrom webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable\n\n\n@wrap_template\ndef template(n=2, c_in=4, h_in=6, w_in=8, c_out=10, ksize=3, stride=1, pad=0, nobias=True, description=\"\"):\n link = chainer.links.Convolution2D(c_in, c_out, ksize=ksize, stride=stride, pad=pad, nobias=nobias)\n vx = chainer.Variable(np.random.rand(n, c_in, h_in, w_in).astype(np.float32))\n vy = link(vx)\n\n graph = ChainerConverter().convert([vx], [vy])\n\n x = graph.inputs[0]\n y = graph.outputs[0]\n\n generate_kernel_test_case(\n description=f\"[chainer] L.Convolution2D {description}\",\n graph=graph,\n inputs={x: vx.data},\n expected={y: vy.data},\n EPS=1e-2\n )\n\n\ndef test():\n template()\n\n\ndef test_nobias():\n template(nobias=True)\n\n\ndef test_nopadding():\n template(pad=0)\n\n\ndef test_irregular_kernel_size():\n template(ksize=(3, 4))\n\n\ndef test_irregular_stride_size():\n template(stride=(2, 3))\n\n\ndef test_irregular_padding_size1():\n template(pad=(1, 2))\n\n\ndef test_irregular_padding_size2():\n template(pad=2)\n\n\ndef test_irregular_padding_size3():\n template(pad=2, ksize=5)\n\n\ndef test_irregular_padding_size4():\n template(pad=(1, 0))\n\n\ndef test_irregular_size():\n template(ksize=(3, 5), stride=(2, 3), pad=(1, 3))\n\n\ndef test_special_size():\n # https://github.com/mil-tokyo/webdnn/issues/525\n # In case that the max position index (=n*c_in*h_in*w_in*ksize*ksize) > 1<<23\n template(n=1, c_in=1 << 6, h_in=1 << 7, w_in=1 << 7, c_out=3, ksize=(1 << 2) + 1, pad=1 << 1)\n\n\ndef test_with_placeholder():\n link = chainer.links.Convolution2D(None, 16, ksize=3, stride=1, pad=1)\n vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))\n vy = link(vx)\n\n N = Placeholder(label=\"N\")\n H = Placeholder(label=\"H\")\n W = Placeholder(label=\"W\")\n px = PlaceholderVariable([N, 3, H, W])\n py = link(px)\n\n graph = ChainerConverter().convert([px], [py])\n\n x = graph.inputs[0]\n y = graph.outputs[0]\n\n N.value = 1\n H.value = 16\n W.value = 16\n generate_kernel_test_case(\n description=f\"[chainer] L.Convolution2D with placeholder\",\n graph=graph,\n backend=[\"webgpu\", \"webassembly\"],\n inputs={x: vx.data},\n expected={y: vy.data},\n EPS=1e-2\n )\n",
"import numpy as np\n\nfrom test.util import generate_kernel_test_case, wrap_template\nfrom webdnn.graph.graph import Graph\nfrom webdnn.graph.operators.sigmoid import Sigmoid\nfrom webdnn.graph.order import OrderCNHW, OrderNHWC\nfrom webdnn.graph.variable import Variable\n\n\n@wrap_template\ndef template(r=1.0, x_order=OrderNHWC, y_order=OrderNHWC, description: str = \"\"):\n vx = (np.random.rand(2, 3, 4, 5) - 0.5) * r\n vy = 1 / (1 + np.exp(-vx))\n # This produces very small positive value (< 1e-7) when vx is negative large.\n # Actual implementation uses tanh(0.5f * x0) * 0.5f + 0.5f\n # In the case tanh is used, the result saturates to 0.0 when vs is negative large.\n # ABS_EPS is set to allow such case.\n\n x = Variable(vx.shape, order=OrderNHWC)\n y, = Sigmoid(None)(x)\n\n x.change_order(x_order)\n y.change_order(y_order)\n\n generate_kernel_test_case(\n description=f\"Sigmoid {description}\",\n graph=Graph([x], [y]),\n inputs={x: np.transpose(vx, [OrderNHWC.axes_dict[a] for a in x.order.axes])},\n expected={y: np.transpose(vy, [OrderNHWC.axes_dict[a] for a in y.order.axes])},\n ABS_EPS=1e-7\n )\n\n\ndef test():\n template()\n\n\ndef test_different_order():\n template(x_order=OrderCNHW)\n\n\ndef test_large_range():\n template(r=1e3)\n",
"from typing import Optional\n\nimport numpy as np\n\nfrom webdnn.graph.axis import AxisKeyDict\nfrom webdnn.graph.graph import Graph\nfrom webdnn.graph.operator import Operator\nfrom webdnn.graph.optimize_rule import OptimizeRule\nfrom webdnn.graph.variable import Variable\nfrom webdnn.graph.variables.constant_variable import ConstantVariable\n\n\nclass Tile(Operator):\n \"\"\"Tile(name)\n Repeat input variable.\n\n Args:\n name (str): Operator name.\n multiplier (tuple of int): number of repeat\n\n Signature\n .. code::\n\n y, = op(x)\n\n - **x** - Input variable.\n - **y** - Output variable.\n\n \"\"\"\n\n def __init__(self, name: Optional[str], multiplier: AxisKeyDict[int]):\n super().__init__(name)\n self.parameters[\"multiplier\"] = multiplier\n\n def __call__(self, x: Variable):\n assert x.ndim == len(self.multiplier), f\"\"\"\n[Tile] Number of multiplier must be same as # of dimension of x:\n (x.ndim)={x.ndim}\n (len(self.multiplier))={len(self.multiplier)}\"\"\"\n\n y_shape = [self.multiplier[a] * x.shape_dict[a] for a in x.order.axes]\n y = Variable(y_shape, x.order)\n\n self.append_input(\"x\", x)\n self.append_output(\"y\", y)\n return y,\n\n @property\n def multiplier(self) -> AxisKeyDict[int]:\n return self.parameters[\"multiplier\"]\n\n def fold_constance(self, graph: Graph):\n x = self.inputs[\"x\"] # type: ConstantVariable\n y = self.outputs[\"y\"]\n\n new_y = ConstantVariable(np.tile(x.data, [self.multiplier[a] for a in x.order.axes]), x.order)\n new_y.change_order(y.order)\n OptimizeRule.replace_variable(graph, y, new_y)\n self.remove_all()\n",
"import numpy as np\n\nfrom test.runtime.frontend_test.onnx_test.util import make_node, make_tensor_value_info, make_model\nfrom test.util import wrap_template, generate_kernel_test_case\nfrom webdnn.frontend.onnx import ONNXConverter\n\n\n@wrap_template\ndef template(x_shape, axes, keepdims=None, description: str = \"\"):\n vx = np.random.rand(*x_shape)\n vy = np.mean(vx, axis=tuple(axes), keepdims=True if keepdims is None else keepdims)\n\n x = make_tensor_value_info(\"x\", vx.shape)\n y = make_tensor_value_info(\"y\", vy.shape)\n\n kwargs = {\"axes\": axes}\n if keepdims is not None:\n kwargs[\"keepdims\"] = keepdims\n operator = make_node(\"ReduceMean\", [\"x\"], [\"y\"], **kwargs)\n\n model = make_model([operator], [x], [y])\n\n graph = ONNXConverter().convert(model)\n\n assert tuple(vy.shape) == tuple(graph.outputs[0].shape), f\"vy: {vy.shape}, graph.outputs[0]: {graph.outputs[0].shape}\"\n generate_kernel_test_case(\n description=f\"[ONNX] ReduceMean {description}\",\n graph=graph,\n backend=[\"webgpu\", \"webgl\", \"webassembly\"],\n inputs={graph.inputs[0]: vx},\n expected={graph.outputs[0]: vy},\n )\n\n\ndef test():\n template(x_shape=[1, 3, 4, 5], axes=[2])\n\n\ndef test_keepdim():\n template(x_shape=[1, 3, 4, 5], axes=[2], keepdims=True)\n\n\ndef test_not_keepdim():\n template(x_shape=[1, 3, 4, 5], axes=[2], keepdims=False)\n\n\ndef test_multi_axes():\n template(x_shape=[1, 3, 4, 5], axes=[2, 3])\n\n\ndef test_all_axes():\n template(x_shape=[1, 3, 4, 5], axes=[0, 1, 2, 3])\n"
] | [
[
"numpy.random.rand"
],
[
"numpy.exp",
"numpy.random.rand",
"numpy.transpose"
],
[
"numpy.tile"
],
[
"numpy.random.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YurongYou/MODEST | [
"cfc0465ed737f6c3166e6b5d08231880073b4552"
] | [
"downstream/OpenPCDet/pcdet/utils/calibration_kitti.py"
] | [
"import numpy as np\n\n\ndef get_calib_from_file(calib_file):\n with open(calib_file) as f:\n lines = f.readlines()\n\n obj = lines[2].strip().split(' ')[1:]\n P2 = np.array(obj, dtype=np.float32)\n obj = lines[3].strip().split(' ')[1:]\n P3 = np.array(obj, dtype=np.float32)\n obj = lines[4].strip().split(' ')[1:]\n R0 = np.array(obj, dtype=np.float32)\n obj = lines[5].strip().split(' ')[1:]\n Tr_velo_to_cam = np.array(obj, dtype=np.float32)\n\n return {'P2': P2.reshape(3, 4),\n 'P3': P3.reshape(3, 4),\n 'R0': R0.reshape(3, 3),\n 'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}\n\n\nclass Calibration(object):\n def __init__(self, calib_file):\n if not isinstance(calib_file, dict):\n calib = get_calib_from_file(calib_file)\n else:\n calib = calib_file\n\n self.P2 = calib['P2'] # 3 x 4\n self.R0 = calib['R0'] # 3 x 3\n self.V2C = calib['Tr_velo2cam'] # 3 x 4\n\n # Camera intrinsics and extrinsics\n self.cu = self.P2[0, 2]\n self.cv = self.P2[1, 2]\n self.fu = self.P2[0, 0]\n self.fv = self.P2[1, 1]\n self.tx = self.P2[0, 3] / (-self.fu)\n self.ty = self.P2[1, 3] / (-self.fv)\n\n def cart_to_hom(self, pts):\n \"\"\"\n :param pts: (N, 3 or 2)\n :return pts_hom: (N, 4 or 3)\n \"\"\"\n pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))\n return pts_hom\n\n def rect_to_lidar(self, pts_rect):\n \"\"\"\n :param pts_lidar: (N, 3)\n :return pts_rect: (N, 3)\n \"\"\"\n pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)\n R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)\n R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)\n R0_ext[3, 3] = 1\n V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)\n V2C_ext[3, 3] = 1\n\n pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))\n return pts_lidar[:, 0:3]\n\n def lidar_to_rect(self, pts_lidar):\n \"\"\"\n :param pts_lidar: (N, 3)\n :return pts_rect: (N, 3)\n \"\"\"\n pts_lidar_hom = self.cart_to_hom(pts_lidar)\n pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))\n # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))\n return pts_rect\n\n def rect_to_img(self, pts_rect):\n \"\"\"\n :param pts_rect: (N, 3)\n :return pts_img: (N, 2)\n \"\"\"\n pts_rect_hom = self.cart_to_hom(pts_rect)\n pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)\n # pts_rect_hom[:, 2][np.isclose(pts_rect_hom[:, 2], 0)] = 1e-6\n pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)\n pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord\n return pts_img, pts_rect_depth\n\n def lidar_to_img(self, pts_lidar):\n \"\"\"\n :param pts_lidar: (N, 3)\n :return pts_img: (N, 2)\n \"\"\"\n pts_rect = self.lidar_to_rect(pts_lidar)\n pts_img, pts_depth = self.rect_to_img(pts_rect)\n return pts_img, pts_depth\n\n def img_to_rect(self, u, v, depth_rect):\n \"\"\"\n :param u: (N)\n :param v: (N)\n :param depth_rect: (N)\n :return:\n \"\"\"\n x = ((u - self.cu) * depth_rect) / self.fu + self.tx\n y = ((v - self.cv) * depth_rect) / self.fv + self.ty\n pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)\n return pts_rect\n\n def corners3d_to_img_boxes(self, corners3d):\n \"\"\"\n :param corners3d: (N, 8, 3) corners in rect coordinate\n :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate\n :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate\n \"\"\"\n sample_num = corners3d.shape[0]\n corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 
4)\n\n img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)\n\n x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]\n x1, y1 = np.min(x, axis=1), np.min(y, axis=1)\n x2, y2 = np.max(x, axis=1), np.max(y, axis=1)\n\n boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)\n boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)\n\n return boxes, boxes_corner\n"
] | [
[
"numpy.dot",
"numpy.min",
"numpy.matmul",
"numpy.ones",
"numpy.max",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
roman-baldaev/course-project | [
"b65ba018c16697224f15916b08ce7f09634d1f8c"
] | [
"model/src/DataModel.py"
] | [
"import numpy as np\nimport pandas as pd\n\nclass DataModel:\n \"\"\"\n This class implements a data model - values at time points and provides methods for working with these data.\n \"\"\"\n\n def __init__(self, n=0, values=None, times=None):\n \"\"\"\n A constructor that takes values and a time point.\n\n :param values: Array of values process\n :param times: Array of a time points\n \"\"\"\n if (values is None) or (times is None):\n self._times = np.zeros((n, ))\n self._values = np.zeros((n, ))\n else:\n if len(values) != len(times):\n print(\"Different size of values and times\")\n else:\n self._times = np.array(times, dtype=float)\n self._values = np.array(values, dtype=float)\n\n def print(self, n=None):\n if n is not None:\n _n = n\n elif self._times.shape:\n _n = self._times.shape[0]\n for i in range(_n):\n print(\"Time: {}___Value: {}\".format(self._times[i], self._values[i]))\n\n @property\n def mean(self):\n \"\"\"\n :return: Mean of values\n \"\"\"\n return self._times.mean()\n\n def get_values(self):\n return self._values\n\n def get_times(self):\n return self._times\n\n def add_value(self, value, index):\n # self._values.__add__(value)\n self._values[index] = value\n\n def add_time(self, time, index):\n # self._times.__add__(time)\n self._times[index] = time\n\n def get_value(self, index):\n return self._values[index]\n\n def get_time(self, index):\n return self._times[index]"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
icecube-pixel/grok-auto-complete | [
"747aab90f846410f444914713d238034fcf767a2"
] | [
"get_grok_repos.py"
] | [
"import logging\r\nfrom github import Github\r\nfrom typing import Dict, Tuple, List\r\nimport os\r\nimport argparse\r\nimport traceback\r\nfrom collections import Counter\r\nfrom tenacity import retry, stop_after_attempt, wait_exponential\r\nfrom time import sleep\r\nimport pandas as pd\r\n\r\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\r\n datefmt='%Y-%m-%d:%H:%M:%S',\r\n level=logging.INFO)\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n# https://docs.github.com/en/github/searching-for-information-on-github/searching-on-github/searching-for-repositories#search-by-when-a-repository-was-created-or-last-updated\r\n\r\ndef get_query_string_to_exclude()->str:\r\n \"\"\"\r\n Generates query string instead of hard-coding and appends to the query string\r\n :return:\r\n \"\"\"\r\n logger.info(\"Inside function to generate query to hit API\")\r\n languages_to_exclude = ['Jinja', 'Shell', 'YAML', 'INI', 'Perl', 'Haskell']\r\n exclude_languages = \" \".join([\"NOT language:{}\".format(language) for language in languages_to_exclude])\r\n return \" \" + exclude_languages\r\n\r\n\r\ndef get_matching_code(args: Dict)->None:\r\n \"\"\"\r\n Gets the top matches of code based on pattern where grok is used and is of not YAML etc\r\n \"\"\"\r\n logger.info(\"Inside to get top repositories function\")\r\n master_data = []\r\n observed_licences = []\r\n try:\r\n g_obj = Github(args['token'], timeout=3000) # Overriding timeout of 3000 seconds\r\n pattern_file_extension = '\"grok\" in:file extension:j2'\r\n lang_to_exclude = get_query_string_to_exclude()\r\n _query_str = f\"{pattern_file_extension}{lang_to_exclude}\"\r\n logger.info(f\"Processing query {_query_str}\")\r\n sleep(10)\r\n results = g_obj.search_code(_query_str)\r\n for repo in results:\r\n master_data.append(vars(repo))\r\n\r\n observed_licences.append(repo.license)\r\n file_name = str(repo).split(\"ContentFile(path=\")[1].replace('\"',\"\")[:-1].replace(\"/\", \"_\")\r\n path_to_dump = os.path.join(os.getcwd(), \"data\", file_name)\r\n logger.info(\"Dumping file {}\".format(file_name))\r\n with open(path_to_dump, \"wb\") as f:\r\n f.write(repo.decoded_content)\r\n logger.info(Counter(observed_licences))\r\n except Exception as e:\r\n logger.error(e)\r\n logger.error(traceback.format_exc())\r\n pd.DataFrame(master_data).to_csv(\"RepoData.csv\", index=False)\r\n\r\ndef get_inputs()->Dict:\r\n \"\"\"Gets the username and password from the console \"\"\"\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--token\", dest=\"token\", help=\"Enter the oAuth token\", required=True)\r\n args = vars(parser.parse_args())\r\n return args\r\n\r\n\r\ndef main():\r\n logger.info(\"Inside Main\")\r\n args = get_inputs()\r\n get_matching_code(args=args)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
PierreExeter/gym-reacher | [
"d58edeb93b4b703101dc0505232c883fd012dbad"
] | [
"scripts/test_DDPG.py"
] | [
"import gym\nimport numpy as np\nimport gym_reacher\n\nfrom stable_baselines.ddpg.policies import MlpPolicy\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nfrom stable_baselines import DDPG\n\n# env = gym.make('MountainCarContinuous-v0')\nenv = gym.make('Reacher1Dof-v0')\n\n# the noise objects for DDPG\nn_actions = env.action_space.shape[-1]\nparam_noise = None\naction_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))\n\nmodel = DDPG(MlpPolicy, env, verbose=1, param_noise=param_noise, action_noise=action_noise)\nmodel.learn(total_timesteps=10000)\nmodel.save(\"ddpg_mountain\")\n"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gonzalo-munillag/Exponential_Randomised_Response | [
"1ae2c867d77c6e92f1df0bb7120862e4f9aa15e4",
"1ae2c867d77c6e92f1df0bb7120862e4f9aa15e4"
] | [
"differential-privacy-library-main/tests/tools/test_histogramdd.py",
"Experiments/permute_and_flip_main/mechanisms.py"
] | [
"import numpy as np\nfrom unittest import TestCase\n\nfrom diffprivlib.accountant import BudgetAccountant\nfrom diffprivlib.tools.histograms import histogramdd\nfrom diffprivlib.utils import global_seed, PrivacyLeakWarning, BudgetError\n\n\nclass TestHistogramdd(TestCase):\n def test_no_params(self):\n a = np.array([1, 2, 3, 4, 5])\n with self.assertWarns(PrivacyLeakWarning):\n res = histogramdd(a)\n self.assertIsNotNone(res)\n\n def test_no_range(self):\n a = np.array([1, 2, 3, 4, 5])\n with self.assertWarns(PrivacyLeakWarning):\n res = histogramdd(a, epsilon=2)\n self.assertIsNotNone(res)\n\n def test_bins_instead_of_range(self):\n a = np.array([1, 2, 3, 4, 5])\n res = histogramdd([a, a], epsilon=2, bins=([0, 2, 6], [0, 2, 6]))\n self.assertIsNotNone(res)\n\n def test_same_edges(self):\n a = np.array([1, 2, 3, 4, 5])\n _, edges = np.histogramdd(a, bins=3, range=[(0, 10)])\n _, dp_edges = histogramdd(a, epsilon=1, bins=3, range=[(0, 10)])\n\n for i in range(len(edges)):\n self.assertTrue((edges[i] == dp_edges[i]).all())\n\n def test_different_result(self):\n global_seed(3141592653)\n a = np.array([1, 2, 3, 4, 5])\n hist, _ = np.histogramdd(a, bins=3, range=[(0, 10)])\n dp_hist, _ = histogramdd(a, epsilon=0.1, bins=3, range=[(0, 10)])\n\n # print(\"Non-private histogram: %s\" % hist)\n # print(\"Private histogram: %s\" % dp_hist)\n self.assertTrue((hist != dp_hist).any())\n\n def test_density_1d(self):\n global_seed(3141592653)\n a = np.array([1, 2, 3, 4, 5])\n dp_hist, _ = histogramdd(a, epsilon=10, bins=3, range=[(0, 10)], density=True)\n\n # print(dp_hist.sum())\n\n self.assertAlmostEqual(dp_hist.sum(), 1.0 * 3 / 10)\n\n def test_density_2d(self):\n global_seed(3141592653)\n a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]).T\n dp_hist, _ = histogramdd(a, epsilon=10, bins=3, range=[(0, 10), (0, 10)], density=True)\n\n # print(dp_hist.sum())\n\n self.assertAlmostEqual(dp_hist.sum(), 1.0 * (3 / 10) ** 2)\n\n def test_accountant(self):\n acc = BudgetAccountant(1.5, 0)\n\n a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]).T\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True, accountant=acc)\n\n with self.assertRaises(BudgetError):\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True, accountant=acc)\n\n def test_default_accountant(self):\n BudgetAccountant.pop_default()\n\n a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]).T\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True)\n acc = BudgetAccountant.pop_default()\n self.assertEqual((1, 0), acc.total())\n self.assertEqual(acc.epsilon, float(\"inf\"))\n self.assertEqual(acc.delta, 1.0)\n\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)])\n\n self.assertEqual((1, 0), acc.total())\n",
"import numpy as np\nfrom collections import defaultdict\nfrom scipy.optimize import minimize_scalar, root_scalar, bracket\nfrom scipy.special import logsumexp\n\ndef em_worst_expected_error(n=2, eps=1, delta=1):\n def foo(p):\n return np.log(p) * (1 - 1 / (1 + (n-1)*p))\n a = -minimize_scalar(lambda p: foo(p), bounds=(0,1), method='bounded').fun\n return a * 2 * delta / eps\n\ndef pf_worst_expected_error(n=2, eps=1, delta=1):\n def foo(p):\n return np.log(p) * (1 - (1 - (1-p)**n) / (n*p))\n a = -minimize_scalar(lambda p: foo(p), bounds=(0,1), method='bounded').fun\n return a * 2 * delta / eps\n\ndef pf_pmf(q, eps=1.0, sensitivity=1.0, monotonic=False):\n coef = 1.0 if monotonic else 0.5\n p = np.exp(coef*eps/sensitivity*(q - q.max()))\n n = q.size\n \n # first we will calculate \n # sum(prod(p_i, i in S), |S| = k) for each k\n \n subsets = np.zeros(n)\n curr = np.cumsum(p)\n subsets[0] = curr[-1]\n for j in range(1,n):\n curr[j:] = np.cumsum(curr[j-1:-1]*p[j:])\n subsets[j] = curr[-1] \n \n # coefficient vector: (-1)^k / (k+1) for k = 1..n\n coef = (np.arange(n) % 2 * 2 - 1) / (np.arange(n)+2)\n \n # we will now calculate\n # sum(prod(p_i, i in S), |S| = k, r not in S)\n # and compute the final probabilities\n \n ans = np.zeros(n)\n for i in range(n):\n new = np.copy(subsets)\n new[0] -= p[i]\n for j in range(1,n):\n new[j] -= new[j-1]*p[i]\n \n ans[i] = p[i] * (1 + new @ coef)\n \n return ans\n\ndef em_pmf(q, eps=1.0, sensitivity=1.0, monotonic=False):\n coef = 1.0 if monotonic else 0.5\n q = q - q.max()\n logits = coef*eps/sensitivity*q\n return np.exp(logits - logsumexp(logits))\n #p = np.exp(coef*eps/sensitivity*q)\n #return p / p.sum()\n\ndef em(q, eps=1.0, sensitivity=1.0, prng=np.random, monotonic=False):\n \n coef = 1.0 if monotonic else 0.5\n\n q = q - q.max()\n p = np.exp(coef*eps/sensitivity*q)\n p /= p.sum()\n\n return prng.choice(p.size, p=p)\n\n\ndef pf(q, eps=1.0, sensitivity=1.0, prng=np.random, monotonic=False):\n \n coef = 1.0 if monotonic else 0.5\n\n q = q - q.max()\n p = np.exp(coef*eps/sensitivity*q)\n\n for i in prng.permutation(p.size):\n if prng.rand() <= p[i]:\n return i\n\ndef expected_error(q, eps, pmf=em_pmf):\n # compute the expected error of the mechanism (given it's probability mass function)\n ans = q.max() - pmf(q,eps) @ q\n maxerr = q.max() - q.mean()\n if ans > maxerr or ans < 0:\n return maxerr\n return ans\n\ndef variance(q, eps, pmf=em_pmf):\n e = expected_error(q, eps, pmf)\n return pmf(q, eps) @ (q.max() - q)**2 - e**2\n\ndef expected_epsilon(q, err, bounds=None, pmf=em_pmf):\n # computed the epsilon required to achieve given expected error\n foo = lambda eps: expected_error(q, eps, pmf) - err\n\n if bounds is None:\n eps = 1.0\n while foo(eps) > 0:\n eps *= 2\n while foo(eps) < 0:\n eps /= 2.0\n bounds = [eps,2*eps]\n\n return root_scalar(foo,bracket=bounds,method='bisect').root\n\ndef max_epsilon_ratio(q):\n def foo(eps):\n err = expected_error(q, eps, pf_pmf)\n eps2 = expected_epsilon(q, err, [eps, 2*eps])\n return -eps2/eps\n br = bracket(foo, 1e-3, 1.0)[0:3]\n ans = minimize_scalar(foo, bracket=br, method='brent')\n eps0 = ans.x\n err = expected_error(q, eps0, pf_pmf)\n eps1 = expected_epsilon(q, err, [eps0, 2*eps0])\n return eps0, err, eps1\n\n"
] | [
[
"numpy.array",
"numpy.histogramdd"
],
[
"numpy.log",
"numpy.arange",
"scipy.optimize.minimize_scalar",
"numpy.cumsum",
"numpy.copy",
"scipy.optimize.bracket",
"scipy.optimize.root_scalar",
"numpy.exp",
"numpy.zeros",
"scipy.special.logsumexp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
PengningChao/emdb-sphere | [
"d20ac81ab4fd744f87788bda46d3aa19598658ee"
] | [
"dualbound/Lagrangian/spatialProjopt_Zops_numpy.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 4 21:39:12 2020\n\n@author: pengning\n\nThis is part of the grad/Hess engine for spatial projection versions of the \noriginal global constraint <S|T>-<T|U|T>. The Lagrangian multipliers are distributed in \nthe order alphaP0_1, alphaP0_2, alphaP1_1, alphaP1_2 ... where P0 is just the identity\n\"\"\"\n\nimport numpy as np\n\n\ndef Z_TT(Lags, O, UPlist):\n #P0 is identity and UP0 is the original U matrix\n ZTT = np.zeros_like(O, dtype=np.complex)\n ZTT[:,:] = O[:,:]\n for i in range(len(UPlist)):\n SymUP = (UPlist[i]+UPlist[i].conj().T)/2\n AsymUP = (UPlist[i]-UPlist[i].conj().T)/(2j)\n ZTT += Lags[2*i]*SymUP + Lags[2*i+1]*AsymUP\n return ZTT\n\ndef grad_Z_TT(Lags, UPlist):\n gradZ = []\n for i in range(len(UPlist)):\n SymUP = (UPlist[i]+UPlist[i].conj().T)/2\n AsymUP = (UPlist[i]-UPlist[i].conj().T)/(2j)\n gradZ.append(SymUP)\n gradZ.append(AsymUP)\n return gradZ\n\n\ndef check_spatialProj_Lags_validity(Lags, Olist, UPlistlist):\n modenum = len(Olist)\n mineig = np.inf\n for mode in range(modenum):\n ZTT = Z_TT(Lags, Olist[mode], UPlistlist[mode])\n eigZTT = np.linalg.eigvalsh(ZTT)\n \n if eigZTT[0]<0:\n print('mineig', eigZTT[0])\n return eigZTT[0]\n mineig = min(mineig,eigZTT[0])\n return mineig\n\n\ndef find_singular_ZTT_eigv(Lags, Olist, UPlistlist):\n modenum = len(Olist)\n mineigw = np.inf\n mineigv = np.zeros(Olist[0].shape[0])\n \n modemineig = -1\n for i in range(modenum):\n ZTT = Z_TT(Lags, Olist[i], UPlistlist[i])\n eigw, eigv = np.linalg.eigh(ZTT)\n if eigw[0]<=0:\n modemineig = i\n mineigv = eigv[:,0]\n return modemineig, mineigv\n elif eigw[0]<mineigw:\n mineigw = eigw[0]\n mineigv = eigv[:,0]\n modemineig = i\n return modemineig, mineigv\n\n\ndef get_ZTT_mineig(Lags, Olist, UPlistlist, eigvals_only=False):\n modenum = len(Olist)\n mineigw = np.inf\n modemineig = -1\n \n if eigvals_only:\n for mode in range(modenum):\n ZTT = Z_TT(Lags, Olist[mode], UPlistlist[mode])\n eigw = np.linalg.eigvalsh(ZTT)\n if eigw[0]<=0:\n return mode, eigw[0]\n elif eigw[0]<mineigw:\n mineigw = eigw[0]\n modemineig = mode\n return modemineig, mineigw\n else:\n for mode in range(modenum):\n ZTT = Z_TT(Lags, Olist[mode], UPlistlist[mode])\n eigw, eigv = np.linalg.eigh(ZTT)\n if eigw[0]<=0:\n return mode, eigw[0], eigv[:,0]\n elif eigw[0]<mineigw:\n mineigw = eigw[0]\n mineigv = eigv[:,0]\n modemineig = mode\n return modemineig, mineigw, mineigv\n\ndef get_inc_ZTT_mineig(incLags, include, Olist, UPlistlist, eigvals_only=False):\n Lags = np.zeros(len(include))\n Lags[include] = incLags[:]\n return get_ZTT_mineig(Lags, Olist, UPlistlist, eigvals_only=eigvals_only)\n\n\n###method for finding derivatives of mineig of ZTT, to use for phase I (entering domain of duality) of optimization\n\ndef get_ZTT_mineig_grad(ZTT, gradZTT):\n eigw, eigv = np.linalg.eigh(ZTT)\n eiggrad = np.zeros(len(gradZTT))\n \n for i in range(len(eiggrad)):\n eiggrad[i] = np.real(np.vdot(eigv[:,0], gradZTT[i] @ eigv[:,0]))\n return eiggrad\n"
] | [
[
"numpy.linalg.eigh",
"numpy.zeros_like",
"numpy.linalg.eigvalsh",
"numpy.zeros",
"numpy.vdot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juniorcl/virtual-lockin-prototype | [
"5f75897a65620f6180f37bcaa3b4291d605aaf9f"
] | [
"auxiliary.py"
] | [
"############################# Helper #################################\n## This file was created to support the lock-in program ##\n######################################################################\n## These functions can be imported using: import lockin-auxiliary as aux\n## and put aux.<name of the function>\nfrom scipy.signal import bessel, filtfilt, butter\nimport numpy as np\nimport wave\n\ndef lowPassFilter(sinal, REFSIG, RATE, ORDER, ROLL, CUTOFF):\n \n y_fft0, x_fft0 = freq0fftPSD(sinal, REFSIG, RATE, ROLL)\n y_bessel, x_bessel = lowBesselPSD(sinal, REFSIG, RATE, CUTOFF, ORDER, ROLL)\n y_butter, x_butter = lowButterPSD(sinal, REFSIG, RATE, CUTOFF, ORDER, ROLL)\n\n return x_fft0, y_fft0, x_bessel, y_bessel, x_butter, y_butter\n\ndef refSignal(file, chunk):\n wf = wave.open(file, 'rb')\n sinalbit = np.frombuffer(wf.readframes(chunk), np.int16)\n return inVolt(sinalbit[::2])\n\ndef rmsFunction(signal):\n #Root-Mean-Square function\n f = lambda i: i**2/len(signal)\n soma = np.sum(list(map(f, signal)))\n return np.sqrt(soma)\n\ndef sigMultiply(signal, signal_ref, roll):\n #multiply the signal and referency signals\n sin_psd = np.multiply(signal, signal_ref)\n cos_psd = np.multiply(signal, np.roll(signal_ref, roll))\n return sin_psd, cos_psd\n\ndef lowButter(data, fs, cutoff, order):\n #this is a butter lowpass filter\n nyq = 0.5*fs\n normal_cutoff = cutoff/nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n y = filtfilt(b, a, data)\n return y\n\ndef lowBessel(data, fs, cutoff, order):\n #this is a bessel lowpass filter\n nyq = 0.5*fs\n normal_cutoff = cutoff/nyq\n b, a = bessel(order, normal_cutoff, btype='low', analog=False)\n y = filtfilt(b, a, data)\n return y\n\ndef inVolt(signal):\n #converts bits to volts\n slope = 1.4286015335045335e-4 #slope found with minor error: 7.672327425854542e-09\n intercept = 20.975684328898847e-4 #intercept is the same of slope\n f = lambda bit: round(slope*bit + intercept, 6)\n return list(map(f, signal)) #6 decimal places\n\ndef fftFunction(signal, rate):\n signal_len = len(signal)\n fft = np.abs(np.fft.rfft(signal))/signal_len\n freqs = np.fft.rfftfreq(signal_len)*rate\n return fft, freqs\n\ndef freq0fftPSD(signal, signal_ref, rate, roll):\n #get just the amplitude at freq = 0\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n sin_psd_fft, _ = fftFunction(sin_psd, rate)\n cos_psd_fft, _ = fftFunction(cos_psd, rate)\n return sin_psd_fft[0], cos_psd_fft[0]\n\ndef fftPSD(signal, signal_ref, freq, rate, roll):\n #get the amplitude at freq = 0 and 2 * freq\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n sin_psd_fft, sin_psd_freqs = fftFunction(sin_psd, rate)\n cos_psd_fft, cos_psd_freqs = fftFunction(cos_psd, rate)\n y = sin_psd_fft[0] + dict(zip(sin_psd_freqs, sin_psd_fft))[2*freq]\n x = cos_psd_fft[0] + dict(zip(cos_psd_freqs, cos_psd_fft))[2*freq]\n return y, x\n\ndef lowButterPSD(signal, signal_ref, rate, cutoff, order, roll):\n #PSD using the butter low pass filter\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n y = rmsFunction(lowButter(sin_psd, rate, cutoff, order))\n x = rmsFunction(lowButter(cos_psd, rate, cutoff, order))\n return y, x\n\ndef lowBesselPSD(signal, signal_ref, rate, cutoff, order, roll):\n #PSD using the bessel low pass filter\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n y = rmsFunction(lowBessel(sin_psd, rate, cutoff, order))\n x = rmsFunction(lowBessel(cos_psd, rate, cutoff, order))\n return y, x\n"
] | [
[
"scipy.signal.bessel",
"scipy.signal.filtfilt",
"numpy.sqrt",
"numpy.multiply",
"numpy.fft.rfftfreq",
"numpy.fft.rfft",
"scipy.signal.butter",
"numpy.roll"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
yuyunliuhen/automatic-text-categorization | [
"6f8ca4f26d2ac684439cc265a4ec468ad9d30d20"
] | [
"cosine_categorize.py"
] | [
"# encoding=utf-8\nimport os\nimport sys\nimport math\nfrom util_tool import *\nimport numpy\n\ndef categorization_files(path):\n\tfiles = search_directory(path,'vec')\n\tfor input_name in files:\n\t\tcategorization_file(input_name)\n\t\t# compute only once, the same to them if using topic model for sample feather\n\t\tbreak\n\t\ndef categorization_file(vec_file):\n\thandle_froms = open(vec_file,'r')\t\n\tfinal_file = vec_file.replace('vec','final')\n\thandle_final = open(final_file,'w')\t\n\tresult_list = []\n\ttotal = 0\n\tfor from_line in handle_froms:\n\t\tfrom_data = from_line.split()\n\t\thandle_tos = open(vec_file,'r')\n\t\tfor to_line in handle_tos:\n\t\t\tto_data = to_line.split()\n\t\t\tif from_data[0] == to_data[0]:\n\t\t\t\tcontinue\n\t\t\tif from_data[0].split('/')[2][0:7] == to_data[0].split('/')[2][0:7]:\n\t\t\t\ttotal += 1\n\t\t\t# the first element is file name, skip it\n\t\t\tlen_from_data = len(from_data) - 1\n\t\t\tlen_to_data = len(to_data) - 1\n\t\t\tfrom_vec = transfer_vec(from_data[1:len_from_data])\n\t\t\tto_vec = transfer_vec(to_data[1:len_to_data])\n\t\t\tcosine_value = compute_cosine_value(from_vec,to_vec)\n\t\t\ttmp = [from_data[0],to_data[0],cosine_value]\n\t\t\tresult_list.append(tmp)\n\n\taccuracy_count = 0\n\tresult_list = sorted(result_list,key=lambda x:x[2],reverse=True)\n\tfor result in result_list:\n\n\t\tif result[0].split('/')[2][0:7] == result[1].split('/')[2][0:7] and result[2] > 0:\n\t\t\taccuracy_count += 1\n\n\taccuracy_rate = round(round(float(accuracy_count) / float(total),4) * 100 ,4) \n\thandle_final.write(\"total: \" + str(total) + \" accuracy_count: \" + str(accuracy_count) + \" accuracy_rate: \" + str(accuracy_rate) + \"%\\n\")\n\tfor result in result_list:\n\t\thandle_final.write(result[0] + \"\\t\" + result[1] + \"\\t\" + str(result[2]) + \"\\n\")\n\n\thandle_final.close()\n\ndef transfer_vec(vec):\n\t# conver string to int\n\tvec = [ int (vec) for vec in vec if vec ]\n\t# conver array to vector, if not do this, TypeError: can't multiply sequence by non-int of type 'list'\n\tvec = numpy.array(vec)\n\treturn vec\n\ndef compute_cosine_value(vec_a,vec_b):\n\t#\tcos(a,b)=a*b/(|a|+|b|)\n\tnumerator = numpy.sum(vec_a*vec_b) \n\tdenominator = float(numpy.sqrt(sum(numpy.square(vec_a))) * numpy.sqrt(sum(numpy.square(vec_b))))\n\tif 0 == denominator:\n\t\treturn 0\n\ttheta = round(numerator / denominator,4)\n\treturn theta\n\n#categorization_file(\"./text/C00000810.vec\")\ncategorization_files(\"./text\")\n\n"
] | [
[
"numpy.square",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LucasHaug/MAP3121 | [
"90b69c5db20e6d56c0c3e3dd969d9e41d804e9be"
] | [
"EP2/tests.py"
] | [
"#!/usr/bin/env python3\n\nimport numpy as np\nfrom random import random\n\nimport crank_nicolson\n\n#################################################\n### Functions Definitions\n#################################################\n\ndef get_data(test_letter):\n if test_letter == \"a\":\n ut_array, uk_matrix, x_array, N = test_a()\n elif test_letter == \"b\":\n ut_array, uk_matrix, x_array, N = test_b()\n elif test_letter == \"c\":\n ut_array, uk_matrix, x_array, N = test_c()\n else:\n ut_array, uk_matrix, x_array, N = test_d()\n\n return ut_array, uk_matrix, x_array, N\n\n\n\ndef test_a():\n N = 128\n\n # Create heat sources positions array\n heat_sources_positions_array = [0.35]\n\n # Calculate uk matrix\n uk_matrix, scale_array = crank_nicolson.generate_uk(heat_sources_positions_array, N)\n\n uk_matrix = np.delete(uk_matrix, [0, N], axis=1)\n\n # Calculate ut array\n ut_array = np.array(uk_matrix[0]) * 7\n\n # Delete extremes from scale array\n scale_array = np.delete(scale_array, [0, N])\n\n return ut_array, uk_matrix, scale_array, N\n\n\n\ndef test_b():\n N = 128\n\n # Create heat sources positions array\n heat_sources_positions_array = [0.15, 0.3, 0.7, 0.8]\n\n # Calculate uk matrix\n uk_matrix, scale_array = crank_nicolson.generate_uk(heat_sources_positions_array, N)\n\n uk_matrix = np.delete(uk_matrix, [0, N], axis=1)\n\n # Calculate ut array\n ut_array = (np.array(uk_matrix[0]) * 2.3 + np.array(uk_matrix[1]) * 3.7 +\n np.array(uk_matrix[2]) * 0.3 + np.array(uk_matrix[3]) * 4.2)\n\n # Delete extremes from scale array\n scale_array = np.delete(scale_array, [0, N])\n\n return ut_array, uk_matrix, scale_array, N\n\n\n\ndef test_c():\n # Configuration\n N = int(input(\"Insira o valor de N: \"))\n\n mesh_size = 2048\n\n mesh_relation = int(mesh_size / N)\n\n test_file_name = \"teste.txt\"\n\n test_file = open(test_file_name, \"r\")\n file_lines = test_file.readlines()\n\n test_file.close()\n\n # Create heat sources positions array\n heat_sources_positions_array = [float(item) for item in (file_lines.pop(0).split())]\n\n # Calculate uk matrix\n uk_matrix, scale_array = crank_nicolson.generate_uk(heat_sources_positions_array, N)\n\n uk_matrix = np.delete(uk_matrix, [0, N], axis=1)\n\n # Create ut array\n ut_array = np.zeros(N - 1, dtype=float)\n\n for i in range(0, N - 1):\n ut_array[i] = file_lines[(i + 1) * mesh_relation]\n\n # Delete extremes from scale array\n scale_array = np.delete(scale_array, [0, N])\n\n return ut_array, uk_matrix, scale_array, N\n\n\n\ndef test_d():\n ut_array, uk_matrix, scale_array, N = test_c()\n\n ε = 0.01\n\n for i in range(0, N - 1):\n random_num = (random() - 0.5) * 2\n\n ut_array[i] *= (1 + random_num * ε)\n\n return ut_array, uk_matrix, scale_array, N\n"
] | [
[
"numpy.delete",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Crazychicken563/RhythmGameCharterAI | [
"d9647007010ecc9a7ecc93d998527e578d4b12c6"
] | [
"CloneHero/clone_hero_to_generic.py"
] | [
"import os\nimport re\nimport pickle as pkl\nimport soundfile as sf\nimport numpy as np\n\ndef safeAdd(src, key, val):\n if key in src:\n src[key].update(val)\n else:\n src[key] = val\n\nsource_dir = \"clone_hero_data/clonehero-win64/songs\"\ndef main():\n for (dirpath, dirnames, filenames) in os.walk(source_dir):\n name = os.path.relpath(dirpath, source_dir)\n audioFilePath = None\n if not filenames:\n continue\n if \"notes.mid\" in filenames:\n print(\"we aren't parsing midi files right now\")\n continue\n if not \"notes.chart\" in filenames:\n print(\"Chart data not found! \" + name)\n print(filenames)\n continue\n else:\n print(\"Parsing \" + name)\n foundOGG = False\n for filename in filenames:\n if (filename.endswith(\".ogg\")):\n foundOGG = True\n audioFilePath = os.path.abspath(source_dir + \"\\\\\" + name + \"\\\\\" + os.path.join(filename))\n if foundOGG == False:\n print(\"NO AUDIO FILE FOUND\")\n continue\n with open(os.path.join(dirpath, \"notes.chart\"), encoding=\"utf-8\") as notes:\n scanningHeader = False\n currSong = None\n currSongName = None\n try:\n currLine = notes.readline().strip()\n except UnicodeDecodeError as e:\n print(e)\n continue\n while currLine:\n if scanningHeader:\n if currLine == \"}\":\n scanningHeader = False\n samplerate = currSong['sr']\n songlength = currSong['sd'].shape[0]/samplerate\n # yeah not dealing with 48000 right now\n if samplerate == 44100:\n try:\n os.mkdir(\"clone_hero_data/output/\"+currSongName)\n timestamps = list(currSong['ts'].keys())\n for i in range(0, len(timestamps)) : \n timestamps[i] = int(timestamps[i])\n timestamps.sort() \n print(name, samplerate)\n beatrate = 441\n mapping = np.zeros(int(np.ceil(songlength*beatrate)))\n currBPM = 0\n for timestamp in timestamps:\n data = currSong['ts'][str(timestamp)]\n #print(\"{}\".format(data))\n if \"B\" in data:\n currBPM = data[\"B\"]\n print(\"currBPM {}\".format(currBPM))\n \n time = float(timestamp)/float(currBPM) * 60 #static \"60\" BPM to match up to music\n if \"N\" in data:\n #mapping[int(np.round(time*beatrate)), data[\"N\"][\"v\"]] = 1\n mapping[int(np.round(time*beatrate))] = data[\"N\"][\"v\"] + 1\n #print(int(np.round(time*beatrate)))\n for time in range(int(np.floor(songlength))):\n songwindow = currSong['sd'][time*samplerate:(time+1)*samplerate]\n mapwindow = mapping[time*beatrate:(time+1)*beatrate]\n \n with open(\"clone_hero_data/output/\"+currSongName+\"/\"+str(time)+\".pkl\", 'wb+') as f:\n pkl.dump({'name':name, 'time':time, 'window':songwindow, 'label':mapwindow}, f)\n except:\n print(\"We done Fucked up :(\")\n \n for timestamp in currSong['ts']:\n currSong['ts'][timestamp].pop(\"N\", None)\n currSong['ts'][timestamp].pop(\"S\", None)\n\n for timestamp in list(currSong['ts'].keys()):\n if len(currSong['ts'][timestamp].keys()) == 0:\n currSong['ts'].pop(str(timestamp))\n\n print(\"end of header for {}\".format(currSongName))\n else:\n (timestamp, data) = currLine.split(\"=\")\n timestamp = timestamp.strip()\n datums = data.strip().split(\" \")\n if datums[0] == \"N\":\n #These are the only things we care about for now\n value = int(datums[1].strip())\n duration = datums[2].strip()\n if value <= 4:\n # mnd will always be defined by this point since scanningHeader\n # can never be true without mnd being instantiated\n safeAdd(currSong['ts'], str(timestamp), {\n \"N\": {\n 'v': value,\n 'd': int(duration)\n }\n })\n #else:\n #print(\"Unknown value note {}\".format(datums))\n elif datums[0] == \"S\":\n # augments over 4 denote a unique type of note / note modifier\n 
# augment 7 means that the previous note has star power.\n # other augments currently unknown...\n #print(\"star power for duration: {}\".format(duration))\n safeAdd(currSong['ts'], str(timestamp), {\n \"S\": {\n 'v': 2,\n 'd': int(duration)\n }\n })\n else:\n #if any(header in currLine for header in [\"[Song]\"]):\n # print(\"Found Song header\")\n if any(header in currLine for header in [\"[SyncTrack]\"]):\n notes.readline() #Skip the \"{\"\n\n print(audioFilePath)\n songdata, samplerate = sf.read(audioFilePath)\n print(\"sample rate: {}\".format(samplerate))\n currSong = {\n 'ts': {},\n 'sd': np.asarray(songdata),\n 'sr': samplerate\n }\n\n currLine = notes.readline().strip()\n while currLine != \"}\":\n (timestamp, data) = currLine.split(\"=\")\n timestamp = timestamp.strip()\n datums = data.strip().split(\" \")\n if datums[0] == \"B\":\n #print(\"{}\".format(datums))\n #print(currSong)\n safeAdd(currSong['ts'], str(timestamp), {\n \"B\": int(datums[1].strip())\n })\n\n currLine = notes.readline().strip()\n elif any(header in currLine for header in [\"[ExpertSingle]\", \"[HardSingle]\", \"[MediumSingle]\", \"[EasySingle]\"]):\n print(\"Now scanning \" + currLine)\n notes.readline() #Skip the \"{\"\n scanningHeader = True\n mergedPathIntoName = name.replace(\"\\\\\", \"_\")\n currSongName = os.path.join(currLine + \"_\" + mergedPathIntoName)\n print(currSongName)\n\n currLine = notes.readline().strip()\n\nmain()"
] | [
[
"numpy.asarray",
"numpy.ceil",
"numpy.floor",
"numpy.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DonghyunAhn/sadvirus | [
"cdcc98812d613962a7003ff0c6013d0805bde024"
] | [
"utils/siScore_utils.py"
] | [
"import glob\nimport torch\nimport numpy as np\nfrom skimage import io, transform\nfrom torchvision import transforms\nimport torchvision.transforms.functional as F \nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport random\n\nclass ClusterDataset(Dataset):\n def __init__(self, cluster_list, dir_name, transform=None):\n self.file_list = []\n self.transform = transform \n for cluster_num in cluster_list:\n self.file_list.extend(glob.glob('../data/{}/{}/*.png'.format(dir_name, cluster_num)))\n\n def __len__(self):\n return len(self.file_list)\n\n def __getitem__(self, idx):\n image = io.imread(self.file_list[idx]) / 255.0\n if self.transform:\n image = self.transform(np.stack([image])).squeeze()\n return image\n\n \nclass RandomRotate(object):\n def __call__(self, images):\n rotated = np.stack([self.random_rotate(x) for x in images])\n return rotated\n \n def random_rotate(self, image):\n rand_num = np.random.randint(0, 4)\n if rand_num == 0:\n return np.rot90(image, k=1, axes=(0, 1))\n elif rand_num == 1:\n return np.rot90(image, k=2, axes=(0, 1))\n elif rand_num == 2:\n return np.rot90(image, k=3, axes=(0, 1)) \n else:\n return image\n \n \nclass Normalize(object):\n def __init__(self, mean, std, inplace=False):\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def __call__(self, images):\n normalized = np.stack([F.normalize(x, self.mean, self.std, self.inplace) for x in images]) \n return normalized\n \n\n \nclass Grayscale(object):\n def __init__(self, prob = 1):\n self.prob = prob\n\n def __call__(self, images): \n random_num = np.random.randint(100, size=1)[0]\n if random_num <= self.prob * 100:\n gray_images = (images[:, 0, :, :] + images[:, 1, :, :] + images[:, 2, :, :]) / 3\n gray_scaled = gray_images.unsqueeze(1).repeat(1, 3, 1, 1)\n return gray_scaled\n else:\n return images\n \n\n \n\nclass ToTensor(object):\n def __call__(self, images):\n images = images.transpose((0, 3, 1, 2))\n return torch.from_numpy(images).float()\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n \n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0 \n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count"
] | [
[
"numpy.rot90",
"torch.from_numpy",
"numpy.stack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kiuthed/qutip | [
"b6fb8e5bbd9ffeae117b54e56313e8617038deab",
"b6fb8e5bbd9ffeae117b54e56313e8617038deab"
] | [
"qutip/tests/test_tensor.py",
"qutip/bloch.py"
] | [
"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom numpy.testing import assert_equal, assert_, run_module_suite\n\nfrom qutip.operators import identity\nfrom qutip.superop_reps import to_super\nfrom qutip.tensor import (\n tensor_contract, flatten, enumerate_flat, deep_remove, unflatten\n)\n\n\ndef test_flatten():\n l = [[[0], 1], 2]\n assert_equal(flatten(l), [0, 1, 2])\n\n\ndef test_enumerate_flat():\n l = [[[10], [20, 30]], 40]\n labels = enumerate_flat(l)\n assert_equal(labels, [[[0], [1, 2]], 3])\n\n\ndef test_deep_remove():\n l = [[[0], 1], 2]\n l = deep_remove(l, 1)\n assert_equal(l, [[[0]], 2])\n\n # Harder case...\n l = [[[[0, 1, 2]], [3, 4], [5], [6, 7]]]\n l = deep_remove(l, 0, 5)\n assert l == [[[[1, 2]], [3, 4], [], [6, 7]]]\n\n\ndef test_unflatten():\n l = [[[10, 20, 30], [40, 50, 60]], [[70, 80, 90], [100, 110, 120]]]\n labels = enumerate_flat(l)\n assert unflatten(flatten(l), labels) == l\n\n\ndef test_tensor_contract():\n qobj = identity([2, 3, 4])\n ans = 3 * identity([2, 4])\n\n assert_(ans == tensor_contract(qobj, (1, 4)))\n\n # Now try for superoperators.\n # For now, we just ensure the dims are correct.\n sqobj = to_super(qobj)\n correct_dims = [[[2, 4], [2, 4]], [[2, 4], [2, 4]]]\n assert_equal(correct_dims, tensor_contract(sqobj, (1, 4), (7, 10)).dims)\n\nif __name__ == \"__main__\":\n run_module_suite()",
"# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\n__all__ = ['Bloch']\n\nimport os\n\nfrom numpy import (ndarray, array, linspace, pi, outer, cos, sin, ones, size,\n sqrt, real, mod, append, ceil, arange)\n\nfrom qutip.qobj import Qobj\nfrom qutip.expect import expect\nfrom qutip.operators import sigmax, sigmay, sigmaz\n\ntry:\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib.patches import FancyArrowPatch\n from mpl_toolkits.mplot3d import proj3d\n\n class Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\nexcept:\n pass\n\n\nclass Bloch():\n \"\"\"Class for plotting data on the Bloch sphere. Valid data can be\n either points, vectors, or qobj objects.\n\n Attributes\n ----------\n\n axes : instance {None}\n User supplied Matplotlib axes for Bloch sphere animation.\n fig : instance {None}\n User supplied Matplotlib Figure instance for plotting Bloch sphere.\n font_color : str {'black'}\n Color of font used for Bloch sphere labels.\n font_size : int {20}\n Size of font used for Bloch sphere labels.\n frame_alpha : float {0.1}\n Sets transparency of Bloch sphere frame.\n frame_color : str {'gray'}\n Color of sphere wireframe.\n frame_width : int {1}\n Width of wireframe.\n point_color : list {[\"b\",\"r\",\"g\",\"#CC6600\"]}\n List of colors for Bloch sphere point markers to cycle through.\n i.e. 
By default, points 0 and 4 will both be blue ('b').\n point_marker : list {[\"o\",\"s\",\"d\",\"^\"]}\n List of point marker shapes to cycle through.\n point_size : list {[25,32,35,45]}\n List of point marker sizes. Note, not all point markers look\n the same size when plotted!\n sphere_alpha : float {0.2}\n Transparency of Bloch sphere itself.\n sphere_color : str {'#FFDDDD'}\n Color of Bloch sphere.\n figsize : list {[7,7]}\n Figure size of Bloch sphere plot. Best to have both numbers the same;\n otherwise you will have a Bloch sphere that looks like a football.\n vector_color : list {[\"g\",\"#CC6600\",\"b\",\"r\"]}\n List of vector colors to cycle through.\n vector_width : int {5}\n Width of displayed vectors.\n vector_style : str {'-|>', 'simple', 'fancy', ''}\n Vector arrowhead style (from matplotlib's arrow style).\n vector_mutation : int {20}\n Width of vectors arrowhead.\n view : list {[-60,30]}\n Azimuthal and Elevation viewing angles.\n xlabel : list {[\"$x$\",\"\"]}\n List of strings corresponding to +x and -x axes labels, respectively.\n xlpos : list {[1.1,-1.1]}\n Positions of +x and -x labels respectively.\n ylabel : list {[\"$y$\",\"\"]}\n List of strings corresponding to +y and -y axes labels, respectively.\n ylpos : list {[1.2,-1.2]}\n Positions of +y and -y labels respectively.\n zlabel : list {[r'$\\\\left|0\\\\right>$',r'$\\\\left|1\\\\right>$']}\n List of strings corresponding to +z and -z axes labels, respectively.\n zlpos : list {[1.2,-1.2]}\n Positions of +z and -z labels respectively.\n\n\n \"\"\"\n def __init__(self, fig=None, axes=None, view=None, figsize=None,\n background=False):\n\n # Figure and axes\n self.fig = fig\n self.axes = axes\n # Background axes, default = False\n self.background = background\n # The size of the figure in inches, default = [5,5].\n self.figsize = figsize if figsize else [5, 5]\n # Azimuthal and Elvation viewing angles, default = [-60,30].\n self.view = view if view else [-60, 30]\n # Color of Bloch sphere, default = #FFDDDD\n self.sphere_color = '#FFDDDD'\n # Transparency of Bloch sphere, default = 0.2\n self.sphere_alpha = 0.2\n # Color of wireframe, default = 'gray'\n self.frame_color = 'gray'\n # Width of wireframe, default = 1\n self.frame_width = 1\n # Transparency of wireframe, default = 0.2\n self.frame_alpha = 0.2\n # Labels for x-axis (in LaTex), default = ['$x$', '']\n self.xlabel = ['$x$', '']\n # Position of x-axis labels, default = [1.2, -1.2]\n self.xlpos = [1.2, -1.2]\n # Labels for y-axis (in LaTex), default = ['$y$', '']\n self.ylabel = ['$y$', '']\n # Position of y-axis labels, default = [1.1, -1.1]\n self.ylpos = [1.2, -1.2]\n # Labels for z-axis (in LaTex),\n # default = [r'$\\left|0\\right>$', r'$\\left|1\\right>$']\n self.zlabel = [r'$\\left|0\\right>$', r'$\\left|1\\right>$']\n # Position of z-axis labels, default = [1.2, -1.2]\n self.zlpos = [1.2, -1.2]\n # ---font options---\n # Color of fonts, default = 'black'\n self.font_color = 'black'\n # Size of fonts, default = 20\n self.font_size = 20\n\n # ---vector options---\n # List of colors for Bloch vectors, default = ['b','g','r','y']\n self.vector_color = ['g', '#CC6600', 'b', 'r']\n #: Width of Bloch vectors, default = 5\n self.vector_width = 3\n #: Style of Bloch vectors, default = '-|>' (or 'simple')\n self.vector_style = '-|>'\n #: Sets the width of the vectors arrowhead\n self.vector_mutation = 20\n\n # ---point options---\n # List of colors for Bloch point markers, default = ['b','g','r','y']\n self.point_color = ['b', 'r', 'g', '#CC6600']\n # Size of 
point markers, default = 25\n self.point_size = [25, 32, 35, 45]\n # Shape of point markers, default = ['o','^','d','s']\n self.point_marker = ['o', 's', 'd', '^']\n\n # ---data lists---\n # Data for point markers\n self.points = []\n # Data for Bloch vectors\n self.vectors = []\n # Data for annotations\n self.annotations = []\n # Number of times sphere has been saved\n self.savenum = 0\n # Style of points, 'm' for multiple colors, 's' for single color\n self.point_style = []\n\n # status of rendering\n self._rendered = False\n\n def set_label_convention(self, convention):\n \"\"\"Set x, y and z labels according to one of conventions.\n\n Parameters\n ----------\n convention : string\n One of the following:\n - \"original\"\n - \"xyz\"\n - \"sx sy sz\"\n - \"01\"\n - \"polarization jones\"\n - \"polarization jones letters\"\n see also: http://en.wikipedia.org/wiki/Jones_calculus\n - \"polarization stokes\"\n see also: http://en.wikipedia.org/wiki/Stokes_parameters\n \"\"\"\n ketex = \"$\\\\left.|%s\\\\right\\\\rangle$\"\n # \\left.| is on purpose, so that every ket has the same size\n\n if convention == \"original\":\n self.xlabel = ['$x$', '']\n self.ylabel = ['$y$', '']\n self.zlabel = ['$\\\\left|0\\\\right>$', '$\\\\left|1\\\\right>$']\n elif convention == \"xyz\":\n self.xlabel = ['$x$', '']\n self.ylabel = ['$y$', '']\n self.zlabel = ['$z$', '']\n elif convention == \"sx sy sz\":\n self.xlabel = ['$s_x$', '']\n self.ylabel = ['$s_y$', '']\n self.zlabel = ['$s_z$', '']\n elif convention == \"01\":\n self.xlabel = ['', '']\n self.ylabel = ['', '']\n self.zlabel = ['$\\\\left|0\\\\right>$', '$\\\\left|1\\\\right>$']\n elif convention == \"polarization jones\":\n self.xlabel = [ketex % \"\\\\nearrow\\\\hspace{-1.46}\\\\swarrow\",\n ketex % \"\\\\nwarrow\\\\hspace{-1.46}\\\\searrow\"]\n self.ylabel = [ketex % \"\\\\circlearrowleft\", ketex %\n \"\\\\circlearrowright\"]\n self.zlabel = [ketex % \"\\\\leftrightarrow\", ketex % \"\\\\updownarrow\"]\n elif convention == \"polarization jones letters\":\n self.xlabel = [ketex % \"D\", ketex % \"A\"]\n self.ylabel = [ketex % \"L\", ketex % \"R\"]\n self.zlabel = [ketex % \"H\", ketex % \"V\"]\n elif convention == \"polarization stokes\":\n self.ylabel = [\"$\\\\nearrow\\\\hspace{-1.46}\\\\swarrow$\",\n \"$\\\\nwarrow\\\\hspace{-1.46}\\\\searrow$\"]\n self.zlabel = [\"$\\\\circlearrowleft$\", \"$\\\\circlearrowright$\"]\n self.xlabel = [\"$\\\\leftrightarrow$\", \"$\\\\updownarrow$\"]\n else:\n raise Exception(\"No such convention.\")\n\n def __str__(self):\n s = \"\"\n s += \"Bloch data:\\n\"\n s += \"-----------\\n\"\n s += \"Number of points: \" + str(len(self.points)) + \"\\n\"\n s += \"Number of vectors: \" + str(len(self.vectors)) + \"\\n\"\n s += \"\\n\"\n s += \"Bloch sphere properties:\\n\"\n s += \"------------------------\\n\"\n s += \"font_color: \" + str(self.font_color) + \"\\n\"\n s += \"font_size: \" + str(self.font_size) + \"\\n\"\n s += \"frame_alpha: \" + str(self.frame_alpha) + \"\\n\"\n s += \"frame_color: \" + str(self.frame_color) + \"\\n\"\n s += \"frame_width: \" + str(self.frame_width) + \"\\n\"\n s += \"point_color: \" + str(self.point_color) + \"\\n\"\n s += \"point_marker: \" + str(self.point_marker) + \"\\n\"\n s += \"point_size: \" + str(self.point_size) + \"\\n\"\n s += \"sphere_alpha: \" + str(self.sphere_alpha) + \"\\n\"\n s += \"sphere_color: \" + str(self.sphere_color) + \"\\n\"\n s += \"figsize: \" + str(self.figsize) + \"\\n\"\n s += \"vector_color: \" + str(self.vector_color) + \"\\n\"\n s += \"vector_width: 
\" + str(self.vector_width) + \"\\n\"\n s += \"vector_style: \" + str(self.vector_style) + \"\\n\"\n s += \"vector_mutation: \" + str(self.vector_mutation) + \"\\n\"\n s += \"view: \" + str(self.view) + \"\\n\"\n s += \"xlabel: \" + str(self.xlabel) + \"\\n\"\n s += \"xlpos: \" + str(self.xlpos) + \"\\n\"\n s += \"ylabel: \" + str(self.ylabel) + \"\\n\"\n s += \"ylpos: \" + str(self.ylpos) + \"\\n\"\n s += \"zlabel: \" + str(self.zlabel) + \"\\n\"\n s += \"zlpos: \" + str(self.zlpos) + \"\\n\"\n return s\n\n def _repr_png_(self):\n from IPython.core.pylabtools import print_figure\n self.render()\n fig_data = print_figure(self.fig, 'png')\n plt.close(self.fig)\n return fig_data\n\n def _repr_svg_(self):\n from IPython.core.pylabtools import print_figure\n self.render()\n fig_data = print_figure(self.fig, 'svg').decode('utf-8')\n plt.close(self.fig)\n return fig_data\n\n def clear(self):\n \"\"\"Resets Bloch sphere data sets to empty.\n \"\"\"\n self.points = []\n self.vectors = []\n self.point_style = []\n self.annotations = []\n\n def add_points(self, points, meth='s'):\n \"\"\"Add a list of data points to bloch sphere.\n\n Parameters\n ----------\n points : array/list\n Collection of data points.\n\n meth : str {'s', 'm', 'l'}\n Type of points to plot, use 'm' for multicolored, 'l' for points\n connected with a line.\n\n \"\"\"\n if not isinstance(points[0], (list, ndarray)):\n points = [[points[0]], [points[1]], [points[2]]]\n points = array(points)\n if meth == 's':\n if len(points[0]) == 1:\n pnts = array([[points[0][0]], [points[1][0]], [points[2][0]]])\n pnts = append(pnts, points, axis=1)\n else:\n pnts = points\n self.points.append(pnts)\n self.point_style.append('s')\n elif meth == 'l':\n self.points.append(points)\n self.point_style.append('l')\n else:\n self.points.append(points)\n self.point_style.append('m')\n\n def add_states(self, state, kind='vector'):\n \"\"\"Add a state vector Qobj to Bloch sphere.\n\n Parameters\n ----------\n state : qobj\n Input state vector.\n\n kind : str {'vector','point'}\n Type of object to plot.\n\n \"\"\"\n if isinstance(state, Qobj):\n state = [state]\n\n for st in state:\n vec = [expect(sigmax(), st),\n expect(sigmay(), st),\n expect(sigmaz(), st)]\n\n if kind == 'vector':\n self.add_vectors(vec)\n elif kind == 'point':\n self.add_points(vec)\n\n def add_vectors(self, vectors):\n \"\"\"Add a list of vectors to Bloch sphere.\n\n Parameters\n ----------\n vectors : array/list\n Array with vectors of unit length or smaller.\n\n \"\"\"\n if isinstance(vectors[0], (list, ndarray)):\n for vec in vectors:\n self.vectors.append(vec)\n else:\n self.vectors.append(vectors)\n\n def add_annotation(self, state_or_vector, text, **kwargs):\n \"\"\"Add a text or LaTeX annotation to Bloch sphere,\n parametrized by a qubit state or a vector.\n\n Parameters\n ----------\n state_or_vector : Qobj/array/list/tuple\n Position for the annotaion.\n Qobj of a qubit or a vector of 3 elements.\n\n text : str/unicode\n Annotation text.\n You can use LaTeX, but remember to use raw string\n e.g. r\"$\\\\langle x \\\\rangle$\"\n or escape backslashes\n e.g. 
\"$\\\\\\\\langle x \\\\\\\\rangle$\".\n\n **kwargs :\n Options as for mplot3d.axes3d.text, including:\n fontsize, color, horizontalalignment, verticalalignment.\n \"\"\"\n if isinstance(state_or_vector, Qobj):\n vec = [expect(sigmax(), state_or_vector),\n expect(sigmay(), state_or_vector),\n expect(sigmaz(), state_or_vector)]\n elif isinstance(state_or_vector, (list, ndarray, tuple)) \\\n and len(state_or_vector) == 3:\n vec = state_or_vector\n else:\n raise Exception(\"Position needs to be specified by a qubit \" +\n \"state or a 3D vector.\")\n self.annotations.append({'position': vec,\n 'text': text,\n 'opts': kwargs})\n\n def make_sphere(self):\n \"\"\"\n Plots Bloch sphere and data sets.\n \"\"\"\n self.render(self.fig, self.axes)\n\n def render(self, fig=None, axes=None):\n \"\"\"\n Render the Bloch sphere and its data sets in on given figure and axes.\n \"\"\"\n if self._rendered:\n self.axes.clear()\n\n self._rendered = True\n\n # Figure instance for Bloch sphere plot\n if not fig:\n self.fig = plt.figure(figsize=self.figsize)\n\n if not axes:\n self.axes = Axes3D(self.fig, azim=self.view[0], elev=self.view[1])\n\n if self.background:\n self.axes.clear()\n self.axes.set_xlim3d(-1.3, 1.3)\n self.axes.set_ylim3d(-1.3, 1.3)\n self.axes.set_zlim3d(-1.3, 1.3)\n else:\n self.plot_axes()\n self.axes.set_axis_off()\n self.axes.set_xlim3d(-0.7, 0.7)\n self.axes.set_ylim3d(-0.7, 0.7)\n self.axes.set_zlim3d(-0.7, 0.7)\n\n self.axes.grid(False)\n self.plot_back()\n self.plot_points()\n self.plot_vectors()\n self.plot_front()\n self.plot_axes_labels()\n self.plot_annotations()\n\n def plot_back(self):\n # back half of sphere\n u = linspace(0, pi, 25)\n v = linspace(0, pi, 25)\n x = outer(cos(u), sin(v))\n y = outer(sin(u), sin(v))\n z = outer(ones(size(u)), cos(v))\n self.axes.plot_surface(x, y, z, rstride=2, cstride=2,\n color=self.sphere_color, linewidth=0,\n alpha=self.sphere_alpha)\n # wireframe\n self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,\n color=self.frame_color,\n alpha=self.frame_alpha)\n # equator\n self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='z',\n lw=self.frame_width, color=self.frame_color)\n self.axes.plot(1.0 * cos(u), 1.0 * sin(u), zs=0, zdir='x',\n lw=self.frame_width, color=self.frame_color)\n\n def plot_front(self):\n # front half of sphere\n u = linspace(-pi, 0, 25)\n v = linspace(0, pi, 25)\n x = outer(cos(u), sin(v))\n y = outer(sin(u), sin(v))\n z = outer(ones(size(u)), cos(v))\n self.axes.plot_surface(x, y, z, rstride=2, cstride=2,\n color=self.sphere_color, linewidth=0,\n alpha=self.sphere_alpha)\n # wireframe\n self.axes.plot_wireframe(x, y, z, rstride=5, cstride=5,\n color=self.frame_color,\n alpha=self.frame_alpha)\n # equator\n self.axes.plot(1.0 * cos(u), 1.0 * sin(u),\n zs=0, zdir='z', lw=self.frame_width,\n color=self.frame_color)\n self.axes.plot(1.0 * cos(u), 1.0 * sin(u),\n zs=0, zdir='x', lw=self.frame_width,\n color=self.frame_color)\n\n def plot_axes(self):\n # axes\n span = linspace(-1.0, 1.0, 2)\n self.axes.plot(span, 0 * span, zs=0, zdir='z', label='X',\n lw=self.frame_width, color=self.frame_color)\n self.axes.plot(0 * span, span, zs=0, zdir='z', label='Y',\n lw=self.frame_width, color=self.frame_color)\n self.axes.plot(0 * span, span, zs=0, zdir='y', label='Z',\n lw=self.frame_width, color=self.frame_color)\n\n def plot_axes_labels(self):\n # axes labels\n opts = {'fontsize': self.font_size,\n 'color': self.font_color,\n 'horizontalalignment': 'center',\n 'verticalalignment': 'center'}\n self.axes.text(0, -self.xlpos[0], 0, 
self.xlabel[0], **opts)\n self.axes.text(0, -self.xlpos[1], 0, self.xlabel[1], **opts)\n\n self.axes.text(self.ylpos[0], 0, 0, self.ylabel[0], **opts)\n self.axes.text(self.ylpos[1], 0, 0, self.ylabel[1], **opts)\n\n self.axes.text(0, 0, self.zlpos[0], self.zlabel[0], **opts)\n self.axes.text(0, 0, self.zlpos[1], self.zlabel[1], **opts)\n\n for a in (self.axes.w_xaxis.get_ticklines() +\n self.axes.w_xaxis.get_ticklabels()):\n a.set_visible(False)\n for a in (self.axes.w_yaxis.get_ticklines() +\n self.axes.w_yaxis.get_ticklabels()):\n a.set_visible(False)\n for a in (self.axes.w_zaxis.get_ticklines() +\n self.axes.w_zaxis.get_ticklabels()):\n a.set_visible(False)\n\n def plot_vectors(self):\n # -X and Y data are switched for plotting purposes\n for k in range(len(self.vectors)):\n\n xs3d = self.vectors[k][1] * array([0, 1])\n ys3d = -self.vectors[k][0] * array([0, 1])\n zs3d = self.vectors[k][2] * array([0, 1])\n\n color = self.vector_color[mod(k, len(self.vector_color))]\n\n if self.vector_style == '':\n # simple line style\n self.axes.plot(xs3d, ys3d, zs3d,\n zs=0, zdir='z', label='Z',\n lw=self.vector_width, color=color)\n else:\n # decorated style, with arrow heads\n a = Arrow3D(xs3d, ys3d, zs3d,\n mutation_scale=self.vector_mutation,\n lw=self.vector_width,\n arrowstyle=self.vector_style,\n color=color)\n\n self.axes.add_artist(a)\n\n def plot_points(self):\n # -X and Y data are switched for plotting purposes\n for k in range(len(self.points)):\n num = len(self.points[k][0])\n dist = [sqrt(self.points[k][0][j] ** 2 +\n self.points[k][1][j] ** 2 +\n self.points[k][2][j] ** 2) for j in range(num)]\n if any(abs(dist - dist[0]) / dist[0] > 1e-12):\n # combine arrays so that they can be sorted together\n zipped = list(zip(dist, range(num)))\n zipped.sort() # sort rates from lowest to highest\n dist, indperm = zip(*zipped)\n indperm = array(indperm)\n else:\n indperm = arange(num)\n if self.point_style[k] == 's':\n self.axes.scatter(\n real(self.points[k][1][indperm]),\n - real(self.points[k][0][indperm]),\n real(self.points[k][2][indperm]),\n s=self.point_size[mod(k, len(self.point_size))],\n alpha=1,\n edgecolor='none',\n zdir='z',\n color=self.point_color[mod(k, len(self.point_color))],\n marker=self.point_marker[mod(k, len(self.point_marker))])\n\n elif self.point_style[k] == 'm':\n pnt_colors = array(self.point_color *\n ceil(num / float(len(self.point_color))))\n\n pnt_colors = pnt_colors[0:num]\n pnt_colors = list(pnt_colors[indperm])\n marker = self.point_marker[mod(k, len(self.point_marker))]\n s = self.point_size[mod(k, len(self.point_size))]\n self.axes.scatter(real(self.points[k][1][indperm]),\n -real(self.points[k][0][indperm]),\n real(self.points[k][2][indperm]),\n s=s, alpha=1, edgecolor='none',\n zdir='z', color=pnt_colors,\n marker=marker)\n\n elif self.point_style[k] == 'l':\n color = self.point_color[mod(k, len(self.point_color))]\n self.axes.plot(real(self.points[k][1]),\n -real(self.points[k][0]),\n real(self.points[k][2]),\n alpha=0.75, zdir='z',\n color=color)\n\n def plot_annotations(self):\n # -X and Y data are switched for plotting purposes\n for annotation in self.annotations:\n vec = annotation['position']\n opts = {'fontsize': self.font_size,\n 'color': self.font_color,\n 'horizontalalignment': 'center',\n 'verticalalignment': 'center'}\n opts.update(annotation['opts'])\n self.axes.text(vec[1], -vec[0], vec[2],\n annotation['text'], **opts)\n\n def show(self):\n \"\"\"\n Display Bloch sphere and corresponding data sets.\n \"\"\"\n self.render(self.fig, 
self.axes)\n if self.fig:\n plt.show(self.fig)\n\n def save(self, name=None, format='png', dirc=None):\n \"\"\"Saves Bloch sphere to file of type ``format`` in directory ``dirc``.\n\n Parameters\n ----------\n\n name : str\n Name of saved image. Must include path and format as well.\n i.e. '/Users/Paul/Desktop/bloch.png'\n This overrides the 'format' and 'dirc' arguments.\n format : str\n Format of output image.\n dirc : str\n Directory for output images. Defaults to current working directory.\n\n Returns\n -------\n File containing plot of Bloch sphere.\n\n \"\"\"\n self.render(self.fig, self.axes)\n if dirc:\n if not os.path.isdir(os.getcwd() + \"/\" + str(dirc)):\n os.makedirs(os.getcwd() + \"/\" + str(dirc))\n if name is None:\n if dirc:\n self.fig.savefig(os.getcwd() + \"/\" + str(dirc) + '/bloch_' +\n str(self.savenum) + '.' + format)\n else:\n self.fig.savefig(os.getcwd() + '/bloch_' + str(self.savenum) +\n '.' + format)\n else:\n self.fig.savefig(name)\n self.savenum += 1\n if self.fig:\n plt.close(self.fig)\n\n\ndef _hide_tick_lines_and_labels(axis):\n '''\n Set visible property of ticklines and ticklabels of an axis to False\n '''\n for a in axis.get_ticklines() + axis.get_ticklabels():\n a.set_visible(False)\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.testing.run_module_suite"
],
[
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.append",
"numpy.size",
"numpy.real",
"matplotlib.pyplot.close",
"matplotlib.patches.FancyArrowPatch.__init__",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.patches.FancyArrowPatch.draw"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
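A short sketch mirroring the `test_tensor.py` file in the row above; it reuses the same imports and the same contraction the test asserts, so everything here is grounded in that file.

```python
# Illustrative sketch: contracting paired indices of a composite-space identity
# with qutip's tensor_contract, exactly as exercised by the test above.
from qutip.operators import identity
from qutip.tensor import tensor_contract, flatten

qobj = identity([2, 3, 4])                 # identity on a 2x3x4 composite space
contracted = tensor_contract(qobj, (1, 4)) # trace out the 3-dimensional factor
assert contracted == 3 * identity([2, 4])  # the trace contributes a factor of 3
assert flatten([[[0], 1], 2]) == [0, 1, 2] # flatten collapses nested dims lists
```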
BastienArcelin/IPU-GPU | [
"dde946686478ce77a06821a1517b5b8206ab8de9"
] | [
"scripts/ipu/inference_gen_galaxy.py"
] | [
"## Load necessary librairies\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\ntfk = tf.keras\ntfkl = tfk.layers\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\nimport time\nimport sys\nsys.path.insert(0,'')\nfrom flow import *\nimport utils_vae\n\n# IPU \nfrom tensorflow.compiler.plugin.poplar.ops import gen_ipu_ops\nfrom tensorflow.python import ipu\nfrom tensorflow.python.ipu.scopes import ipu_scope\ncfg = ipu.utils.create_ipu_config()#profiling=True,\n #profile_execution=True,\n #report_directory='fixed_fullModel'\ncfg = ipu.utils.auto_select_ipus(cfg, 1)\nipu.utils.configure_ipu_system(cfg)\n\n\n## Define the normalizing flow\nhidden_dim = [256,256]\nlayers =8\nbijectors = []\n\n# IPU\n# Create an IPU distribution strategy\nstrategy = ipu.ipu_strategy.IPUStrategy()\n#with ipu_scope(\"/device:IPU:0\"):\nwith strategy.scope():\n for i in range(0, layers):\n made = make_network(32, hidden_dim,2)\n bijectors.append(MAF(made))\n bijectors.append(tfb.Permute(permutation=[31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]))\n \n bijectors = tfb.Chain(bijectors=list(reversed(bijectors[:-1])))\n\n distribution = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=bijectors,\n event_shape=[32]\n )\n\n x_ = tfkl.Input(shape=(32,), dtype=tf.float32)\n log_prob_ = distribution.log_prob(x_)\n model = tfk.Model(x_, log_prob_)\n\n model.compile(optimizer=tf.optimizers.Adam(), loss=lambda _, log_prob: -log_prob)\n print('flow defined')\n\n ## Load weights\n loading_path = '../../nflow_weights/'\n latest = tf.train.latest_checkpoint(loading_path)\n model.load_weights(latest)\n\n ## Define VAE and load weights decoder VAE\n vae_lsst_conv,vae_lsst_utils, encoder_LSST, decoder_LSST, Dkl = utils_vae.load_vae_full('../../vae_weights/weights_mse_noisy_v4.513-0.00.ckpt',6, folder= False)\n\n ### Do inference\n ## Warm-up \n samples = distribution.sample(100)\n out = decoder_LSST(samples)\n print('warm-up over')\n n_gal = 1000\n print(n_gal)\n ## Actual inference\n t0 = time.time()\n samples = distribution.sample(n_gal)\n out = decoder_LSST(samples)\n t1 = time.time()\n\nprint('time for inference:' + str(t1-t0))\n"
] | [
[
"tensorflow.train.latest_checkpoint",
"tensorflow.python.ipu.utils.configure_ipu_system",
"tensorflow.python.ipu.ipu_strategy.IPUStrategy",
"tensorflow.python.ipu.utils.create_ipu_config",
"tensorflow.python.ipu.utils.auto_select_ipus",
"tensorflow.optimizers.Adam"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
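A reduced sketch of the normalizing-flow construction in the script above, with two substitutions hedged explicitly: the repo's custom `make_network`/`MAF` helpers are replaced by stock `tfb.AutoregressiveNetwork`/`tfb.MaskedAutoregressiveFlow`, and the script's `event_shape=[32]` argument is expressed via `tfd.Sample`, since newer TensorFlow Probability versions expect that form. The IPU strategy scope is omitted.

```python
# Illustrative sketch of the MAF stack above using stock TFP bijectors.
import tensorflow_probability as tfp
tfd, tfb = tfp.distributions, tfp.bijectors

dim, layers = 32, 8
bijectors = []
for _ in range(layers):
    made = tfb.AutoregressiveNetwork(params=2, hidden_units=[256, 256])
    bijectors.append(tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=made))
    bijectors.append(tfb.Permute(permutation=list(reversed(range(dim)))))

# As in the script: drop the trailing Permute, then chain in reverse order.
flow = tfb.Chain(list(reversed(bijectors[:-1])))

distribution = tfd.TransformedDistribution(
    distribution=tfd.Sample(tfd.Normal(loc=0., scale=1.), sample_shape=[dim]),
    bijector=flow)

samples = distribution.sample(100)   # shape (100, 32), as in the warm-up step
```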
ibm-developer-skills-network/oroir-Build-a-Personal-Movie-Recommender-with-Django | [
"fbc681cdea067c0cee91c158c632f83cff9db936"
] | [
"recommender/movierecommender/management/commands/load_movies.py"
] | [
"import csv\nimport pandas as pd\nfrom django.core.management import BaseCommand\nfrom ...models import Movie\n\n\nclass Command(BaseCommand):\n help = 'Load a movie csv file into the database'\n\n def add_arguments(self, parser):\n parser.add_argument('--path', type=str)\n\n def handle(self, *args, **kwargs):\n print(\"Clean old movie data\")\n Movie.objects.all().delete()\n path = kwargs['path']\n movie_df = pd.read_csv(path)\n for index, row in movie_df.iterrows():\n imdb_id = row[\"imdb_id\"]\n genres = row[\"genres\"]\n release_date = row[\"release_date\"]\n original_language = row[\"original_language\"]\n original_title = row[\"original_title\"]\n overview = row[\"overview\"]\n vote_average = row[\"vote_average\"]\n vote_count = row[\"vote_count\"]\n poster_path = row[\"poster_path\"]\n #print(f\"{imdb_id} {original_title} {genres} {overview} {vote_average} {poster_path}\")\n movie = Movie(imdb_id=imdb_id,\n genres=genres,\n original_title=original_title,\n original_language=original_language,\n release_date=release_date,\n overview=overview,\n vote_average=vote_average,\n vote_count=vote_count,\n poster_path=poster_path)\n movie.save()\n print(f\"{imdb_id} saved...\")\n\n# python manage.py load_movies --path movies.csv"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
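The loader above constructs `Movie` objects from nine CSV columns; a sketch of the model it implies follows. The field types are guesses inferred from the column names (the actual `models.py` is not in this row), so treat every type and `max_length` as an assumption.

```python
# Hypothetical sketch of the Movie model implied by the management command above.
from django.db import models

class Movie(models.Model):
    imdb_id = models.CharField(max_length=20)            # assumed key-like string
    genres = models.TextField()
    original_title = models.CharField(max_length=255)
    original_language = models.CharField(max_length=10)
    release_date = models.CharField(max_length=20)       # stored as text in the CSV
    overview = models.TextField()
    vote_average = models.FloatField()
    vote_count = models.IntegerField()
    poster_path = models.CharField(max_length=255)
```

Per the comment at the end of the file, the command is invoked as `python manage.py load_movies --path movies.csv`.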
ContactEngineering/Adhesion | [
"acc46ad9bfe49fec667cb9a116ebde426faa38c4"
] | [
"helpers/Testing_augmented_Lagrangian.py"
] | [
"#\n# Copyright 2020 Antoine Sanner\n# 2020 Lars Pastewka\n# 2015-2016 Till Junge\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n# coding: utf-8\n\n## Testing the Augmented Lagrangian of Adhesion\n\n# The implementation of the augmented Lagrangian in Tools follows closely the description of the `LANCELOT` algorithm described in Bierlaire (2006)\n\n# The function `augmented_lagrangian` has the form of custom minimizer for [scipy.optimize.minimize](http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.minimize.html)\n\n# In[4]:\n\nimport sys\nimport os\nimport numpy as np\n\nimport scipy.optimize\nsys.path.append(os.path.join(os.getcwd(), \"../PyCo/Tools/\"))\nfrom AugmentedLagrangian import augmented_lagrangian\n\n\n### Book example\n\n# Example 20.5: Minimise the fuction $f(x)$\n# $$\\min_{x\\in\\mathbb{R}^2} 2(x_1^2+x_2^2 -1)-x_1$$\n# under the constraint\n# $$ x_1^2 + x_2^2 = 1$$\n\n# ugly workaround to get a fresh AugmentedLagrangian without module loads\n\n# In[9]:\n\n# fname = \"../PyCo/Tools/AugmentedLagrangian.py\"\n# with open(fname) as filehandle:\n# content = ''.join((line for line in filehandle))\n# exec(content)\n\n\n# In[11]:\n\ndef fun(x):\n return (x[0]**2 + x[1]**2 - 1) - x[0]\ndef constraint(x):\n return x[0]**2 + x[1]**2 - 1\ntol = 1.e-2\nresult = scipy.optimize.minimize(fun, x0=np.array((-1, .1)),\n \t constraints={'type':'eq','fun':constraint},\n\t method=augmented_lagrangian, tol=tol,\n\t options={'multiplier0': np.array((0.)),\n 'disp': True,\n 'store_iterates': 'iterate'})\n\nprint(result)\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
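The helper script above tests a custom `augmented_lagrangian` minimizer on Bierlaire's Example 20.5. As a cross-check, the same equality-constrained problem can be solved with SciPy's built-in SLSQP; the sketch below uses the objective as stated in the docstring, $2(x_1^2+x_2^2-1)-x_1$ (note the script's `fun` drops the factor of 2).

```python
# Illustrative sketch: the book example solved with stock SLSQP instead of the
# repo's custom augmented_lagrangian method.
import numpy as np
import scipy.optimize

fun = lambda x: 2 * (x[0]**2 + x[1]**2 - 1) - x[0]      # objective (docstring form)
constraint = {'type': 'eq', 'fun': lambda x: x[0]**2 + x[1]**2 - 1}  # unit circle

result = scipy.optimize.minimize(fun, x0=np.array((-1.0, 0.1)),
                                 constraints=constraint, method='SLSQP')
print(result.x)  # on the circle the objective reduces to -x1, so expect ~(1, 0)
```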
ian-shepherd/bball_sim | [
"119696eda8d1c1c96da4113c3a41659e1472ebc2"
] | [
"bball_sim/app.py"
] | [
"# Packages\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nimport util\r\n\r\n# Configure page\r\nst.set_page_config(page_title='End of Game NBA Simulator',\r\n page_icon='https://raw.githubusercontent.com/papagorgio23/Python101/master/newlogo.png',\r\n layout=\"centered\")\r\n\r\n\r\n# Load data and convert to list of players\r\ncols = ['Player', 'bbref_id']\r\nplayers = pd.read_csv('./player_data.csv', usecols=cols)\r\nplayerList = players['Player'].tolist()\r\n\r\n\r\n# Simulation function\r\ndef baseSimulation(n, t, diff, fouls1, fouls2, ot_prob):\r\n \"\"\"\r\n primary simulation to determine number of games won by each strategy\r\n returns a dataframe of strategy, result (number of won wins), number of sims, and mean point difference\r\n \"\"\"\r\n\r\n # Generate empty lists\r\n simTypeList = []\r\n resultList = []\r\n overtimeList = []\r\n pointDiffList = []\r\n\r\n # Simulation\r\n for i in range(0, n):\r\n\r\n # 2 pt simulation\r\n result, overtime, pointDiff = util.runSim(2, \r\n df1, \r\n df2, \r\n rbPct1, \r\n rbPct2, \r\n timeLeftInitial=t, \r\n pointDiffInitial=diff, \r\n teamFouls1Initial=fouls1, \r\n teamFouls2Initial=fouls2, \r\n overtimeProb=ot_prob)\r\n simTypeList.append('2pt')\r\n resultList.append(result)\r\n overtimeList.append(overtime)\r\n pointDiffList.append(pointDiff)\r\n\r\n # 3 pt simulation\r\n result, overtime, pointDiff = util.runSim(3, \r\n df1, \r\n df2, \r\n rbPct1, \r\n rbPct2, \r\n timeLeftInitial=t, \r\n pointDiffInitial=diff, \r\n teamFouls1Initial=fouls1, \r\n teamFouls2Initial=fouls2, \r\n overtimeProb=ot_prob)\r\n simTypeList.append('3pt')\r\n resultList.append(result)\r\n overtimeList.append(overtime)\r\n pointDiffList.append(pointDiff)\r\n\r\n\r\n # Output dataframe\r\n df = pd.DataFrame(zip(simTypeList, resultList, overtimeList, pointDiffList),\r\n columns=['Strategy', 'Result', 'Overtime', 'Point_diff'])\r\n df = df.groupby(['Strategy'])[['Result']].sum().reset_index()\r\n df['Sims'] = n\r\n\r\n\r\n # Generate plot\r\n # set plot style: grey grid in the background:\r\n sns.set(style=\"darkgrid\")\r\n\r\n # set the figure size\r\n # plt.figure(figsize=(14, 10))\r\n fig = plt.figure(figsize=(12, 8))\r\n\r\n # plot bars\r\n bar1 = sns.barplot(x='Strategy', y='Sims', data=df, estimator=sum, ci=None, color='lightcoral')\r\n bar2 = sns.barplot(x='Strategy', y='Result', data=df, color='dodgerblue')\r\n\r\n # legend\r\n top_bar = mpatches.Patch(color='lightcoral', label='Loss')\r\n bottom_bar = mpatches.Patch(color='dodgerblue', label='Win')\r\n plt.legend(bbox_to_anchor=(1,1), borderaxespad=0, frameon=False, ncol=2, handles=[bottom_bar, top_bar])\r\n\r\n # formatting\r\n plt.ylabel(\"# of Simulations\")\r\n plt.title(\"Result of \" + str(n) + \" Simulations by Strategy\")\r\n\r\n st.pyplot(fig)\r\n\r\n # Print % of sims won\r\n st.write(str(round(df.loc[0,'Result'] / n * 100, 1)) + '% of 2pt strategy similations won')\r\n st.write(str(round(df.loc[1,'Result'] / n * 100, 1)) + '% of 3pt strategy similations won')\r\n\r\n return df\r\n\r\n\r\n# Configure page\r\nst.title(\"End of NBA Game Simulator\")\r\nst.subheader(\r\n \"_Adjust the inputs in the sidebar and click apply to view the results of the simulation_\"\r\n)\r\n\r\n\r\n\r\n# Configure sidebar\r\nbuton1 = st.sidebar.button(\"Run\")\r\n\r\n# game state inputs\r\nn = st.sidebar.number_input(\"number of simulations\", min_value=100, max_value=1000000, value=1000)\r\nt = 
st.sidebar.number_input(\"seconds remaining\", min_value=1, max_value=60, value=30)\r\ndiff = st.sidebar.number_input(\"point differential\", min_value=-10, max_value=0, value=-3)\r\nfouls1 = st.sidebar.number_input(\"fouls committed by leading team\", min_value=0, max_value=10, value=5)\r\nfouls2 = st.sidebar.number_input(\"fouls committed by trailing team\", min_value=0, max_value=10, value=5)\r\not_prob = st.sidebar.number_input(\"overtime win probability (%)\", min_value=0, max_value=100, value=50) / 100\r\n\r\n# trailing team players\r\nst.sidebar.write(\"\")\r\nst.sidebar.write(\"Trailing Team\")\r\nplayer1 = st.sidebar.selectbox(\"player1\", playerList, playerList.index(\"Kemba Walker\\\\walkeke02\"))\r\nplayer2 = st.sidebar.selectbox(\"player2\", playerList, playerList.index(\"Marcus Smart\\\\smartma01\"))\r\nplayer3 = st.sidebar.selectbox(\"player3\", playerList, playerList.index(\"Jaylen Brown\\\\brownja02\"))\r\nplayer4 = st.sidebar.selectbox(\"player4\", playerList, playerList.index(\"Jayson Tatum\\\\tatumja01\"))\r\nplayer5 = st.sidebar.selectbox(\"player5\", playerList, playerList.index(\"Grant Williams\\\\willigr01\"))\r\n\r\n# leading team players\r\nst.sidebar.write(\"Leading Team\")\r\nplayer6 = st.sidebar.selectbox(\"player6\", playerList, playerList.index(\"Ben Simmons\\\\simmobe01\"))\r\nplayer7 = st.sidebar.selectbox(\"player7\", playerList, playerList.index(\"Seth Curry\\\\curryse01\"))\r\nplayer8 = st.sidebar.selectbox(\"player8\", playerList, playerList.index(\"Danny Green\\\\greenda02\"))\r\nplayer9 = st.sidebar.selectbox(\"player9\", playerList, playerList.index(\"Tobias Harris\\\\harrito02\"))\r\nplayer10 = st.sidebar.selectbox(\"player10\", playerList, playerList.index(\"Joel Embiid\\\\embiijo01\"))\r\n\r\n\r\n# Run simulations\r\n# if st.sidebar.button('Apply'):\r\nif buton1:\r\n with st.spinner(\"Running simulations...\"):\r\n team1 = [player1.rsplit('\\\\',1)[1], player2.rsplit('\\\\',1)[1], player3.rsplit('\\\\',1)[1], player4.rsplit('\\\\',1)[1], player5.rsplit('\\\\',1)[1]]\r\n team2 = [player6.rsplit('\\\\',1)[1], player7.rsplit('\\\\',1)[1], player8.rsplit('\\\\',1)[1], player9.rsplit('\\\\',1)[1], player10.rsplit('\\\\',1)[1]]\r\n df1, df2, rbPct1, rbPct2 = util.prepSim(team1, team2)\r\n baseSimulation(n, t, diff, fouls1, fouls2, ot_prob)\r\n\r\n\r\nabout = st.expander('Simulation Info')\r\nwith about:\r\n \"\"\"\r\n This is an end of NBA game simulator based on player statistics for the 2020-2021 NBA season. You can select the same \r\n player to both teams but you cannot put a player on the same team twice. There are also dummy players that act as a \r\n representative player of that position. The simulator assumes the outcome of every possession is a made shot, missed \r\n shot with the potential of a rebound, or intentional foul. It will not account for turnovers or blocks. The time taken \r\n by each possession is based on a normal distribution accounting for what is in the best interest of the team. For example, \r\n the simulation assumes the trailing team will take an average of 4 seconds but if the game is tied, that team will try \r\n and maximize the amount of time taken so that mean is changed to the time remaining - 1.5 seconds. The shooter is also \r\n determined by a composite rating that ranks players by number of that specific shot (free throw, 2 pt, 3 pt) taken per \r\n game and their success rate. Players are then assigned a probability of being the selected shooter. 
Rebounds on the other \r\n hand are determined by a team liklihood that compares the rebounding of the two teams to determine each team's liklihood \r\n of successfully getting a rebound.\r\n \"\"\""
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.patches.Patch",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
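The `baseSimulation` function above renders its win/loss counts as two overlaid seaborn bar plots. A standalone sketch of that plotting pattern follows, with dummy win counts standing in for `util.runSim` output (the `util` module is repo-specific and not assumed here).

```python
# Illustrative sketch of the stacked win/loss chart from baseSimulation above,
# using made-up numbers: the full-height bar shows total simulations (losses
# remain visible as the red remainder) and the overlaid bar shows wins.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

df = pd.DataFrame({'Strategy': ['2pt', '3pt'],
                   'Result': [412, 537],      # dummy win counts
                   'Sims': [1000, 1000]})

sns.set(style='darkgrid')
fig = plt.figure(figsize=(12, 8))
sns.barplot(x='Strategy', y='Sims', data=df, color='lightcoral')    # totals
sns.barplot(x='Strategy', y='Result', data=df, color='dodgerblue')  # wins
plt.legend(handles=[mpatches.Patch(color='dodgerblue', label='Win'),
                    mpatches.Patch(color='lightcoral', label='Loss')],
           frameon=False, ncol=2)
plt.ylabel('# of Simulations')
plt.show()
```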
poltextlab/nyt_hybrid_classification_workflow | [
"3f676938b08f4373be3a83e975ee51dfa5ce6bf5",
"3f676938b08f4373be3a83e975ee51dfa5ce6bf5",
"3f676938b08f4373be3a83e975ee51dfa5ce6bf5",
"3f676938b08f4373be3a83e975ee51dfa5ce6bf5",
"3f676938b08f4373be3a83e975ee51dfa5ce6bf5",
"3f676938b08f4373be3a83e975ee51dfa5ce6bf5"
] | [
"spark_cluster/04_5_HV_activeLearn/HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1/6100_ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5.py",
"spark_cluster/04_2_HV_basic/HV_v1_NYT_sim2_and_sim3_to_sim1/6200_ML2_HV_v1_NYT_sim2_and_sim3_to_sim1_round1_human_validation.py",
"spark_cluster/04_2_HV_basic/HV_v1_NYT_sim1_and_sim3_to_sim2/6200_ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation.py",
"spark_cluster/04_3_HV_enhanced_basic/HV_v2_savNegInf_NYT_sim1_and_sim3_to_sim2/6100_ML2_HV_v2_savNegInf_NYT_sim1_and_sim3_to_sim2_round4.py",
"spark_cluster/04_4_HV_stepwise_balanced/HV_v3_stepwise_NYT_sim1_and_sim2_to_sim3/6200_ML2_HV_v3_stepwise_NYT_sim1_and_sim2_to_sim3_round1_human_validation.py",
"spark_cluster/04_4_HV_stepwise_balanced/HV_v3_stepwise_NYT_sim2_and_sim3_to_sim1/6100_ML2_HV_v3_stepwise_NYT_sim2_and_sim3_to_sim1_round4.py"
] | [
"# import libraries\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import *\n\nfrom pyspark.sql.functions import col, count, when\n\nfrom pyspark.ml.classification import LinearSVC\n\nimport pandas as pd\n\n#################################################\n# spark config\n#################################################\nmtaMaster = \"spark://192.168.0.182:7077\"\n\nconf = SparkConf()\nconf.setMaster(mtaMaster)\n\nconf.set(\"spark.executor.memory\", \"24g\")\nconf.set(\"spark.driver.memory\", \"26g\")\nconf.set(\"spark.cores.max\", 96)\nconf.set(\"spark.driver.cores\", 8)\n\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\")\nconf.set(\"spark.kryoserializer.buffer\", \"256m\")\nconf.set(\"spark.kryoserializer.buffer.max\", \"256m\")\n\nconf.set(\"spark.default.parallelism\", 24)\n\nconf.set(\"spark.eventLog.enabled\", \"true\")\nconf.set(\"spark.eventLog.dir\", \"hdfs://192.168.0.182:9000/eventlog\")\nconf.set(\"spark.history.fs.logDirectory\", \"hdfs://192.168.0.182:9000/eventlog\")\n\nconf.set(\"spark.driver.maxResultSize\", \"2g\")\n\nconf.getAll()\n\n#################################################\n# create spark session\n#################################################\nspark = SparkSession.builder.appName('ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5').config(conf=conf).getOrCreate()\n\nsc = spark.sparkContext\n\n# check things are working\nprint(sc)\nprint(sc.defaultParallelism)\nprint(\"SPARK CONTEXT IS RUNNING\")\n\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (NO 23 IN THE NYT CORPUS)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]\n\n\n#################################################\n# loop starts here\n#################################################\n\nfor h in range(3):\n # read table from hdfs\n df_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_round5_start.parquet\").repartition(50)\n\n # check loaded data \n print(df_original.printSchema())\n print(df_original.show())\n df_original.groupBy(\"majortopic\").count().show(30, False)\n\n #################################################\n # prepare to log sample numbers\n #################################################\n\n columns = [\"label\", \"non_label_all\", \"non_label_sample\", \"train_all\"]\n\n df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)\n\n for i in majortopic_codes:\n #################################################\n # prepare df for svm requirements\n #################################################\n print(\"majortopic is:\", i)\n\n # separate majortopic\n df_original = df_original.withColumn(\"label\", when(df_original[\"majortopic\"] == i, 1).otherwise(0))\n\n # label has to be double for SVM\n df_original = df_original.withColumn('label', df_original.label.cast(DoubleType()))\n\n #################################################\n # separate training and test sets\n #################################################\n\n df_train = df_original.where((col('train_r5') == 1) | (col('train_r2_neg') == i) | (col('train_r3_neg') == i) | (col('train_r4_neg') == i) | (col('train_r5_neg') == i))\n df_test = df_original.where((col('train_r5') == 0) & (col('train_r2_neg') != i) & (col('train_r3_neg') 
!= i) & (col('train_r4_neg') != i) & (col('train_r5_neg') != i))\n\n # make training data proportional with regards to label occurrence frequency\n df_train_mtc = df_train.where(col('label') == 1)\n df_train_non_mtc = df_train.where(col('label') == 0)\n\n df_train_count = df_train.count()\n df_train_mtc_count = df_train_mtc.count()\n df_train_non_mtc_count = df_train_non_mtc.count()\n print(\"Rows in training DataFrame with label = \", df_train_mtc_count)\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_count)\n\n if df_train_mtc_count/df_train_non_mtc_count < 0.1:\n if df_train_mtc_count*10 < df_train_count//10:\n sample_num = df_train_count//10\n else: sample_num = df_train_mtc_count*10\n print(\"sample_num = \", sample_num)\n print(\"df_train_non_mtc = \", df_train_non_mtc_count)\n sampling_fraction = sample_num/df_train_non_mtc_count\n print(\"sampling_fraction = \", sampling_fraction)\n df_train_non_mtc = df_train_non_mtc.sample(False, sampling_fraction)\n df_train_non_mtc_sample = df_train_non_mtc.count()\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_sample)\n df_train = df_train_mtc.union(df_train_non_mtc)\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_sample\n df_numbers[\"train_all\"].loc[i] = df_train_mtc_count + df_train_non_mtc_sample\n else:\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_count\n df_numbers[\"train_all\"].loc[i] = df_train_count\n\n # numbers to logtable\n df_numbers[\"label\"].loc[i] = df_train_mtc_count\n df_numbers[\"non_label_all\"].loc[i] = df_train_non_mtc_count\n print(df_numbers)\n\n # NOTE: this type of copying wouldn't work in python, but does work in pyspark!\n df_train_orig = df_train\n df_test_orig = df_test\n df_loop = 0\n df_train_mtc = 0\n df_train_non_mtc = 0\n\n print(\"Rows in training DataFrame = \", df_train.count())\n print(\"Rows in test DataFrame = \", df_test.count())\n\n\n #################################################\n # SVM\n #################################################\n\n for j in range(3):\n df_train = df_train_orig\n df_test = df_test_orig\n\n # define svm\n lsvc = LinearSVC(featuresCol='features', labelCol='label', maxIter=10, regParam=0.1)\n\n # train the model.\n lsvcModel = lsvc.fit(df_train)\n\n print(\"fit model finished, starting scoring:\", j)\n\n # score the model on test data.\n predictions = lsvcModel.transform(df_test)\n\n df_train = 0\n df_test = 0\n lsvcModel = 0\n\n print(predictions.printSchema())\n print(predictions.show())\n\n df_write = predictions.select(\"doc_id\", \"prediction\")\n\n predictions = 0\n\n df_write = df_write.withColumn('prediction', df_write.prediction.cast(IntegerType()))\n df_write = df_write.withColumn('prediction', df_write.prediction * i)\n new_col_name = 'prediction_{i}'.format(i=i)\n df_write = df_write.withColumnRenamed('prediction', new_col_name)\n\n # write partial result to parquet\n dest_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df_write.write.parquet(dest_name, mode=\"overwrite\")\n\n df_write = 0\n\n print(\"DONE\")\n\n print(\"ALL SVM DONE round5_{h}\".format(h=h+1))\n\n df_numbers.to_csv(\"ML2_HV_v4_activeLearn_NYT_round5_sample{h}_sample_numbers.csv\".format(h=h+1), index=False)\n\n # empty memory\n spark.catalog.clearCache()\n print(\"cache cleared\")\n\n #######################################################\n ### parquet to pandas\n 
#######################################################\n\n for j in range(3):\n # read from parquet format\n for i in majortopic_codes:\n source_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df = spark.read.parquet(source_name).repartition(50)\n if i == 1:\n df_results = df\n else:\n df_results = df_results.join(df, 'doc_id', 'inner')\n\n df = df_results\n df_results = 0\n\n # convert prediction results to pandas df\n df = df.toPandas()\n\n df.to_csv(\"ML2_HV_v4_activeLearn_NYT_round5_sample{h}_svm{j}.csv\".format(h=h+1,j=j), index=False)\n\n\n#########################################################################\n# create results and leftovers tables\n#########################################################################\n\n# all of the following happen in pandas outside the spark context\nfor i in range(3):\n for j in range(3):\n df = pd.read_csv(\"ML2_HV_v4_activeLearn_NYT_round5_sample{i}_svm{j}.csv\".format(i=i+1, j=j))\n df = df.sort_values(by=['doc_id'])\n df = df.reset_index(drop=True)\n #print(df.head())\n if i == 0 and j == 0:\n df_results = df\n else:\n df_lemma = df_results.iloc[:,1:].add(df.iloc[:,1:])\n df_results = pd.concat([df_results[['doc_id']], df_lemma], axis=1)\n #print(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(i)\n\ndf_results[\"max_value\"] = df_results.iloc[:,1:].max(axis = 1, numeric_only = True)\ndf_results[\"how_many_9votes\"] = df_results.iloc[:,:-1].isin([9]).sum(1)\n\nprint(df_results.shape)\ndf_results = df_results.loc[df_results[\"max_value\"]==9]\nprint(df_results.shape)\n# first get table of multiple nine votes for active learning\ndf_activeLearn = df_results.loc[df_results[\"how_many_9votes\"]>1]\n# then get all simple verdicts\ndf_results = df_results.loc[df_results[\"how_many_9votes\"]==1]\nprint(df_results.shape)\n\n# prepare table for active learning\n# first get the full result table for further analysis later\ndf_activeLearn.to_csv(\"ML2_v4_activeLearn_NYT_r5_activeLearn_raw.csv\", index=False)\n\n# since this is a simulation a dummy value will suffice here\ndf_activeLearn[\"verdict\"] = \"dummy_value\"\ndf_activeLearn = df_activeLearn[[\"doc_id\", \"verdict\"]]\n\n# prepare table of single verdicts\ndf_results = df_results.drop(['max_value', 'how_many_9votes'], axis=1)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(9)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]]*i\n\n\ndf_results[\"verdict\"] = df_results.iloc[:,1:].sum(1)\n\ndf_results = df_results[[\"doc_id\", \"verdict\"]]\n\n# now we move back to the spark context!!\n# for that we need to move the pandas df into a spark df\ndf = spark.createDataFrame(df_results)\n# if there are no elements selected for active learning trying to move the empty pandas df into the\n# spark context will throw an error\nif df_activeLearn.empty:\n print(\"no elements selected for active learning\")\n df_al = pd.DataFrame({'col1': [1]})\n df_al = spark.createDataFrame(df_al)\nelse:\n df_al = spark.createDataFrame(df_activeLearn)\n\n# load df_original\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_round5_start.parquet\").repartition(50)\n\n# create results table\ndf_results = 
df_original.join(df, \"doc_id\", \"inner\")\nif len(df_al.columns) == 1:\n df_results_al = df_al\nelse:\n df_results_al = df_original.join(df_al, \"doc_id\", \"inner\")\n\n# create table of non-classified and training elements\nids_drop = df.select(\"doc_id\")\ndf_original = df_original.join(ids_drop, \"doc_id\", \"left_anti\")\n# once more for those selected for active learning\nif len(df_al.columns) == 1:\n print(\"no elements selected for active learning\")\nelse:\n ids_drop = df_al.select(\"doc_id\")\n df_original = df_original.join(ids_drop, \"doc_id\", \"left_anti\")\n\n# write to parquet for use in human validation script\ndf_original.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_train_and_remaining_NOTclassified.parquet\", mode=\"overwrite\")\ndf_results.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_classified.parquet\", mode=\"overwrite\")\ndf_results_al.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_activeLearn.parquet\", mode=\"overwrite\")\n\n# convert tables to pandas df and write to csv\ndf_original = df_original.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\ndf_results = df_results.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\nif len(df_al.columns) != 1:\n df_results_al = df_results_al.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\n\ndf_original.to_csv(\"ML2_HV_v4_activeLearn_NYT_r5_train_and_remaining_NOTclassified.csv\", index=False)\ndf_results.to_csv(\"ML2_HV_v4_activeLearn_NYT_r5_classified.csv\", index=False)\nif len(df_al.columns) != 1:\n df_results_al.to_csv(\"ML2_HV_v4_activeLearn_NYT_r5_activeLearn.csv\", index=False)\n\nprint(\"df_original: \", df_original.shape[0])\nprint(\"df_results: \", df_results.shape[0])\nif len(df_al.columns) != 1:\n print(\"df_results_activeLearn: \", df_results_al.shape[0])\nelse:\n print(\"df_results_activeLearn: 0\")\n\nsc.stop()\nspark.stop()\n",
"# import libraries\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import *\n\nfrom pyspark.sql.functions import col, count, lit, rand, when\n\nimport pandas as pd\nfrom math import ceil\n\n#################################################\n# spark config\n#################################################\nmtaMaster = \"spark://192.168.0.182:7077\"\n\nconf = SparkConf()\nconf.setMaster(mtaMaster)\n\nconf.set(\"spark.executor.memory\", \"24g\")\nconf.set(\"spark.driver.memory\", \"26g\")\nconf.set(\"spark.cores.max\", 96)\nconf.set(\"spark.driver.cores\", 8)\n\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\")\nconf.set(\"spark.kryoserializer.buffer\", \"256m\")\nconf.set(\"spark.kryoserializer.buffer.max\", \"256m\")\n\nconf.set(\"spark.default.parallelism\", 24)\n\nconf.set(\"spark.eventLog.enabled\", \"true\")\nconf.set(\"spark.eventLog.dir\", \"hdfs://192.168.0.182:9000/eventlog\")\nconf.set(\"spark.history.fs.logDirectory\", \"hdfs://192.168.0.182:9000/eventlog\")\n\nconf.set(\"spark.driver.maxResultSize\", \"4g\")\n\nconf.getAll()\n\n#################################################\n# create spark session\n#################################################\nspark = SparkSession.builder.appName('ML2_HV_v1_NYT_sim2_and_sim3_to_sim1_round1_human_validation').config(conf=conf).getOrCreate()\n\nsc = spark.sparkContext\n\n# check things are working\nprint(sc)\nprint(sc.defaultParallelism)\nprint(\"SPARK CONTEXT IS RUNNING\")\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (NO 23 IN THE NYT CORPUS)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]\n\n#################################################\n# read result data from round 1\n#################################################\n\ndf_results = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r1_classified.parquet\").repartition(50)\n\n# verdict to integer for the comparison with majortopic later\ndf_results = df_results.withColumn('verdict', df_results.verdict.cast(IntegerType()))\n\n#################################################\n# create table to store sample and validation numbers\n#################################################\n\ncolumns = [\"num_classified\", \"num_sample\", \"num_non_sample\", \"num_correct\", \"num_incorrect\", \"precision_in_sample\", \"num_added_to_training\"]\ndf_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)\ndf_numbers = df_numbers.fillna(0)\n\n#################################################\n# create table of samples from results\n#################################################\n\n# constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval:\nz = 1.96\ndelta = 0.05\nz_delta = z*z*0.5*0.5/(delta*delta)\nprint(\"z_delta :\", z_delta)\n\nfor i in majortopic_codes:\n df_classified = df_results.where(col('verdict') == i)\n num_classified = df_classified.count()\n df_numbers[\"num_classified\"].loc[i] = num_classified\n print(\"MTC:\", i, \"num_classified: \", num_classified)\n if num_classified > 100:\n sample_size = ceil(z_delta/(1+1/num_classified*(z_delta-1)))\n print(\"sample_size: \", sample_size)\n if sample_size < 100:\n sample_size = 100\n df_sample = 
df_classified.sort('doc_id').withColumn('random', rand()).sort('random').limit(sample_size).drop('random')\n df_sample_num = df_sample.count()\n print(\"df_sample: \", df_sample_num)\n # separate non-sample from sample elements\n ids_drop = df_sample.select(\"doc_id\")\n df_non_sample = df_classified.join(ids_drop, \"doc_id\", \"left_anti\")\n df_numbers[\"num_sample\"].loc[i] = df_sample_num\n df_numbers[\"num_non_sample\"].loc[i] = df_non_sample.count()\n else:\n df_numbers[\"num_sample\"].loc[i] = num_classified\n df_sample = df_classified\n df_non_sample = None\n\n # create table of all samples and add new sample to it\n if i == 1:\n df_sample_all = df_sample\n else:\n df_sample_all = df_sample_all.union(df_sample)\n #print(\"MTC:\", i, \"df_sample_all: \", df_sample_all.count())\n\n # create table of all non-samples and add new non-sample to it\n if i == 1:\n df_non_sample_all = None\n\n if df_non_sample != None and df_non_sample_all == None:\n df_non_sample_all = df_non_sample\n elif df_non_sample != None and df_non_sample_all != None:\n df_non_sample_all = df_non_sample_all.union(df_non_sample)\n #print(\"MTC:\", i, \"df_non_sample_all: \", df_non_sample_all.count())\n print(\"MTC:\", i)\n\n\n#################################################\n# check precision by majortopic codes\n#################################################\n\n# count correctly classified and precision for each majortopic code and write to table of numbers\ndf_correctly_classified = df_sample_all.where(col('majortopic') == col('verdict'))\nfor i in majortopic_codes:\n num_correct = df_correctly_classified.where(col('verdict') == i).count()\n df_numbers[\"num_correct\"].loc[i] = num_correct\n df_numbers[\"precision_in_sample\"].loc[i] = num_correct/df_numbers[\"num_sample\"].loc[i]\n\n# count incorrectly classified for debugging and checking\ndf_incorrectly_classified = df_sample_all.where(col('majortopic') != col('verdict'))\nfor i in majortopic_codes:\n num_incorrect = df_incorrectly_classified.where(col('verdict') == i).count()\n df_numbers[\"num_incorrect\"].loc[i] = num_incorrect\n\ndf_numbers['num_added_to_training'] = df_numbers['num_correct']\n\nprint(df_numbers)\n\n\n#################################################\n# add only validated positives to training set\n#################################################\n\n# sometimes there will be no non-sample elements\nif df_non_sample_all == None:\n df_non_sample_all = \"empty\"\n\n# the reason for creating these \"empty\" values, is because they will persist after we clear the\n# cache, and we can use them later in the workflow control\n\n# write all tables to parquet before clearing memory\ndf_correctly_classified.write.parquet(\"hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet\", mode=\"overwrite\")\ndf_incorrectly_classified.write.parquet(\"hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet\", mode=\"overwrite\")\n# sometimes there will be no non-sample elements\nif df_non_sample_all != \"empty\":\n df_non_sample_all.write.parquet(\"hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet\", mode=\"overwrite\")\n\n# write df_numbers to csv\ndf_numbers.to_csv(\"ML2_HV_v1_NYT_human_validation_numbers_r1.csv\", index=True)\n\n# empty memory\nspark.catalog.clearCache()\nprint(\"cache cleared\")\n\n#################################################\n# prepare df_original to add tables to it\n#################################################\n\ndf_original = 
spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r1_train_and_remaining_NOTclassified.parquet\").repartition(50)\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_original = df_original.withColumnRenamed('majortopic', 'mtc_original')\ndf_original = df_original.withColumn('majortopic', df_original['mtc_original'])\n# finally, create the new train id column\ndf_original = df_original.withColumn(\"train_r2\", when(df_original[\"sim\"] != 1, 1).otherwise(0))\n\n#################################################\n# add df_non_sample_replace back to df_original\n#################################################\n\nif df_non_sample_all != \"empty\":\n print(\"df_non_sample_replace is NOT empty\")\n\n df_non_sample_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet\").repartition(50)\n # we need to create a new majortopic column, because we are now adding back in elements with\n # potentially new labels\n df_non_sample_replace = df_non_sample_replace.withColumnRenamed('majortopic', 'mtc_original')\n df_non_sample_replace = df_non_sample_replace.withColumn('majortopic', df_non_sample_replace['mtc_original'])\n # create the new train id column\n df_non_sample_replace = df_non_sample_replace.withColumn(\"train_r2\", lit(0))\n # drop the extra columns to be able to add it back to df_original\n df_non_sample_replace = df_non_sample_replace.drop('verdict')\n\n # add df_non_sample_replace elements to df_original\n df_original = df_original.union(df_non_sample_replace)\n\nelse:\n print(\"df_non_sample_replace is empty\")\n\n#################################################\n# add df_correct_replace back to df_original\n#################################################\n\ndf_correct_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet\").repartition(50)\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_correct_replace = df_correct_replace.withColumnRenamed('majortopic', 'mtc_original')\ndf_correct_replace = df_correct_replace.withColumn('majortopic', df_correct_replace['verdict'])\n# create the new train id column\ndf_correct_replace = df_correct_replace.withColumn(\"train_r2\", lit(1))\n# drop the extra columns to be able to add it back to df_original\ndf_correct_replace = df_correct_replace.drop('verdict')\n\n# add df_correct_replace elements to df_original\ndf_original = df_original.union(df_correct_replace)\n\n#################################################\n# add df_wrong_replace back to df_original\n#################################################\n\ndf_wrong_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet\").repartition(50)\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_wrong_replace = df_wrong_replace.withColumnRenamed('majortopic', 'mtc_original')\ndf_wrong_replace = df_wrong_replace.withColumn('majortopic', df_wrong_replace['mtc_original'])\n# create the new train id column\ndf_wrong_replace = df_wrong_replace.withColumn(\"train_r2\", lit(0))\n# drop the extra columns to be able to add it back to df_original\ndf_wrong_replace = df_wrong_replace.drop('verdict')\n\n# add df_wrong_replace elements to df_original\ndf_original = df_original.union(df_wrong_replace)\n\n#################################################\n# final 
write operations\n#################################################\n\ndf_original.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round2_start.parquet\", mode=\"overwrite\")\n\ndf_original.groupBy(\"train_r2\").count().show(n=30)\n\n# empty memory\nspark.catalog.clearCache()\nprint(\"cache cleared\")\n\n# write to pandas and export to csv for debugging\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round2_start.parquet\").repartition(50)\ndf_original = df_original.drop('text', 'words', 'features', 'raw_features').toPandas()\ndf_original.to_csv(\"ML2_HV_v1_NYT_round2_starting_table.csv\", index=False)\n\nsc.stop()\nspark.stop()\n",
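The sampling constants used in the validation scripts above (z = 1.96, delta = 0.05, worst-case p = 0.5) give Cochran's base sample size of 384.16, and the loop applies a finite-population correction with a floor of 100 documents per topic code. A minimal standalone sketch of that rule (the function name and the explicit floor parameter are illustrative, not from the scripts):

from math import ceil

def validation_sample_size(population, z=1.96, delta=0.05, p=0.5, floor=100):
    # Cochran's n0 = z^2 * p * (1 - p) / delta^2; 384.16 for these defaults
    z_delta = z * z * p * (1 - p) / (delta * delta)
    # finite-population correction, written exactly as in the scripts
    n = ceil(z_delta / (1 + 1 / population * (z_delta - 1)))
    # the scripts only compute a sample when population > 100 and never draw fewer than 100
    return max(n, floor)

print(validation_sample_size(5000))  # -> 357
print(validation_sample_size(101))   # -> 100 (the computed 81 is raised to the floor)

For example, a topic with 5000 classified documents is validated on only 357 of them, which is what keeps per-round human validation tractable.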
"# import libraries\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import *\n\nfrom pyspark.sql.functions import col, count, lit, rand, when\n\nimport pandas as pd\nfrom math import ceil\n\n#################################################\n# spark config\n#################################################\nmtaMaster = \"spark://192.168.0.182:7077\"\n\nconf = SparkConf()\nconf.setMaster(mtaMaster)\n\nconf.set(\"spark.executor.memory\", \"24g\")\nconf.set(\"spark.driver.memory\", \"26g\")\nconf.set(\"spark.cores.max\", 96)\nconf.set(\"spark.driver.cores\", 8)\n\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\")\nconf.set(\"spark.kryoserializer.buffer\", \"256m\")\nconf.set(\"spark.kryoserializer.buffer.max\", \"256m\")\n\nconf.set(\"spark.default.parallelism\", 24)\n\nconf.set(\"spark.eventLog.enabled\", \"true\")\nconf.set(\"spark.eventLog.dir\", \"hdfs://192.168.0.182:9000/eventlog\")\nconf.set(\"spark.history.fs.logDirectory\", \"hdfs://192.168.0.182:9000/eventlog\")\n\nconf.set(\"spark.driver.maxResultSize\", \"4g\")\n\nconf.getAll()\n\n#################################################\n# create spark session\n#################################################\nspark = SparkSession.builder.appName('ML2_HV_v1_NYT_sim1_and_sim3_to_sim2_round5_human_validation').config(conf=conf).getOrCreate()\n\nsc = spark.sparkContext\n\n# check things are working\nprint(sc)\nprint(sc.defaultParallelism)\nprint(\"SPARK CONTEXT IS RUNNING\")\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (NO 23 IN THE NYT CORPUS)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]\n\n#################################################\n# read result data from round 3\n#################################################\n\ndf_results = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_classified.parquet\").repartition(50)\n\n# verdict to integer for the comparison with majortopic later\ndf_results = df_results.withColumn('verdict', df_results.verdict.cast(IntegerType()))\n\n#################################################\n# create table to store sample and validation numbers\n#################################################\n\ncolumns = [\"num_classified\", \"num_sample\", \"num_non_sample\", \"num_correct\", \"num_incorrect\", \"precision_in_sample\", \"num_added_to_training\"]\ndf_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)\ndf_numbers = df_numbers.fillna(0)\n\n#################################################\n# create table of samples from results\n#################################################\n\n# constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval:\nz = 1.96\ndelta = 0.05\nz_delta = z*z*0.5*0.5/(delta*delta)\nprint(\"z_delta :\", z_delta)\n\nfor i in majortopic_codes:\n df_classified = df_results.where(col('verdict') == i)\n num_classified = df_classified.count()\n df_numbers[\"num_classified\"].loc[i] = num_classified\n print(\"MTC:\", i, \"num_classified: \", num_classified)\n if num_classified > 100:\n sample_size = ceil(z_delta/(1+1/num_classified*(z_delta-1)))\n print(\"sample_size: \", sample_size)\n if sample_size < 100:\n sample_size = 100\n df_sample = 
df_classified.sort('doc_id').withColumn('random', rand()).sort('random').limit(sample_size).drop('random')\n df_sample_num = df_sample.count()\n print(\"df_sample: \", df_sample_num)\n # separate non-sample from sample elements\n ids_drop = df_sample.select(\"doc_id\")\n df_non_sample = df_classified.join(ids_drop, \"doc_id\", \"left_anti\")\n df_numbers[\"num_sample\"].loc[i] = df_sample_num\n df_numbers[\"num_non_sample\"].loc[i] = df_non_sample.count()\n else:\n df_numbers[\"num_sample\"].loc[i] = num_classified\n df_sample = df_classified\n df_non_sample = None\n\n # create table of all samples and add new sample to it\n if i == 1:\n df_sample_all = df_sample\n else:\n df_sample_all = df_sample_all.union(df_sample)\n #print(\"MTC:\", i, \"df_sample_all: \", df_sample_all.count())\n\n # create table of all non-samples and add new non-sample to it\n if i == 1:\n df_non_sample_all = None\n\n if df_non_sample is not None and df_non_sample_all is None:\n df_non_sample_all = df_non_sample\n elif df_non_sample is not None and df_non_sample_all is not None:\n df_non_sample_all = df_non_sample_all.union(df_non_sample)\n #print(\"MTC:\", i, \"df_non_sample_all: \", df_non_sample_all.count())\n print(\"MTC:\", i)\n\n\n#################################################\n# check precision by majortopic codes\n#################################################\n\n# count correctly classified and precision for each majortopic code and write to table of numbers\ndf_correctly_classified = df_sample_all.where(col('majortopic') == col('verdict'))\nfor i in majortopic_codes:\n num_correct = df_correctly_classified.where(col('verdict') == i).count()\n df_numbers[\"num_correct\"].loc[i] = num_correct\n df_numbers[\"precision_in_sample\"].loc[i] = num_correct/df_numbers[\"num_sample\"].loc[i]\n\n# count incorrectly classified for debugging and checking\ndf_incorrectly_classified = df_sample_all.where(col('majortopic') != col('verdict'))\nfor i in majortopic_codes:\n num_incorrect = df_incorrectly_classified.where(col('verdict') == i).count()\n df_numbers[\"num_incorrect\"].loc[i] = num_incorrect\n\nprint(df_numbers)\n\n\n#################################################\n# create tables of elements based on precision\n#################################################\n\n# create tables for sorting elements based on precision results\n# where precision is equal to or greater than 75%\n# NOTE: validated wrongly classified elements will NOT be added to the results with the wrong major\n# topic code, instead they will be added to the unclassified elements as in rounds 1&2\ndf_replace_all = None\n# where precision is less than 75%\ndf_non_sample_replace = None\ndf_correct_replace = None\ndf_wrong_replace = None\n\nfor i in majortopic_codes:\n print(\"create tables MTC:\", i)\n if df_numbers[\"precision_in_sample\"].loc[i] >= 0.75:\n # in this case add all elements from sample and non-sample to the training set with\n # new major topic code i, EXCEPT for validated negatives, those are added back into the\n # test set\n # first add wrong sample elements to their table\n df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))\n if df_wrong_replace is None:\n df_wrong_replace = df_lemma\n else:\n df_wrong_replace = df_wrong_replace.union(df_lemma)\n # get doc_ids for these elements to remove them from the rest of the elements classified as\n # belonging to major topic i\n ids_drop = df_lemma.select(\"doc_id\")\n # get all elements classified as belonging to major topic code i\n df_lemma = 
df_results.where(col('verdict') == i)\n # remove wrongly classified from df_lemma\n df_lemma = df_lemma.join(ids_drop, \"doc_id\", \"left_anti\")\n # add df_lemma to df_replace_all\n if df_replace_all is None:\n df_replace_all = df_lemma\n else:\n df_replace_all = df_replace_all.union(df_lemma)\n # write numbers to df_numbers\n df_numbers[\"num_added_to_training\"].loc[i] = df_lemma.count()\n #print(\"MTC:\", i, \"df_replace_all: \", df_replace_all.count())\n else:\n # in this case add only correct elements from sample to training set, the rest go back in\n # the test set\n # first add non-sample elements to their table, BUT we have to check whether non-sample elements\n # exist\n if df_non_sample_all is not None:\n df_lemma = df_non_sample_all.where(col('verdict') == i)\n if df_non_sample_replace is None:\n df_non_sample_replace = df_lemma\n else:\n df_non_sample_replace = df_non_sample_replace.union(df_lemma)\n else:\n df_non_sample_replace = None\n #print(\"MTC:\", i, \"df_non_sample_replace: \", df_non_sample_replace.count())\n # second add correct sample elements to their table\n df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') == col('verdict'))\n if df_correct_replace is None:\n df_correct_replace = df_lemma\n else:\n df_correct_replace = df_correct_replace.union(df_lemma)\n df_numbers[\"num_added_to_training\"].loc[i] = df_lemma.count()\n #print(\"MTC:\", i, \"df_correct_replace: \", df_correct_replace.count())\n # finally add wrong sample elements to their table\n df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))\n if df_wrong_replace is None:\n df_wrong_replace = df_lemma\n else:\n df_wrong_replace = df_wrong_replace.union(df_lemma)\n #print(\"MTC:\", i, \"df_wrong_replace: \", df_wrong_replace.count())\n\n# sometimes there will be no major topic code with precision >= 75%\nif df_replace_all is None:\n df_replace_all = \"empty\"\n\n# sometimes there will be no non-sample elements\nif df_non_sample_replace is None:\n df_non_sample_replace = \"empty\"\n\n# the reason for creating these \"empty\" values is that they will persist after we clear the\n# cache, and we can use them later in the workflow control\n\n# write all tables to parquet before clearing memory\ndf_correct_replace.write.parquet(\"hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet\", mode=\"overwrite\")\ndf_wrong_replace.write.parquet(\"hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet\", mode=\"overwrite\")\n# sometimes there will be no non-sample elements\nif df_non_sample_replace != \"empty\":\n df_non_sample_replace.write.parquet(\"hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet\", mode=\"overwrite\")\n# sometimes there will be no major topic code with precision >= 75%\nif df_replace_all != \"empty\":\n df_replace_all.write.parquet(\"hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet\", mode=\"overwrite\")\n\n# write df_numbers to csv\ndf_numbers.to_csv(\"ML2_HV_v1_NYT_human_validation_numbers_r5.csv\", index=True)\n\n# empty memory\nspark.catalog.clearCache()\nprint(\"cache cleared\")\n\n#################################################\n# prepare df_original to add tables to it\n#################################################\n\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_r5_train_and_remaining_NOTclassified.parquet\").repartition(50)\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new 
labels\ndf_original = df_original.withColumnRenamed('majortopic', 'mtc_after_r4')\ndf_original = df_original.withColumn('majortopic', df_original['mtc_after_r4'])\n# finally, create the new train id column\ndf_original = df_original.withColumn(\"train_r6\", when(df_original[\"train_r5\"] == 1, 1).otherwise(0))\n\n#################################################\n# add df_replace_all back to df_original\n#################################################\n\nif df_replace_all != \"empty\":\n print(\"df_replace_all is NOT empty\")\n\n df_replace_all = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet\").repartition(50)\n # we need to create a new majortopic column, because we are now adding back in elements with\n # potentially new labels\n df_replace_all = df_replace_all.withColumnRenamed('majortopic', 'mtc_after_r4')\n df_replace_all = df_replace_all.withColumn('majortopic', df_replace_all['verdict'])\n # create the new train id column\n df_replace_all = df_replace_all.withColumn(\"train_r6\", lit(1))\n # drop the extra columns to be able to add it back to df_original\n df_replace_all = df_replace_all.drop('verdict')\n\n # add df_replace_all elements to df_original\n df_original = df_original.union(df_replace_all)\n\nelse:\n print(\"df_replace_all is empty\")\n\n#################################################\n# add df_non_sample_replace back to df_original\n#################################################\n\nif df_non_sample_replace != \"empty\":\n print(\"df_non_sample_replace is NOT empty\")\n\n df_non_sample_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet\").repartition(50)\n # we need to create a new majortopic column, because we are now adding back in elements with\n # potentially new labels\n df_non_sample_replace = df_non_sample_replace.withColumnRenamed('majortopic', 'mtc_after_r4')\n df_non_sample_replace = df_non_sample_replace.withColumn('majortopic', df_non_sample_replace['mtc_after_r4'])\n # create the new train id column\n df_non_sample_replace = df_non_sample_replace.withColumn(\"train_r6\", lit(0))\n # drop the extra columns to be able to add it back to df_original\n df_non_sample_replace = df_non_sample_replace.drop('verdict')\n\n # add df_non_sample_replace elements to df_original\n df_original = df_original.union(df_non_sample_replace)\n\nelse:\n print(\"df_non_sample_replace is empty\")\n\n#################################################\n# add df_correct_replace back to df_original\n#################################################\n\ndf_correct_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet\").repartition(50)\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_correct_replace = df_correct_replace.withColumnRenamed('majortopic', 'mtc_after_r4')\ndf_correct_replace = df_correct_replace.withColumn('majortopic', df_correct_replace['verdict'])\n# create the new train id column\ndf_correct_replace = df_correct_replace.withColumn(\"train_r6\", lit(1))\n# drop the extra columns to be able to add it back to df_original\ndf_correct_replace = df_correct_replace.drop('verdict')\n\n# add df_correct_replace elements to df_original\ndf_original = df_original.union(df_correct_replace)\n\n#################################################\n# add df_wrong_replace back to df_original\n#################################################\n\ndf_wrong_replace = 
spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet\").repartition(50)\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_wrong_replace = df_wrong_replace.withColumnRenamed('majortopic', 'mtc_after_r4')\ndf_wrong_replace = df_wrong_replace.withColumn('majortopic', df_wrong_replace['mtc_after_r4'])\n# create the new train id column\ndf_wrong_replace = df_wrong_replace.withColumn(\"train_r6\", lit(0))\n# drop the extra columns to be able to add it back to df_original\ndf_wrong_replace = df_wrong_replace.drop('verdict')\n\n# add df_wrong_replace elements to df_original\ndf_original = df_original.union(df_wrong_replace)\n\n#################################################\n# final write operations\n#################################################\n\ndf_original.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet\", mode=\"overwrite\")\n\ndf_original.groupBy(\"train_r6\").count().show(n=30)\n\n# empty memory\nspark.catalog.clearCache()\nprint(\"cache cleared\")\n\n# write to pandas and export to csv for debugging\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v1_NYT_round6_start.parquet\").repartition(50)\ndf_original = df_original.drop('text', 'words', 'features', 'raw_features').toPandas()\ndf_original.to_csv(\"ML2_HV_v1_NYT_round6_starting_table.csv\", index=False)\n\nsc.stop()\nspark.stop()\n",
"# import libraries\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import *\n\nfrom pyspark.sql.functions import col, count, when\n\nfrom pyspark.ml.classification import LinearSVC\n\nimport pandas as pd\n\n#################################################\n# spark config\n#################################################\nmtaMaster = \"spark://192.168.0.182:7077\"\n\nconf = SparkConf()\nconf.setMaster(mtaMaster)\n\nconf.set(\"spark.executor.memory\", \"24g\")\nconf.set(\"spark.driver.memory\", \"26g\")\nconf.set(\"spark.cores.max\", 96)\nconf.set(\"spark.driver.cores\", 8)\n\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\")\nconf.set(\"spark.kryoserializer.buffer\", \"256m\")\nconf.set(\"spark.kryoserializer.buffer.max\", \"256m\")\n\nconf.set(\"spark.default.parallelism\", 24)\n\nconf.set(\"spark.eventLog.enabled\", \"true\")\nconf.set(\"spark.eventLog.dir\", \"hdfs://192.168.0.182:9000/eventlog\")\nconf.set(\"spark.history.fs.logDirectory\", \"hdfs://192.168.0.182:9000/eventlog\")\n\nconf.set(\"spark.driver.maxResultSize\", \"2g\")\n\nconf.getAll()\n\n#################################################\n# create spark session\n#################################################\nspark = SparkSession.builder.appName('ML2_HV_v2_savNegInf_NYT_sim1_and_sim3_to_sim2_round4').config(conf=conf).getOrCreate()\n\nsc = spark.sparkContext\n\n# check things are working\nprint(sc)\nprint(sc.defaultParallelism)\nprint(\"SPARK CONTEXT IS RUNNING\")\n\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (NO 23 IN THE NYT CORPUS)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]\n\n\n#################################################\n# loop starts here\n#################################################\n\nfor h in range(3):\n # read table from hdfs\n df_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v2_savNegInf_NYT_round4_start.parquet\").repartition(50)\n\n # check loaded data \n print(df_original.printSchema())\n print(df_original.show())\n df_original.groupBy(\"majortopic\").count().show(30, False)\n\n #################################################\n # prepare to log sample numbers\n #################################################\n\n columns = [\"label\", \"non_label_all\", \"non_label_sample\", \"train_all\"]\n\n df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)\n\n for i in majortopic_codes:\n #################################################\n # prepare df for svm requirements\n #################################################\n print(\"majortopic is:\", i)\n\n # separate majortopic\n df_original = df_original.withColumn(\"label\", when(df_original[\"majortopic\"] == i, 1).otherwise(0))\n\n # label has to be double for SVM\n df_original = df_original.withColumn('label', df_original.label.cast(DoubleType()))\n\n #################################################\n # separate training and test sets\n #################################################\n\n df_train = df_original.where((col('train_r4') == 1) | (col('train_r2_neg') == i) | (col('train_r3_neg') == i) | (col('train_r4_neg') == i))\n df_test = df_original.where((col('train_r4') == 0) & (col('train_r2_neg') != i) & (col('train_r3_neg') != i) & (col('train_r4_neg') != 
i))\n\n # make training data proportional with regards to label occurrence frequency\n df_train_mtc = df_train.where(col('label') == 1)\n df_train_non_mtc = df_train.where(col('label') == 0)\n\n df_train_count = df_train.count()\n df_train_mtc_count = df_train_mtc.count()\n df_train_non_mtc_count = df_train_non_mtc.count()\n print(\"Rows in training DataFrame with label = \", df_train_mtc_count)\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_count)\n\n if df_train_mtc_count/df_train_non_mtc_count < 0.1:\n if df_train_mtc_count*10 < df_train_count//10:\n sample_num = df_train_count//10\n else: sample_num = df_train_mtc_count*10\n print(\"sample_num = \", sample_num)\n print(\"df_train_non_mtc = \", df_train_non_mtc_count)\n sampling_fraction = sample_num/df_train_non_mtc_count\n print(\"sampling_fraction = \", sampling_fraction)\n df_train_non_mtc = df_train_non_mtc.sample(False, sampling_fraction)\n df_train_non_mtc_sample = df_train_non_mtc.count()\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_sample)\n df_train = df_train_mtc.union(df_train_non_mtc)\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_sample\n df_numbers[\"train_all\"].loc[i] = df_train_mtc_count + df_train_non_mtc_sample\n else:\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_count\n df_numbers[\"train_all\"].loc[i] = df_train_count\n\n # numbers to logtable\n df_numbers[\"label\"].loc[i] = df_train_mtc_count\n df_numbers[\"non_label_all\"].loc[i] = df_train_non_mtc_count\n print(df_numbers)\n\n # NOTE: this type of copying wouldn't work in python, but does work in pyspark!\n df_train_orig = df_train\n df_test_orig = df_test\n df_loop = 0\n df_train_mtc = 0\n df_train_non_mtc = 0\n\n print(\"Rows in training DataFrame = \", df_train.count())\n print(\"Rows in test DataFrame = \", df_test.count())\n\n\n #################################################\n # SVM\n #################################################\n\n for j in range(3):\n df_train = df_train_orig\n df_test = df_test_orig\n\n # define svm\n lsvc = LinearSVC(featuresCol='features', labelCol='label', maxIter=10, regParam=0.1)\n\n # train the model.\n lsvcModel = lsvc.fit(df_train)\n\n print(\"fit model finished, starting scoring:\", j)\n\n # score the model on test data.\n predictions = lsvcModel.transform(df_test)\n\n df_train = 0\n df_test = 0\n lsvcModel = 0\n\n print(predictions.printSchema())\n print(predictions.show())\n\n df_write = predictions.select(\"doc_id\", \"prediction\")\n\n predictions = 0\n\n df_write = df_write.withColumn('prediction', df_write.prediction.cast(IntegerType()))\n df_write = df_write.withColumn('prediction', df_write.prediction * i)\n new_col_name = 'prediction_{i}'.format(i=i)\n df_write = df_write.withColumnRenamed('prediction', new_col_name)\n\n # write partial result to parquet\n dest_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df_write.write.parquet(dest_name, mode=\"overwrite\")\n\n df_write = 0\n\n print(\"DONE\")\n\n print(\"ALL SVM DONE round4_{h}\".format(h=h+1))\n\n df_numbers.to_csv(\"ML2_HV_v2_savNegInf_NYT_round4_sample{h}_sample_numbers.csv\".format(h=h+1), index=False)\n\n # empty memory\n spark.catalog.clearCache()\n print(\"cache cleared\")\n\n #######################################################\n ### parquet to pandas\n #######################################################\n\n for j in range(3):\n # read from parquet format\n for i 
in majortopic_codes:\n source_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df = spark.read.parquet(source_name).repartition(50)\n if i == 1:\n df_results = df\n else:\n df_results = df_results.join(df, 'doc_id', 'inner')\n\n df = df_results\n df_results = 0\n\n # convert prediction results to pandas df\n df = df.toPandas()\n\n df.to_csv(\"ML2_HV_v2_savNegInf_NYT_round4_sample{h}_svm{j}.csv\".format(h=h+1,j=j), index=False)\n\n\n#########################################################################\n# create results and leftovers tables\n#########################################################################\n\n# all of the following happen in pandas outside the spark context\nfor i in range(3):\n for j in range(3):\n df = pd.read_csv(\"ML2_HV_v2_savNegInf_NYT_round4_sample{i}_svm{j}.csv\".format(i=i+1, j=j))\n df = df.sort_values(by=['doc_id'])\n df = df.reset_index(drop=True)\n #print(df.head())\n if i == 0 and j == 0:\n df_results = df\n else:\n df_lemma = df_results.iloc[:,1:].add(df.iloc[:,1:])\n df_results = pd.concat([df_results[['doc_id']], df_lemma], axis=1)\n #print(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(i)\n\ndf_results[\"max_value\"] = df_results.iloc[:,1:].max(axis = 1, numeric_only = True)\ndf_results[\"how_many_9votes\"] = df_results.iloc[:,1:-1].isin([9]).sum(1) # exclude doc_id and max_value from the vote count\n\n# keep only rows with verdicts\nprint(df_results.shape)\ndf_results = df_results.loc[df_results[\"max_value\"]==9]\nprint(df_results.shape)\n# keep only rows with a single verdict\ndf_results = df_results.loc[df_results[\"how_many_9votes\"]==1]\nprint(df_results.shape)\n\n# prepare table of single verdicts\ndf_results = df_results.drop(['max_value', 'how_many_9votes'], axis=1)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(9)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]]*i\n\n\ndf_results[\"verdict\"] = df_results.iloc[:,1:].sum(1)\n\ndf_results = df_results[[\"doc_id\", \"verdict\"]]\n\n# now we move back to the spark context!!\n# for that we need to move the pandas df into a spark df\ndf = spark.createDataFrame(df_results)\n\n# load df_original\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v2_savNegInf_NYT_round4_start.parquet\").repartition(50)\n\n# create results table\ndf_results = df_original.join(df, \"doc_id\", \"inner\")\n\n# create table of non-classified and training elements\nids_drop = df.select(\"doc_id\")\ndf_original = df_original.join(ids_drop, \"doc_id\", \"left_anti\")\n\n# write to parquet for use in human validation script\ndf_original.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v2_savNegInf_NYT_r4_train_and_remaining_NOTclassified.parquet\", mode=\"overwrite\")\ndf_results.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v2_savNegInf_NYT_r4_classified.parquet\", mode=\"overwrite\")\n\n# convert tables to pandas df and write to csv\ndf_original = df_original.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\ndf_results = df_results.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\n\ndf_original.to_csv(\"ML2_HV_v2_savNegInf_NYT_r4_train_and_remaining_NOTclassified.csv\", 
index=False)\ndf_results.to_csv(\"ML2_HV_v2_savNegInf_NYT_r4_classified.csv\", index=False)\n\nprint(\"df_original: \", df_original.shape[0])\nprint(\"df_results: \", df_results.shape[0])\n\nsc.stop()\nspark.stop()\n",
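The pandas stage above decodes a 3 x 3 ensemble (three negative resamples, three LinearSVC fits each): every run writes prediction_i in {0, i}, the nine runs are summed, floordiv(i) turns the sum into a vote count, max_value == 9 keeps only unanimous rows, how_many_9votes == 1 discards documents unanimous for more than one topic, and floordiv(9) * i converts the surviving column back into its topic code. A small worked illustration of the decode for one topic column (the doc_ids and totals are made up):

import pandas as pd

# summed predictions for topic 12 across nine runs: 9*12 (unanimous) and 7*12
votes = pd.DataFrame({"doc_id": [7, 8], "prediction_12": [108, 84]})
votes["prediction_12"] = votes["prediction_12"].floordiv(12)  # -> 9 and 7 votes

unanimous = votes[votes["prediction_12"] == 9].copy()         # doc_id 8 is dropped
unanimous["verdict"] = unanimous["prediction_12"].floordiv(9) * 12
print(unanimous)  # doc_id 7 receives verdict 12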
"# import libraries\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import *\n\nfrom pyspark.sql.functions import col, count, lit, rand, when\n\nimport pandas as pd\nfrom math import ceil\n\n#################################################\n# spark config\n#################################################\nmtaMaster = \"spark://192.168.0.182:7077\"\n\nconf = SparkConf()\nconf.setMaster(mtaMaster)\n\nconf.set(\"spark.executor.memory\", \"24g\")\nconf.set(\"spark.driver.memory\", \"26g\")\nconf.set(\"spark.cores.max\", 96)\nconf.set(\"spark.driver.cores\", 8)\n\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\")\nconf.set(\"spark.kryoserializer.buffer\", \"256m\")\nconf.set(\"spark.kryoserializer.buffer.max\", \"256m\")\n\nconf.set(\"spark.default.parallelism\", 24)\n\nconf.set(\"spark.eventLog.enabled\", \"true\")\nconf.set(\"spark.eventLog.dir\", \"hdfs://192.168.0.182:9000/eventlog\")\nconf.set(\"spark.history.fs.logDirectory\", \"hdfs://192.168.0.182:9000/eventlog\")\n\nconf.set(\"spark.driver.maxResultSize\", \"4g\")\n\nconf.getAll()\n\n#################################################\n# create spark session\n#################################################\nspark = SparkSession.builder.appName('ML2_HV_v3_stepwise_NYT_sim1_and_sim2_to_sim3_round1_human_validation').config(conf=conf).getOrCreate()\n\nsc = spark.sparkContext\n\n# check things are working\nprint(sc)\nprint(sc.defaultParallelism)\nprint(\"SPARK CONTEXT IS RUNNING\")\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (NO 23 IN THE NYT CORPUS)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]\n\n#################################################\n# read result data from round 1\n#################################################\n\ndf_results = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_r1_classified.parquet\").repartition(50)\n\n# verdict to integer for the comparison with majortopic later\ndf_results = df_results.withColumn('verdict', df_results.verdict.cast(IntegerType()))\n\n#################################################\n# create table to store sample and validation numbers\n#################################################\n\ncolumns = [\"num_classified\", \"num_sample\", \"num_non_sample\", \"num_correct\", \"num_incorrect\", \"precision_in_sample\", \"num_added_to_training\"]\ndf_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)\ndf_numbers = df_numbers.fillna(0)\n\n#################################################\n# create table of samples from results\n#################################################\n\n# constants for sample size calculation for 95% confidence with +-0.05 precision confidence interval:\nz = 1.96\ndelta = 0.05\nz_delta = z*z*0.5*0.5/(delta*delta)\nprint(\"z_delta :\", z_delta)\n\nfor i in majortopic_codes:\n df_classified = df_results.where(col('verdict') == i)\n num_classified = df_classified.count()\n df_numbers[\"num_classified\"].loc[i] = num_classified\n print(\"MTC:\", i, \"num_classified: \", num_classified)\n if num_classified > 100:\n sample_size = ceil(z_delta/(1+1/num_classified*(z_delta-1)))\n print(\"sample_size: \", sample_size)\n if sample_size < 100:\n sample_size = 100\n df_sample = 
df_classified.sort('doc_id').withColumn('random', rand()).sort('random').limit(sample_size).drop('random')\n df_sample_num = df_sample.count()\n print(\"df_sample: \", df_sample_num)\n # separate non-sample from sample elements\n ids_drop = df_sample.select(\"doc_id\")\n df_non_sample = df_classified.join(ids_drop, \"doc_id\", \"left_anti\")\n df_numbers[\"num_sample\"].loc[i] = df_sample_num\n df_numbers[\"num_non_sample\"].loc[i] = df_non_sample.count()\n else:\n df_numbers[\"num_sample\"].loc[i] = num_classified\n df_sample = df_classified\n df_non_sample = None\n\n # create table of all samples and add new sample to it\n if i == 1:\n df_sample_all = df_sample\n else:\n df_sample_all = df_sample_all.union(df_sample)\n #print(\"MTC:\", i, \"df_sample_all: \", df_sample_all.count())\n\n # create table of all non-samples and add new non-sample to it\n if i == 1:\n df_non_sample_all = None\n\n if df_non_sample is not None and df_non_sample_all is None:\n df_non_sample_all = df_non_sample\n elif df_non_sample is not None and df_non_sample_all is not None:\n df_non_sample_all = df_non_sample_all.union(df_non_sample)\n #print(\"MTC:\", i, \"df_non_sample_all: \", df_non_sample_all.count())\n print(\"MTC:\", i)\n\n\n#################################################\n# check precision by majortopic codes\n#################################################\n\n# count correctly classified and precision for each majortopic code and write to table of numbers\ndf_correctly_classified = df_sample_all.where(col('majortopic') == col('verdict'))\nfor i in majortopic_codes:\n num_correct = df_correctly_classified.where(col('verdict') == i).count()\n df_numbers[\"num_correct\"].loc[i] = num_correct\n df_numbers[\"precision_in_sample\"].loc[i] = num_correct/df_numbers[\"num_sample\"].loc[i]\n\n# count incorrectly classified for debugging and checking\ndf_incorrectly_classified = df_sample_all.where(col('majortopic') != col('verdict'))\nfor i in majortopic_codes:\n num_incorrect = df_incorrectly_classified.where(col('verdict') == i).count()\n df_numbers[\"num_incorrect\"].loc[i] = num_incorrect\n\nprint(df_numbers)\n\n\n#################################################\n# create tables of elements based on precision\n#################################################\n\n# create tables for sorting elements based on precision results\n# where precision is equal to or greater than 85%\n# NOTE: validated wrongly classified elements will NOT be added to the results with the wrong major\n# topic code, instead they will be added to the unclassified elements\ndf_replace_all = None\n# where precision is less than 85%\ndf_non_sample_replace = None\ndf_correct_replace = None\ndf_wrong_replace = None\n\nfor i in majortopic_codes:\n print(\"create tables MTC:\", i)\n if df_numbers[\"precision_in_sample\"].loc[i] >= 0.85:\n # in this case add all elements from sample and non-sample to the training set with\n # new major topic code i, EXCEPT for validated negatives, those are added back into the\n # test set\n # first add wrong sample elements to their table\n df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))\n if df_wrong_replace is None:\n df_wrong_replace = df_lemma\n else:\n df_wrong_replace = df_wrong_replace.union(df_lemma)\n # get doc_ids for these elements to remove them from the rest of the elements classified as\n # belonging to major topic i\n ids_drop = df_lemma.select(\"doc_id\")\n # get all elements classified as belonging to major topic code i\n df_lemma = 
df_results.where(col('verdict') == i)\n # remove wrongly classified from df_lemma\n df_lemma = df_lemma.join(ids_drop, \"doc_id\", \"left_anti\")\n # add df_lemma to df_replace_all\n if df_replace_all is None:\n df_replace_all = df_lemma\n else:\n df_replace_all = df_replace_all.union(df_lemma)\n # write numbers to df_numbers\n df_numbers[\"num_added_to_training\"].loc[i] = df_lemma.count()\n #print(\"MTC:\", i, \"df_replace_all: \", df_replace_all.count())\n else:\n # in this case add only correct elements from sample to training set, the rest go back in\n # the test set\n # first add non-sample elements to their table, BUT we have to check whether non-sample elements\n # exist\n if df_non_sample_all is not None:\n df_lemma = df_non_sample_all.where(col('verdict') == i)\n if df_non_sample_replace is None:\n df_non_sample_replace = df_lemma\n else:\n df_non_sample_replace = df_non_sample_replace.union(df_lemma)\n else:\n df_non_sample_replace = None\n #print(\"MTC:\", i, \"df_non_sample_replace: \", df_non_sample_replace.count())\n # second add correct sample elements to their table\n df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') == col('verdict'))\n if df_correct_replace is None:\n df_correct_replace = df_lemma\n else:\n df_correct_replace = df_correct_replace.union(df_lemma)\n df_numbers[\"num_added_to_training\"].loc[i] = df_lemma.count()\n #print(\"MTC:\", i, \"df_correct_replace: \", df_correct_replace.count())\n # finally add wrong sample elements to their table\n df_lemma = df_sample_all.where(col('verdict') == i).where(col('majortopic') != col('verdict'))\n if df_wrong_replace is None:\n df_wrong_replace = df_lemma\n else:\n df_wrong_replace = df_wrong_replace.union(df_lemma)\n #print(\"MTC:\", i, \"df_wrong_replace: \", df_wrong_replace.count())\n\n# sometimes there will be no major topic code with precision >= 85%\nif df_replace_all is None:\n df_replace_all = \"empty\"\n\n# sometimes there will be no non-sample elements\nif df_non_sample_replace is None:\n df_non_sample_replace = \"empty\"\n\n# the reason for creating these \"empty\" values is that they will persist after we clear the\n# cache, and we can use them later in the workflow control\n\n# write all tables to parquet before clearing memory\ndf_correct_replace.write.parquet(\"hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet\", mode=\"overwrite\")\ndf_wrong_replace.write.parquet(\"hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet\", mode=\"overwrite\")\n# sometimes there will be no non-sample elements\nif df_non_sample_replace != \"empty\":\n df_non_sample_replace.write.parquet(\"hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet\", mode=\"overwrite\")\n# sometimes there will be no major topic code with precision >= 85%\nif df_replace_all != \"empty\":\n df_replace_all.write.parquet(\"hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet\", mode=\"overwrite\")\n\n# write df_numbers to csv\ndf_numbers.to_csv(\"ML2_HV_v3_stepwise_NYT_human_validation_numbers_r1.csv\", index=True)\n\n# empty memory\nspark.catalog.clearCache()\nprint(\"cache cleared\")\n\n#################################################\n# prepare df_original to add tables to it\n#################################################\n\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_r1_train_and_remaining_NOTclassified.parquet\").repartition(50)\n# create column for known negatives in df_original\ndf_original = 
df_original.withColumn('train_r2_neg', lit(0))\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_original = df_original.withColumnRenamed('majortopic', 'mtc_original')\ndf_original = df_original.withColumn('majortopic', df_original['mtc_original'])\n# finally, create the new train id column\ndf_original = df_original.withColumn(\"train_r2\", when(df_original[\"sim\"] != 3, 1).otherwise(0))\n\n#################################################\n# add df_replace_all back to df_original\n#################################################\n\nif df_replace_all != \"empty\":\n print(\"df_replace_all is NOT empty\")\n\n df_replace_all = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_replace_all_temp.parquet\").repartition(50)\n # create column for known negatives in df_replace_all\n df_replace_all = df_replace_all.withColumn('train_r2_neg', lit(0))\n # we need to create a new majortopic column, because we are now adding back in elements with\n # potentially new labels\n df_replace_all = df_replace_all.withColumnRenamed('majortopic', 'mtc_original')\n df_replace_all = df_replace_all.withColumn('majortopic', df_replace_all['verdict'])\n # create the new train id column\n df_replace_all = df_replace_all.withColumn(\"train_r2\", lit(1))\n # drop the extra columns to be able to add it back to df_original\n df_replace_all = df_replace_all.drop('verdict')\n\n # add df_replace_all elements to df_original\n df_original = df_original.union(df_replace_all)\n\nelse:\n print(\"df_replace_all is empty\")\n\n#################################################\n# add df_non_sample_replace back to df_original\n#################################################\n\nif df_non_sample_replace != \"empty\":\n print(\"df_non_sample_replace is NOT empty\")\n\n df_non_sample_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_non_sample_replace_temp.parquet\").repartition(50)\n # create column for known negatives in df_non_sample_replace\n df_non_sample_replace = df_non_sample_replace.withColumn('train_r2_neg', lit(0))\n # we need to create a new majortopic column, because we are now adding back in elements with\n # potentially new labels\n df_non_sample_replace = df_non_sample_replace.withColumnRenamed('majortopic', 'mtc_original')\n df_non_sample_replace = df_non_sample_replace.withColumn('majortopic', df_non_sample_replace['mtc_original'])\n # create the new train id column\n df_non_sample_replace = df_non_sample_replace.withColumn(\"train_r2\", lit(0))\n # drop the extra columns to be able to add it back to df_original\n df_non_sample_replace = df_non_sample_replace.drop('verdict')\n\n # add df_non_sample_replace elements to df_original\n df_original = df_original.union(df_non_sample_replace)\n\nelse:\n print(\"df_non_sample_replace is empty\")\n\n#################################################\n# add df_correct_replace back to df_original\n#################################################\n\ndf_correct_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_correct_replace_temp.parquet\").repartition(50)\n# create column for known negatives in df_correct_replace\ndf_correct_replace = df_correct_replace.withColumn('train_r2_neg', lit(0))\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_correct_replace = df_correct_replace.withColumnRenamed('majortopic', 'mtc_original')\ndf_correct_replace = df_correct_replace.withColumn('majortopic', 
df_correct_replace['verdict'])\n# create the new train id column\ndf_correct_replace = df_correct_replace.withColumn(\"train_r2\", lit(1))\n# drop the extra columns to be able to add it back to df_original\ndf_correct_replace = df_correct_replace.drop('verdict')\n\n# add df_correct_replace elements to df_original\ndf_original = df_original.union(df_correct_replace)\n\n#################################################\n# add df_wrong_replace back to df_original\n#################################################\n\ndf_wrong_replace = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/df_wrong_replace_temp.parquet\").repartition(50)\n# create column for known negatives in df_wrong_replace\ndf_wrong_replace = df_wrong_replace.withColumn('train_r2_neg', df_wrong_replace['verdict'])\n# we need to create a new majortopic column, because we are now adding back in elements with\n# potentially new labels\ndf_wrong_replace = df_wrong_replace.withColumnRenamed('majortopic', 'mtc_original')\ndf_wrong_replace = df_wrong_replace.withColumn('majortopic', df_wrong_replace['mtc_original'])\n# create the new train id column\ndf_wrong_replace = df_wrong_replace.withColumn(\"train_r2\", lit(0))\n# drop the extra columns to be able to add it back to df_original\ndf_wrong_replace = df_wrong_replace.drop('verdict')\n\n# add df_wrong_replace elements to df_original\ndf_original = df_original.union(df_wrong_replace)\n\n#################################################\n# final write operations\n#################################################\n\ndf_original.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_round2_start.parquet\", mode=\"overwrite\")\n\ndf_original.groupBy(\"train_r2\").count().show(n=30)\n\n# empty memory\nspark.catalog.clearCache()\nprint(\"cache cleared\")\n\n# write to pandas and export to csv for debugging\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_round2_start.parquet\").repartition(50)\ndf_original = df_original.drop('text', 'words', 'features', 'raw_features').toPandas()\ndf_original.to_csv(\"ML2_HV_v3_stepwise_NYT_round2_starting_table.csv\", index=False)\n\nsc.stop()\nspark.stop()\n",
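The train_r2_neg column introduced above is what distinguishes this stepwise variant: a human-validated false positive records the rejected verdict, so a later round can pull that document into topic i's training set as a known negative while keeping it out of topic i's scoring set (the round-4 scripts filter on train_r2_neg, train_r3_neg and train_r4_neg in exactly this way). A simplified, self-contained sketch of that filter with a single negative column and hypothetical rows:

from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.master("local[2]").getOrCreate()

df = spark.createDataFrame(
    [(1, 1, 0),   # ordinary training document
     (2, 0, 5),   # validated false positive for topic 5: a known negative
     (3, 0, 0)],  # untouched document, still to be classified
    ["doc_id", "train_r2", "train_r2_neg"],
)

i = 5  # current topic code
df_train = df.where((col("train_r2") == 1) | (col("train_r2_neg") == i))
df_test = df.where((col("train_r2") == 0) & (col("train_r2_neg") != i))
df_train.show()  # doc_ids 1 and 2
df_test.show()   # doc_id 3 only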
"# import libraries\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import *\n\nfrom pyspark.sql.functions import col, count, when\n\nfrom pyspark.ml.classification import LinearSVC\n\nimport pandas as pd\n\n#################################################\n# spark config\n#################################################\nmtaMaster = \"spark://192.168.0.182:7077\"\n\nconf = SparkConf()\nconf.setMaster(mtaMaster)\n\nconf.set(\"spark.executor.memory\", \"24g\")\nconf.set(\"spark.driver.memory\", \"26g\")\nconf.set(\"spark.cores.max\", 96)\nconf.set(\"spark.driver.cores\", 8)\n\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\")\nconf.set(\"spark.kryoserializer.buffer\", \"256m\")\nconf.set(\"spark.kryoserializer.buffer.max\", \"256m\")\n\nconf.set(\"spark.default.parallelism\", 24)\n\nconf.set(\"spark.eventLog.enabled\", \"true\")\nconf.set(\"spark.eventLog.dir\", \"hdfs://192.168.0.182:9000/eventlog\")\nconf.set(\"spark.history.fs.logDirectory\", \"hdfs://192.168.0.182:9000/eventlog\")\n\nconf.set(\"spark.driver.maxResultSize\", \"2g\")\n\nconf.getAll()\n\n#################################################\n# create spark session\n#################################################\nspark = SparkSession.builder.appName('ML2_HV_v3_stepwise_NYT_sim2_and_sim3_to_sim1_round4').config(conf=conf).getOrCreate()\n\nsc = spark.sparkContext\n\n# check things are working\nprint(sc)\nprint(sc.defaultParallelism)\nprint(\"SPARK CONTEXT IS RUNNING\")\n\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (NO 23 IN THE NYT CORPUS)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]\n\n\n#################################################\n# loop starts here\n#################################################\n\nfor h in range(3):\n # read table from hdfs\n df_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_round4_start.parquet\").repartition(50)\n\n # check loaded data \n print(df_original.printSchema())\n print(df_original.show())\n df_original.groupBy(\"majortopic\").count().show(30, False)\n\n #################################################\n # prepare to log sample numbers\n #################################################\n\n columns = [\"label\", \"non_label_all\", \"non_label_sample\", \"train_all\"]\n\n df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)\n\n for i in majortopic_codes:\n #################################################\n # prepare df for svm requirements\n #################################################\n print(\"majortopic is:\", i)\n\n # separate majortopic\n df_original = df_original.withColumn(\"label\", when(df_original[\"majortopic\"] == i, 1).otherwise(0))\n\n # label has to be double for SVM\n df_original = df_original.withColumn('label', df_original.label.cast(DoubleType()))\n\n #################################################\n # separate training and test sets\n #################################################\n\n df_train = df_original.where((col('train_r4') == 1) | (col('train_r2_neg') == i) | (col('train_r3_neg') == i) | (col('train_r4_neg') == i))\n df_test = df_original.where((col('train_r4') == 0) & (col('train_r2_neg') != i) & (col('train_r3_neg') != i) & (col('train_r4_neg') != 
i))\n\n # make training data proportional with regards to label occurrence frequency\n df_train_mtc = df_train.where(col('label') == 1)\n df_train_non_mtc = df_train.where(col('label') == 0)\n\n df_train_count = df_train.count()\n df_train_mtc_count = df_train_mtc.count()\n df_train_non_mtc_count = df_train_non_mtc.count()\n print(\"Rows in training DataFrame with label = \", df_train_mtc_count)\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_count)\n\n if df_train_mtc_count/df_train_non_mtc_count < 0.1:\n if df_train_mtc_count*10 < df_train_count//10:\n sample_num = df_train_count//10\n else: sample_num = df_train_mtc_count*10\n print(\"sample_num = \", sample_num)\n print(\"df_train_non_mtc = \", df_train_non_mtc_count)\n sampling_fraction = sample_num/df_train_non_mtc_count\n print(\"sampling_fraction = \", sampling_fraction)\n df_train_non_mtc = df_train_non_mtc.sample(False, sampling_fraction)\n df_train_non_mtc_sample = df_train_non_mtc.count()\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_sample)\n df_train = df_train_mtc.union(df_train_non_mtc)\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_sample\n df_numbers[\"train_all\"].loc[i] = df_train_mtc_count + df_train_non_mtc_sample\n else:\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_count\n df_numbers[\"train_all\"].loc[i] = df_train_count\n\n # numbers to logtable\n df_numbers[\"label\"].loc[i] = df_train_mtc_count\n df_numbers[\"non_label_all\"].loc[i] = df_train_non_mtc_count\n print(df_numbers)\n\n # NOTE: this type of copying wouldn't work in python, but does work in pyspark!\n df_train_orig = df_train\n df_test_orig = df_test\n df_loop = 0\n df_train_mtc = 0\n df_train_non_mtc = 0\n\n print(\"Rows in training DataFrame = \", df_train.count())\n print(\"Rows in test DataFrame = \", df_test.count())\n\n\n #################################################\n # SVM\n #################################################\n\n for j in range(3):\n df_train = df_train_orig\n df_test = df_test_orig\n\n # define svm\n lsvc = LinearSVC(featuresCol='features', labelCol='label', maxIter=10, regParam=0.1)\n\n # train the model.\n lsvcModel = lsvc.fit(df_train)\n\n print(\"fit model finished, starting scoring:\", j)\n\n # score the model on test data.\n predictions = lsvcModel.transform(df_test)\n\n df_train = 0\n df_test = 0\n lsvcModel = 0\n\n print(predictions.printSchema())\n print(predictions.show())\n\n df_write = predictions.select(\"doc_id\", \"prediction\")\n\n predictions = 0\n\n df_write = df_write.withColumn('prediction', df_write.prediction.cast(IntegerType()))\n df_write = df_write.withColumn('prediction', df_write.prediction * i)\n new_col_name = 'prediction_{i}'.format(i=i)\n df_write = df_write.withColumnRenamed('prediction', new_col_name)\n\n # write partial result to parquet\n dest_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df_write.write.parquet(dest_name, mode=\"overwrite\")\n\n df_write = 0\n\n print(\"DONE\")\n\n print(\"ALL SVM DONE round4_{h}\".format(h=h+1))\n\n df_numbers.to_csv(\"ML2_HV_v3_stepwise_NYT_round4_sample{h}_sample_numbers.csv\".format(h=h+1), index=False)\n\n # empty memory\n spark.catalog.clearCache()\n print(\"cache cleared\")\n\n #######################################################\n ### parquet to pandas\n #######################################################\n\n for j in range(3):\n # read from parquet format\n for i 
in majortopic_codes:\n source_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df = spark.read.parquet(source_name).repartition(50)\n if i == 1:\n df_results = df\n else:\n df_results = df_results.join(df, 'doc_id', 'inner')\n\n df = df_results\n df_results = 0\n\n # convert prediction results to pandas df\n df = df.toPandas()\n\n df.to_csv(\"ML2_HV_v3_stepwise_NYT_round4_sample{h}_svm{j}.csv\".format(h=h+1,j=j), index=False)\n\n\n#########################################################################\n# create results and leftovers tables\n#########################################################################\n\n# all of the following happen in pandas outside the spark context\nfor i in range(3):\n for j in range(3):\n df = pd.read_csv(\"ML2_HV_v3_stepwise_NYT_round4_sample{i}_svm{j}.csv\".format(i=i+1, j=j))\n df = df.sort_values(by=['doc_id'])\n df = df.reset_index(drop=True)\n #print(df.head())\n if i == 0 and j == 0:\n df_results = df\n else:\n df_lemma = df_results.iloc[:,1:].add(df.iloc[:,1:])\n df_results = pd.concat([df_results[['doc_id']], df_lemma], axis=1)\n #print(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(i)\n\ndf_results[\"max_value\"] = df_results.iloc[:,1:].max(axis = 1, numeric_only = True)\ndf_results[\"how_many_9votes\"] = df_results.iloc[:,1:-1].isin([9]).sum(1) # exclude doc_id and max_value from the vote count\n\n# keep only rows with verdicts\nprint(df_results.shape)\ndf_results = df_results.loc[df_results[\"max_value\"]==9]\nprint(df_results.shape)\n# keep only rows with a single verdict\ndf_results = df_results.loc[df_results[\"how_many_9votes\"]==1]\nprint(df_results.shape)\n\n# prepare table of single verdicts\ndf_results = df_results.drop(['max_value', 'how_many_9votes'], axis=1)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(9)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]]*i\n\n\ndf_results[\"verdict\"] = df_results.iloc[:,1:].sum(1)\n\ndf_results = df_results[[\"doc_id\", \"verdict\"]]\n\n# now we move back to the spark context!!\n# for that we need to move the pandas df into a spark df\ndf = spark.createDataFrame(df_results)\n\n# load df_original\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_round4_start.parquet\").repartition(50)\n\n# create results table\ndf_results = df_original.join(df, \"doc_id\", \"inner\")\n\n# create table of non-classified and training elements\nids_drop = df.select(\"doc_id\")\ndf_original = df_original.join(ids_drop, \"doc_id\", \"left_anti\")\n\n# write to parquet for use in human validation script\ndf_original.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_r4_train_and_remaining_NOTclassified.parquet\", mode=\"overwrite\")\ndf_results.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v3_stepwise_NYT_r4_classified.parquet\", mode=\"overwrite\")\n\n# convert tables to pandas df and write to csv\ndf_original = df_original.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\ndf_results = df_results.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\n\ndf_original.to_csv(\"ML2_HV_v3_stepwise_NYT_r4_train_and_remaining_NOTclassified.csv\", 
index=False)\ndf_results.to_csv(\"ML2_HV_v3_stepwise_NYT_r4_classified.csv\", index=False)\n\nprint(\"df_original: \", df_original.shape[0])\nprint(\"df_results: \", df_results.shape[0])\n\nsc.stop()\nspark.stop()\n"
] | [
[
"pandas.concat",
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
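The Spark/pandas script in the record above ends with a majority-vote post-processing step whose floordiv arithmetic is easy to misread: each prediction_{i} column is the sum, over nine SVM runs (three training samples x three SVMs), of a per-run value that is either 0 or i, and only documents where exactly one topic code collected all nine votes receive a verdict. A minimal, self-contained sketch of that verdict logic on toy data follows; the majortopic_codes subset and doc_id values are invented for illustration, the column names follow the script's prediction_{i} convention, and unlike the script's iloc[:, :-1] the sketch deliberately excludes doc_id from the 9-vote count.

import pandas as pd

majortopic_codes = [1, 2, 3]  # toy subset; the real script loops over all codes

# Each prediction_{i} column already holds the sum over the 9 SVM runs
# (3 training samples x 3 SVMs) of a per-run value that is either 0 or i.
df = pd.DataFrame({
    "doc_id": [10, 11, 12],
    "prediction_1": [9 * 1, 0, 9 * 1],  # docs 10 and 12: 9/9 votes for code 1
    "prediction_2": [0, 5 * 2, 9 * 2],  # doc 11: only 5/9 votes for code 2
    "prediction_3": [0, 0, 0],
})

# Turn the summed predictions back into vote counts (0..9).
for i in majortopic_codes:
    df["prediction_{i}".format(i=i)] = df["prediction_{i}".format(i=i)].floordiv(i)

df["max_value"] = df.iloc[:, 1:].max(axis=1, numeric_only=True)
# Unlike the script's iloc[:, :-1], doc_id is excluded from the count here.
df["how_many_9votes"] = df.iloc[:, 1:-1].isin([9]).sum(1)

# Keep only documents with exactly one unanimous (9/9) topic code.
verdicts = df[(df["max_value"] == 9) & (df["how_many_9votes"] == 1)].copy()
verdicts = verdicts.drop(["max_value", "how_many_9votes"], axis=1)
for i in majortopic_codes:
    col = "prediction_{i}".format(i=i)
    verdicts[col] = verdicts[col].floordiv(9) * i
verdicts["verdict"] = verdicts.iloc[:, 1:].sum(1)
print(verdicts[["doc_id", "verdict"]])  # only doc 10 survives, with verdict 1

Doc 12 illustrates why the how_many_9votes filter exists: it is unanimous for two different codes at once, so it is carried over to the leftovers table instead of being assigned a verdict.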
wolf-zchen/CarND-capstone | [
"b6b768bfd01f03a5256c2db4b84f9d7a42149de2"
] | [
"ros/src/waypoint_updater/waypoint_updater.py"
] | [
"#!/usr/bin/env python\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\nfrom scipy.spatial import KDTree\n\nimport math\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 30 # Number of waypoints we will publish. You can change this number\nMAX_DECEL = 1\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb,queue_size = 1)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size = 1)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb, queue_size = 1)\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n \n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n # TODO: Add other member variables you need below\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n \n self.loop()\n \n #rospy.spin()\n\n def loop(self):\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints:\n #Get closest waypoint\n closest_waypoint_idx = self.get_closest_waypoint_idx()\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x,y],1)[1] \n \n #check if closet is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x,y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect -cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self,closest_idx):\n #lane = Lane()\n #lane.header = self.base_waypoints.header\n #lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n def generate_lane(self):\n lane = Lane()\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]\n\n if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):\n lane.waypoints = base_waypoints\n else:\n lane.waypoints = self.decelerate_waypoints(base_waypoints,closest_idx)\n\n return lane\n\n def decelerate_waypoints(self,waypoints,closest_idx):\n temp = []\n for i, wp in enumerate(waypoints):\n p = Waypoint()\n p.pose = wp.pose\n stop_idx = max(self.stopline_wp_idx - closest_idx - 3, 0)\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * 
dist)\n if vel < 1.0:\n vel = 0\n\n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n temp.append(p)\n return temp\n\n def pose_cb(self, msg):\n # TODO: Implement\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n # TODO: Implement\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n # TODO: Callback for /traffic_waypoint message. Implement\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n"
] | [
[
"numpy.dot",
"numpy.array",
"scipy.spatial.KDTree"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
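The waypoint updater in the record above decides whether the KDTree's nearest waypoint lies ahead of or behind the car with a dot-product test against the hyperplane through the closest waypoint, perpendicular to the local track direction. A minimal sketch of that test, assuming a toy straight-line track (the waypoints_2d coordinates are invented for illustration):

import numpy as np
from scipy.spatial import KDTree

waypoints_2d = [[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [3.0, 0.0]]
tree = KDTree(waypoints_2d)

def closest_waypoint_ahead(x, y):
    closest_idx = tree.query([x, y], 1)[1]
    cl_vect = np.array(waypoints_2d[closest_idx])
    prev_vect = np.array(waypoints_2d[closest_idx - 1])
    pos_vect = np.array([x, y])
    # Positive dot product means the car is already past the hyperplane
    # through the closest waypoint, so take the next waypoint instead.
    if np.dot(cl_vect - prev_vect, pos_vect - cl_vect) > 0:
        closest_idx = (closest_idx + 1) % len(waypoints_2d)
    return closest_idx

print(closest_waypoint_ahead(1.9, 0.1))  # nearest is idx 2, car behind it -> 2
print(closest_waypoint_ahead(2.1, 0.1))  # car already past idx 2 -> 3

The sign test matters because publishing a waypoint behind the vehicle would make the controller steer backwards for one cycle; advancing the index guarantees the published lane always starts ahead of the car.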
undeadyequ/espnet | [
"8c3f85ce695153abcb9cf365180b1d7554ad565e",
"8c3f85ce695153abcb9cf365180b1d7554ad565e",
"8c3f85ce695153abcb9cf365180b1d7554ad565e",
"8c3f85ce695153abcb9cf365180b1d7554ad565e"
] | [
"espnet/nets/pytorch_backend/e2e_vc_transformer.py",
"test/test_e2e_mt_transformer.py",
"espnet2/main_funcs/calculate_all_attentions.py",
"test/espnet2/train/test_iterable_dataset.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Nagoya University (Wen-Chin Huang)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Voice Transformer Network (Transformer-VC) related modules.\"\"\"\n\nimport logging\n\nimport torch\nimport torch.nn.functional as F\n\nfrom espnet.nets.pytorch_backend.e2e_asr_transformer import subsequent_mask\nfrom espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (\n Tacotron2Loss as TransformerLoss, # noqa: H301\n)\nfrom espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask\nfrom espnet.nets.pytorch_backend.tacotron2.decoder import Postnet\nfrom espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet\nfrom espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet\nfrom espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention\nfrom espnet.nets.pytorch_backend.transformer.decoder import Decoder\nfrom espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding\nfrom espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding\nfrom espnet.nets.pytorch_backend.transformer.encoder import Encoder\nfrom espnet.nets.pytorch_backend.transformer.initializer import initialize\nfrom espnet.nets.tts_interface import TTSInterface\nfrom espnet.utils.cli_utils import strtobool\nfrom espnet.utils.fill_missing_args import fill_missing_args\nfrom espnet.nets.pytorch_backend.e2e_tts_transformer import (\n GuidedMultiHeadAttentionLoss, # noqa: H301\n TTSPlot, # noqa: H301\n)\n\n\nclass Transformer(TTSInterface, torch.nn.Module):\n \"\"\"VC Transformer module.\n\n This is a module of the Voice Transformer Network\n (a.k.a. VTN or Transformer-VC) described in\n `Voice Transformer Network: Sequence-to-Sequence\n Voice Conversion Using Transformer with\n Text-to-Speech Pretraining`_,\n which convert the sequence of acoustic features\n into the sequence of acoustic features.\n\n .. 
_`Voice Transformer Network: Sequence-to-Sequence\n Voice Conversion Using Transformer with\n Text-to-Speech Pretraining`:\n https://arxiv.org/pdf/1912.06813.pdf\n\n \"\"\"\n\n @staticmethod\n def add_arguments(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n group = parser.add_argument_group(\"transformer model setting\")\n # network structure related\n group.add_argument(\n \"--eprenet-conv-layers\",\n default=0,\n type=int,\n help=\"Number of encoder prenet convolution layers\",\n )\n group.add_argument(\n \"--eprenet-conv-chans\",\n default=0,\n type=int,\n help=\"Number of encoder prenet convolution channels\",\n )\n group.add_argument(\n \"--eprenet-conv-filts\",\n default=0,\n type=int,\n help=\"Filter size of encoder prenet convolution\",\n )\n group.add_argument(\n \"--transformer-input-layer\",\n default=\"linear\",\n type=str,\n help=\"Type of input layer (linear or conv2d)\",\n )\n group.add_argument(\n \"--dprenet-layers\",\n default=2,\n type=int,\n help=\"Number of decoder prenet layers\",\n )\n group.add_argument(\n \"--dprenet-units\",\n default=256,\n type=int,\n help=\"Number of decoder prenet hidden units\",\n )\n group.add_argument(\n \"--elayers\", default=3, type=int, help=\"Number of encoder layers\"\n )\n group.add_argument(\n \"--eunits\", default=1536, type=int, help=\"Number of encoder hidden units\"\n )\n group.add_argument(\n \"--adim\",\n default=384,\n type=int,\n help=\"Number of attention transformation dimensions\",\n )\n group.add_argument(\n \"--aheads\",\n default=4,\n type=int,\n help=\"Number of heads for multi head attention\",\n )\n group.add_argument(\n \"--dlayers\", default=3, type=int, help=\"Number of decoder layers\"\n )\n group.add_argument(\n \"--dunits\", default=1536, type=int, help=\"Number of decoder hidden units\"\n )\n group.add_argument(\n \"--positionwise-layer-type\",\n default=\"linear\",\n type=str,\n choices=[\"linear\", \"conv1d\", \"conv1d-linear\"],\n help=\"Positionwise layer type.\",\n )\n group.add_argument(\n \"--positionwise-conv-kernel-size\",\n default=1,\n type=int,\n help=\"Kernel size of positionwise conv1d layer\",\n )\n group.add_argument(\n \"--postnet-layers\", default=5, type=int, help=\"Number of postnet layers\"\n )\n group.add_argument(\n \"--postnet-chans\", default=256, type=int, help=\"Number of postnet channels\"\n )\n group.add_argument(\n \"--postnet-filts\", default=5, type=int, help=\"Filter size of postnet\"\n )\n group.add_argument(\n \"--use-scaled-pos-enc\",\n default=True,\n type=strtobool,\n help=\"Use trainable scaled positional encoding\"\n \"instead of the fixed scale one.\",\n )\n group.add_argument(\n \"--use-batch-norm\",\n default=True,\n type=strtobool,\n help=\"Whether to use batch normalization\",\n )\n group.add_argument(\n \"--encoder-normalize-before\",\n default=False,\n type=strtobool,\n help=\"Whether to apply layer norm before encoder block\",\n )\n group.add_argument(\n \"--decoder-normalize-before\",\n default=False,\n type=strtobool,\n help=\"Whether to apply layer norm before decoder block\",\n )\n group.add_argument(\n \"--encoder-concat-after\",\n default=False,\n type=strtobool,\n help=\"Whether to concatenate attention layer's input and output in encoder\",\n )\n group.add_argument(\n \"--decoder-concat-after\",\n default=False,\n type=strtobool,\n help=\"Whether to concatenate attention layer's input and output in decoder\",\n )\n group.add_argument(\n \"--reduction-factor\",\n default=1,\n type=int,\n help=\"Reduction factor (for decoder)\",\n )\n 
group.add_argument(\n \"--encoder-reduction-factor\",\n default=1,\n type=int,\n help=\"Reduction factor (for encoder)\",\n )\n group.add_argument(\n \"--spk-embed-dim\",\n default=None,\n type=int,\n help=\"Number of speaker embedding dimensions\",\n )\n group.add_argument(\n \"--spk-embed-integration-type\",\n type=str,\n default=\"add\",\n choices=[\"add\", \"concat\"],\n help=\"How to integrate speaker embedding\",\n )\n # training related\n group.add_argument(\n \"--transformer-init\",\n type=str,\n default=\"pytorch\",\n choices=[\n \"pytorch\",\n \"xavier_uniform\",\n \"xavier_normal\",\n \"kaiming_uniform\",\n \"kaiming_normal\",\n ],\n help=\"How to initialize transformer parameters\",\n )\n group.add_argument(\n \"--initial-encoder-alpha\",\n type=float,\n default=1.0,\n help=\"Initial alpha value in encoder's ScaledPositionalEncoding\",\n )\n group.add_argument(\n \"--initial-decoder-alpha\",\n type=float,\n default=1.0,\n help=\"Initial alpha value in decoder's ScaledPositionalEncoding\",\n )\n group.add_argument(\n \"--transformer-lr\",\n default=1.0,\n type=float,\n help=\"Initial value of learning rate\",\n )\n group.add_argument(\n \"--transformer-warmup-steps\",\n default=4000,\n type=int,\n help=\"Optimizer warmup steps\",\n )\n group.add_argument(\n \"--transformer-enc-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder except for attention\",\n )\n group.add_argument(\n \"--transformer-enc-positional-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder positional encoding\",\n )\n group.add_argument(\n \"--transformer-enc-attn-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder self-attention\",\n )\n group.add_argument(\n \"--transformer-dec-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer decoder \"\n \"except for attention and pos encoding\",\n )\n group.add_argument(\n \"--transformer-dec-positional-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer decoder positional encoding\",\n )\n group.add_argument(\n \"--transformer-dec-attn-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer decoder self-attention\",\n )\n group.add_argument(\n \"--transformer-enc-dec-attn-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder-decoder attention\",\n )\n group.add_argument(\n \"--eprenet-dropout-rate\",\n default=0.5,\n type=float,\n help=\"Dropout rate in encoder prenet\",\n )\n group.add_argument(\n \"--dprenet-dropout-rate\",\n default=0.5,\n type=float,\n help=\"Dropout rate in decoder prenet\",\n )\n group.add_argument(\n \"--postnet-dropout-rate\",\n default=0.5,\n type=float,\n help=\"Dropout rate in postnet\",\n )\n group.add_argument(\n \"--pretrained-model\", default=None, type=str, help=\"Pretrained model path\"\n )\n\n # loss related\n group.add_argument(\n \"--use-masking\",\n default=True,\n type=strtobool,\n help=\"Whether to use masking in calculation of loss\",\n )\n group.add_argument(\n \"--use-weighted-masking\",\n default=False,\n type=strtobool,\n help=\"Whether to use weighted masking in calculation of loss\",\n )\n group.add_argument(\n \"--loss-type\",\n default=\"L1\",\n choices=[\"L1\", \"L2\", \"L1+L2\"],\n help=\"How to calc loss\",\n )\n group.add_argument(\n \"--bce-pos-weight\",\n default=5.0,\n type=float,\n help=\"Positive sample weight in BCE calculation \"\n \"(only for use-masking=True)\",\n )\n 
group.add_argument(\n \"--use-guided-attn-loss\",\n default=False,\n type=strtobool,\n help=\"Whether to use guided attention loss\",\n )\n group.add_argument(\n \"--guided-attn-loss-sigma\",\n default=0.4,\n type=float,\n help=\"Sigma in guided attention loss\",\n )\n group.add_argument(\n \"--guided-attn-loss-lambda\",\n default=1.0,\n type=float,\n help=\"Lambda in guided attention loss\",\n )\n group.add_argument(\n \"--num-heads-applied-guided-attn\",\n default=2,\n type=int,\n help=\"Number of heads in each layer to be applied guided attention loss\"\n \"if set -1, all of the heads will be applied.\",\n )\n group.add_argument(\n \"--num-layers-applied-guided-attn\",\n default=2,\n type=int,\n help=\"Number of layers to be applied guided attention loss\"\n \"if set -1, all of the layers will be applied.\",\n )\n group.add_argument(\n \"--modules-applied-guided-attn\",\n type=str,\n nargs=\"+\",\n default=[\"encoder-decoder\"],\n help=\"Module name list to be applied guided attention loss\",\n )\n return parser\n\n @property\n def attention_plot_class(self):\n \"\"\"Return plot class for attention weight plot.\"\"\"\n return TTSPlot\n\n def __init__(self, idim, odim, args=None):\n \"\"\"Initialize Transformer-VC module.\n\n Args:\n idim (int): Dimension of the inputs.\n odim (int): Dimension of the outputs.\n args (Namespace, optional):\n - eprenet_conv_layers (int):\n Number of encoder prenet convolution layers.\n - eprenet_conv_chans (int):\n Number of encoder prenet convolution channels.\n - eprenet_conv_filts (int):\n Filter size of encoder prenet convolution.\n - transformer_input_layer (str): Input layer before the encoder.\n - dprenet_layers (int): Number of decoder prenet layers.\n - dprenet_units (int): Number of decoder prenet hidden units.\n - elayers (int): Number of encoder layers.\n - eunits (int): Number of encoder hidden units.\n - adim (int): Number of attention transformation dimensions.\n - aheads (int): Number of heads for multi head attention.\n - dlayers (int): Number of decoder layers.\n - dunits (int): Number of decoder hidden units.\n - postnet_layers (int): Number of postnet layers.\n - postnet_chans (int): Number of postnet channels.\n - postnet_filts (int): Filter size of postnet.\n - use_scaled_pos_enc (bool):\n Whether to use trainable scaled positional encoding.\n - use_batch_norm (bool):\n Whether to use batch normalization in encoder prenet.\n - encoder_normalize_before (bool):\n Whether to perform layer normalization before encoder block.\n - decoder_normalize_before (bool):\n Whether to perform layer normalization before decoder block.\n - encoder_concat_after (bool): Whether to concatenate\n attention layer's input and output in encoder.\n - decoder_concat_after (bool): Whether to concatenate\n attention layer's input and output in decoder.\n - reduction_factor (int): Reduction factor (for decoder).\n - encoder_reduction_factor (int): Reduction factor (for encoder).\n - spk_embed_dim (int): Number of speaker embedding dimenstions.\n - spk_embed_integration_type: How to integrate speaker embedding.\n - transformer_init (float): How to initialize transformer parameters.\n - transformer_lr (float): Initial value of learning rate.\n - transformer_warmup_steps (int): Optimizer warmup steps.\n - transformer_enc_dropout_rate (float):\n Dropout rate in encoder except attention & positional encoding.\n - transformer_enc_positional_dropout_rate (float):\n Dropout rate after encoder positional encoding.\n - transformer_enc_attn_dropout_rate (float):\n Dropout 
rate in encoder self-attention module.\n - transformer_dec_dropout_rate (float):\n Dropout rate in decoder except attention & positional encoding.\n - transformer_dec_positional_dropout_rate (float):\n Dropout rate after decoder positional encoding.\n - transformer_dec_attn_dropout_rate (float):\n Dropout rate in deocoder self-attention module.\n - transformer_enc_dec_attn_dropout_rate (float):\n Dropout rate in encoder-deocoder attention module.\n - eprenet_dropout_rate (float): Dropout rate in encoder prenet.\n - dprenet_dropout_rate (float): Dropout rate in decoder prenet.\n - postnet_dropout_rate (float): Dropout rate in postnet.\n - use_masking (bool):\n Whether to apply masking for padded part in loss calculation.\n - use_weighted_masking (bool):\n Whether to apply weighted masking in loss calculation.\n - bce_pos_weight (float): Positive sample weight in bce calculation\n (only for use_masking=true).\n - loss_type (str): How to calculate loss.\n - use_guided_attn_loss (bool): Whether to use guided attention loss.\n - num_heads_applied_guided_attn (int):\n Number of heads in each layer to apply guided attention loss.\n - num_layers_applied_guided_attn (int):\n Number of layers to apply guided attention loss.\n - modules_applied_guided_attn (list):\n List of module names to apply guided attention loss.\n - guided-attn-loss-sigma (float) Sigma in guided attention loss.\n - guided-attn-loss-lambda (float): Lambda in guided attention loss.\n\n \"\"\"\n # initialize base classes\n TTSInterface.__init__(self)\n torch.nn.Module.__init__(self)\n\n # fill missing arguments\n args = fill_missing_args(args, self.add_arguments)\n\n # store hyperparameters\n self.idim = idim\n self.odim = odim\n self.spk_embed_dim = args.spk_embed_dim\n if self.spk_embed_dim is not None:\n self.spk_embed_integration_type = args.spk_embed_integration_type\n self.use_scaled_pos_enc = args.use_scaled_pos_enc\n self.reduction_factor = args.reduction_factor\n self.encoder_reduction_factor = args.encoder_reduction_factor\n self.transformer_input_layer = args.transformer_input_layer\n self.loss_type = args.loss_type\n self.use_guided_attn_loss = args.use_guided_attn_loss\n if self.use_guided_attn_loss:\n if args.num_layers_applied_guided_attn == -1:\n self.num_layers_applied_guided_attn = args.elayers\n else:\n self.num_layers_applied_guided_attn = (\n args.num_layers_applied_guided_attn\n )\n if args.num_heads_applied_guided_attn == -1:\n self.num_heads_applied_guided_attn = args.aheads\n else:\n self.num_heads_applied_guided_attn = args.num_heads_applied_guided_attn\n self.modules_applied_guided_attn = args.modules_applied_guided_attn\n\n # use idx 0 as padding idx\n padding_idx = 0\n\n # get positional encoding class\n pos_enc_class = (\n ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding\n )\n\n # define transformer encoder\n if args.eprenet_conv_layers != 0:\n # encoder prenet\n encoder_input_layer = torch.nn.Sequential(\n EncoderPrenet(\n idim=idim,\n elayers=0,\n econv_layers=args.eprenet_conv_layers,\n econv_chans=args.eprenet_conv_chans,\n econv_filts=args.eprenet_conv_filts,\n use_batch_norm=args.use_batch_norm,\n dropout_rate=args.eprenet_dropout_rate,\n padding_idx=padding_idx,\n input_layer=torch.nn.Linear(\n idim * args.encoder_reduction_factor, idim\n ),\n ),\n torch.nn.Linear(args.eprenet_conv_chans, args.adim),\n )\n elif args.transformer_input_layer == \"linear\":\n encoder_input_layer = torch.nn.Linear(\n idim * args.encoder_reduction_factor, args.adim\n )\n else:\n 
encoder_input_layer = args.transformer_input_layer\n self.encoder = Encoder(\n idim=idim,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.eunits,\n num_blocks=args.elayers,\n input_layer=encoder_input_layer,\n dropout_rate=args.transformer_enc_dropout_rate,\n positional_dropout_rate=args.transformer_enc_positional_dropout_rate,\n attention_dropout_rate=args.transformer_enc_attn_dropout_rate,\n pos_enc_class=pos_enc_class,\n normalize_before=args.encoder_normalize_before,\n concat_after=args.encoder_concat_after,\n positionwise_layer_type=args.positionwise_layer_type,\n positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,\n )\n\n # define projection layer\n if self.spk_embed_dim is not None:\n if self.spk_embed_integration_type == \"add\":\n self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)\n else:\n self.projection = torch.nn.Linear(\n args.adim + self.spk_embed_dim, args.adim\n )\n\n # define transformer decoder\n if args.dprenet_layers != 0:\n # decoder prenet\n decoder_input_layer = torch.nn.Sequential(\n DecoderPrenet(\n idim=odim,\n n_layers=args.dprenet_layers,\n n_units=args.dprenet_units,\n dropout_rate=args.dprenet_dropout_rate,\n ),\n torch.nn.Linear(args.dprenet_units, args.adim),\n )\n else:\n decoder_input_layer = \"linear\"\n self.decoder = Decoder(\n odim=-1,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.transformer_dec_dropout_rate,\n positional_dropout_rate=args.transformer_dec_positional_dropout_rate,\n self_attention_dropout_rate=args.transformer_dec_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_enc_dec_attn_dropout_rate,\n input_layer=decoder_input_layer,\n use_output_layer=False,\n pos_enc_class=pos_enc_class,\n normalize_before=args.decoder_normalize_before,\n concat_after=args.decoder_concat_after,\n )\n\n # define final projection\n self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)\n self.prob_out = torch.nn.Linear(args.adim, args.reduction_factor)\n\n # define postnet\n self.postnet = (\n None\n if args.postnet_layers == 0\n else Postnet(\n idim=idim,\n odim=odim,\n n_layers=args.postnet_layers,\n n_chans=args.postnet_chans,\n n_filts=args.postnet_filts,\n use_batch_norm=args.use_batch_norm,\n dropout_rate=args.postnet_dropout_rate,\n )\n )\n\n # define loss function\n self.criterion = TransformerLoss(\n use_masking=args.use_masking,\n use_weighted_masking=args.use_weighted_masking,\n bce_pos_weight=args.bce_pos_weight,\n )\n if self.use_guided_attn_loss:\n self.attn_criterion = GuidedMultiHeadAttentionLoss(\n sigma=args.guided_attn_loss_sigma, alpha=args.guided_attn_loss_lambda,\n )\n\n # initialize parameters\n self._reset_parameters(\n init_type=args.transformer_init,\n init_enc_alpha=args.initial_encoder_alpha,\n init_dec_alpha=args.initial_decoder_alpha,\n )\n\n # load pretrained model\n if args.pretrained_model is not None:\n self.load_pretrained_model(args.pretrained_model)\n\n def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):\n # initialize parameters\n initialize(self, init_type)\n\n # initialize alpha in scaled positional encoding\n if self.use_scaled_pos_enc:\n self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)\n self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)\n\n def _add_first_frame_and_remove_last_frame(self, ys):\n ys_in = torch.cat(\n [ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1\n )\n 
return ys_in\n\n def forward(self, xs, ilens, ys, labels, olens, spembs=None, *args, **kwargs):\n \"\"\"Calculate forward propagation.\n\n Args:\n xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).\n ilens (LongTensor): Batch of lengths of each input batch (B,).\n ys (Tensor): Batch of padded target features (B, Lmax, odim).\n olens (LongTensor): Batch of the lengths of each target (B,).\n spembs (Tensor, optional): Batch of speaker embedding vectors\n (B, spk_embed_dim).\n\n Returns:\n Tensor: Loss value.\n\n \"\"\"\n # remove unnecessary padded part (for multi-gpus)\n max_ilen = max(ilens)\n max_olen = max(olens)\n if max_ilen != xs.shape[1]:\n xs = xs[:, :max_ilen]\n if max_olen != ys.shape[1]:\n ys = ys[:, :max_olen]\n labels = labels[:, :max_olen]\n\n # thin out input frames for reduction factor\n # (B, Lmax, idim) -> (B, Lmax // r, idim * r)\n if self.encoder_reduction_factor > 1:\n B, Lmax, idim = xs.shape\n if Lmax % self.encoder_reduction_factor != 0:\n xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]\n xs_ds = xs.contiguous().view(\n B,\n int(Lmax / self.encoder_reduction_factor),\n idim * self.encoder_reduction_factor,\n )\n ilens_ds = ilens.new(\n [ilen // self.encoder_reduction_factor for ilen in ilens]\n )\n else:\n xs_ds, ilens_ds = xs, ilens\n\n # forward encoder\n x_masks = self._source_mask(ilens_ds)\n hs, hs_masks = self.encoder(xs_ds, x_masks)\n\n # integrate speaker embedding\n if self.spk_embed_dim is not None:\n hs_int = self._integrate_with_spk_embed(hs, spembs)\n else:\n hs_int = hs\n\n # thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)\n if self.reduction_factor > 1:\n ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]\n olens_in = olens.new([olen // self.reduction_factor for olen in olens])\n else:\n ys_in, olens_in = ys, olens\n\n # add first zero frame and remove last frame for auto-regressive\n ys_in = self._add_first_frame_and_remove_last_frame(ys_in)\n\n # if conv2d, modify mask. 
Use ceiling division here\n if \"conv2d\" in self.transformer_input_layer:\n ilens_ds_st = ilens_ds.new(\n [((ilen - 2 + 1) // 2 - 2 + 1) // 2 for ilen in ilens_ds]\n )\n else:\n ilens_ds_st = ilens_ds\n\n # forward decoder\n y_masks = self._target_mask(olens_in)\n zs, _ = self.decoder(ys_in, y_masks, hs_int, hs_masks)\n # (B, Lmax//r, odim * r) -> (B, Lmax//r * r, odim)\n before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)\n # (B, Lmax//r, r) -> (B, Lmax//r * r)\n logits = self.prob_out(zs).view(zs.size(0), -1)\n\n # postnet -> (B, Lmax//r * r, odim)\n if self.postnet is None:\n after_outs = before_outs\n else:\n after_outs = before_outs + self.postnet(\n before_outs.transpose(1, 2)\n ).transpose(1, 2)\n\n # modifiy mod part of groundtruth\n if self.reduction_factor > 1:\n olens = olens.new([olen - olen % self.reduction_factor for olen in olens])\n max_olen = max(olens)\n ys = ys[:, :max_olen]\n labels = labels[:, :max_olen]\n labels[:, -1] = 1.0 # make sure at least one frame has 1\n\n # caluculate loss values\n l1_loss, l2_loss, bce_loss = self.criterion(\n after_outs, before_outs, logits, ys, labels, olens\n )\n if self.loss_type == \"L1\":\n loss = l1_loss + bce_loss\n elif self.loss_type == \"L2\":\n loss = l2_loss + bce_loss\n elif self.loss_type == \"L1+L2\":\n loss = l1_loss + l2_loss + bce_loss\n else:\n raise ValueError(\"unknown --loss-type \" + self.loss_type)\n report_keys = [\n {\"l1_loss\": l1_loss.item()},\n {\"l2_loss\": l2_loss.item()},\n {\"bce_loss\": bce_loss.item()},\n {\"loss\": loss.item()},\n ]\n\n # calculate guided attention loss\n if self.use_guided_attn_loss:\n # calculate for encoder\n if \"encoder\" in self.modules_applied_guided_attn:\n att_ws = []\n for idx, layer_idx in enumerate(\n reversed(range(len(self.encoder.encoders)))\n ):\n att_ws += [\n self.encoder.encoders[layer_idx].self_attn.attn[\n :, : self.num_heads_applied_guided_attn\n ]\n ]\n if idx + 1 == self.num_layers_applied_guided_attn:\n break\n att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_in, T_in)\n enc_attn_loss = self.attn_criterion(\n att_ws, ilens_ds_st, ilens_ds_st\n ) # TODO(unilight): is changing to ilens_ds_st right?\n loss = loss + enc_attn_loss\n report_keys += [{\"enc_attn_loss\": enc_attn_loss.item()}]\n # calculate for decoder\n if \"decoder\" in self.modules_applied_guided_attn:\n att_ws = []\n for idx, layer_idx in enumerate(\n reversed(range(len(self.decoder.decoders)))\n ):\n att_ws += [\n self.decoder.decoders[layer_idx].self_attn.attn[\n :, : self.num_heads_applied_guided_attn\n ]\n ]\n if idx + 1 == self.num_layers_applied_guided_attn:\n break\n att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_out)\n dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)\n loss = loss + dec_attn_loss\n report_keys += [{\"dec_attn_loss\": dec_attn_loss.item()}]\n # calculate for encoder-decoder\n if \"encoder-decoder\" in self.modules_applied_guided_attn:\n att_ws = []\n for idx, layer_idx in enumerate(\n reversed(range(len(self.decoder.decoders)))\n ):\n att_ws += [\n self.decoder.decoders[layer_idx].src_attn.attn[\n :, : self.num_heads_applied_guided_attn\n ]\n ]\n if idx + 1 == self.num_layers_applied_guided_attn:\n break\n att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_in)\n enc_dec_attn_loss = self.attn_criterion(\n att_ws, ilens_ds_st, olens_in\n ) # TODO(unilight): is changing to ilens_ds_st right?\n loss = loss + enc_dec_attn_loss\n report_keys += [{\"enc_dec_attn_loss\": enc_dec_attn_loss.item()}]\n\n # report extra information\n if 
self.use_scaled_pos_enc:\n report_keys += [\n {\"encoder_alpha\": self.encoder.embed[-1].alpha.data.item()},\n {\"decoder_alpha\": self.decoder.embed[-1].alpha.data.item()},\n ]\n self.reporter.report(report_keys)\n\n return loss\n\n def inference(self, x, inference_args, spemb=None, *args, **kwargs):\n \"\"\"Generate the sequence of features given the sequences of acoustic features.\n\n Args:\n x (Tensor): Input sequence of acoustic features (T, idim).\n inference_args (Namespace):\n - threshold (float): Threshold in inference.\n - minlenratio (float): Minimum length ratio in inference.\n - maxlenratio (float): Maximum length ratio in inference.\n spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).\n\n Returns:\n Tensor: Output sequence of features (L, odim).\n Tensor: Output sequence of stop probabilities (L,).\n Tensor: Encoder-decoder (source) attention weights (#layers, #heads, L, T).\n\n \"\"\"\n # get options\n threshold = inference_args.threshold\n minlenratio = inference_args.minlenratio\n maxlenratio = inference_args.maxlenratio\n use_att_constraint = getattr(\n inference_args, \"use_att_constraint\", False\n ) # keep compatibility\n if use_att_constraint:\n logging.warning(\n \"Attention constraint is not yet supported in Transformer. Not enabled.\"\n )\n\n # thin out input frames for reduction factor\n # (B, Lmax, idim) -> (B, Lmax // r, idim * r)\n if self.encoder_reduction_factor > 1:\n Lmax, idim = x.shape\n if Lmax % self.encoder_reduction_factor != 0:\n x = x[: -(Lmax % self.encoder_reduction_factor), :]\n x_ds = x.contiguous().view(\n int(Lmax / self.encoder_reduction_factor),\n idim * self.encoder_reduction_factor,\n )\n else:\n x_ds = x\n\n # forward encoder\n x_ds = x_ds.unsqueeze(0)\n hs, _ = self.encoder(x_ds, None)\n\n # integrate speaker embedding\n if self.spk_embed_dim is not None:\n spembs = spemb.unsqueeze(0)\n hs = self._integrate_with_spk_embed(hs, spembs)\n\n # set limits of length\n maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)\n minlen = int(hs.size(1) * minlenratio / self.reduction_factor)\n\n # initialize\n idx = 0\n ys = hs.new_zeros(1, 1, self.odim)\n outs, probs = [], []\n\n # forward decoder step-by-step\n z_cache = self.decoder.init_state(x)\n while True:\n # update index\n idx += 1\n\n # calculate output and stop prob at idx-th step\n y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)\n z, z_cache = self.decoder.forward_one_step(\n ys, y_masks, hs, cache=z_cache\n ) # (B, adim)\n outs += [\n self.feat_out(z).view(self.reduction_factor, self.odim)\n ] # [(r, odim), ...]\n probs += [torch.sigmoid(self.prob_out(z))[0]] # [(r), ...]\n\n # update next inputs\n ys = torch.cat(\n (ys, outs[-1][-1].view(1, 1, self.odim)), dim=1\n ) # (1, idx + 1, odim)\n\n # get attention weights\n att_ws_ = []\n for name, m in self.named_modules():\n if isinstance(m, MultiHeadedAttention) and \"src\" in name:\n att_ws_ += [m.attn[0, :, -1].unsqueeze(1)] # [(#heads, 1, T),...]\n if idx == 1:\n att_ws = att_ws_\n else:\n # [(#heads, l, T), ...]\n att_ws = [\n torch.cat([att_w, att_w_], dim=1)\n for att_w, att_w_ in zip(att_ws, att_ws_)\n ]\n\n # check whether to finish generation\n if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:\n # check mininum length\n if idx < minlen:\n continue\n outs = (\n torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)\n ) # (L, odim) -> (1, L, odim) -> (1, odim, L)\n if self.postnet is not None:\n outs = outs + self.postnet(outs) # (1, odim, L)\n outs = outs.transpose(2, 1).squeeze(0) # (L, 
odim)\n probs = torch.cat(probs, dim=0)\n break\n\n # concatenate attention weights -> (#layers, #heads, L, T)\n att_ws = torch.stack(att_ws, dim=0)\n\n return outs, probs, att_ws\n\n def calculate_all_attentions(\n self,\n xs,\n ilens,\n ys,\n olens,\n spembs=None,\n skip_output=False,\n keep_tensor=False,\n *args,\n **kwargs\n ):\n \"\"\"Calculate all of the attention weights.\n\n Args:\n xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).\n ilens (LongTensor): Batch of lengths of each input batch (B,).\n ys (Tensor): Batch of padded target features (B, Lmax, odim).\n olens (LongTensor): Batch of the lengths of each target (B,).\n spembs (Tensor, optional): Batch of speaker embedding vectors\n (B, spk_embed_dim).\n skip_output (bool, optional): Whether to skip calculate the final output.\n keep_tensor (bool, optional): Whether to keep original tensor.\n\n Returns:\n dict: Dict of attention weights and outputs.\n\n \"\"\"\n with torch.no_grad():\n # thin out input frames for reduction factor\n # (B, Lmax, idim) -> (B, Lmax // r, idim * r)\n if self.encoder_reduction_factor > 1:\n B, Lmax, idim = xs.shape\n if Lmax % self.encoder_reduction_factor != 0:\n xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]\n xs_ds = xs.contiguous().view(\n B,\n int(Lmax / self.encoder_reduction_factor),\n idim * self.encoder_reduction_factor,\n )\n ilens_ds = ilens.new(\n [ilen // self.encoder_reduction_factor for ilen in ilens]\n )\n else:\n xs_ds, ilens_ds = xs, ilens\n\n # forward encoder\n x_masks = self._source_mask(ilens_ds)\n hs, hs_masks = self.encoder(xs_ds, x_masks)\n\n # integrate speaker embedding\n if self.spk_embed_dim is not None:\n hs = self._integrate_with_spk_embed(hs, spembs)\n\n # thin out frames for reduction factor\n # (B, Lmax, odim) -> (B, Lmax//r, odim)\n if self.reduction_factor > 1:\n ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]\n olens_in = olens.new([olen // self.reduction_factor for olen in olens])\n else:\n ys_in, olens_in = ys, olens\n\n # add first zero frame and remove last frame for auto-regressive\n ys_in = self._add_first_frame_and_remove_last_frame(ys_in)\n\n # forward decoder\n y_masks = self._target_mask(olens_in)\n zs, _ = self.decoder(ys_in, y_masks, hs, hs_masks)\n\n # calculate final outputs\n if not skip_output:\n before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)\n if self.postnet is None:\n after_outs = before_outs\n else:\n after_outs = before_outs + self.postnet(\n before_outs.transpose(1, 2)\n ).transpose(1, 2)\n\n # modifiy mod part of output lengths due to reduction factor > 1\n if self.reduction_factor > 1:\n olens = olens.new([olen - olen % self.reduction_factor for olen in olens])\n\n # store into dict\n att_ws_dict = dict()\n if keep_tensor:\n for name, m in self.named_modules():\n if isinstance(m, MultiHeadedAttention):\n att_ws_dict[name] = m.attn\n if not skip_output:\n att_ws_dict[\"before_postnet_fbank\"] = before_outs\n att_ws_dict[\"after_postnet_fbank\"] = after_outs\n else:\n for name, m in self.named_modules():\n if isinstance(m, MultiHeadedAttention):\n attn = m.attn.cpu().numpy()\n if \"encoder\" in name:\n attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]\n elif \"decoder\" in name:\n if \"src\" in name:\n attn = [\n a[:, :ol, :il]\n for a, il, ol in zip(\n attn, ilens.tolist(), olens_in.tolist()\n )\n ]\n elif \"self\" in name:\n attn = [\n a[:, :l, :l] for a, l in zip(attn, olens_in.tolist())\n ]\n else:\n logging.warning(\"unknown attention module: \" + name)\n else:\n 
logging.warning(\"unknown attention module: \" + name)\n att_ws_dict[name] = attn\n if not skip_output:\n before_outs = before_outs.cpu().numpy()\n after_outs = after_outs.cpu().numpy()\n att_ws_dict[\"before_postnet_fbank\"] = [\n m[:l].T for m, l in zip(before_outs, olens.tolist())\n ]\n att_ws_dict[\"after_postnet_fbank\"] = [\n m[:l].T for m, l in zip(after_outs, olens.tolist())\n ]\n\n return att_ws_dict\n\n def _integrate_with_spk_embed(self, hs, spembs):\n \"\"\"Integrate speaker embedding with hidden states.\n\n Args:\n hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).\n spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).\n\n Returns:\n Tensor: Batch of integrated hidden state sequences (B, Tmax, adim)\n\n \"\"\"\n if self.spk_embed_integration_type == \"add\":\n # apply projection and then add to hidden states\n spembs = self.projection(F.normalize(spembs))\n hs = hs + spembs.unsqueeze(1)\n elif self.spk_embed_integration_type == \"concat\":\n # concat hidden states with spk embeds and then apply projection\n spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)\n hs = self.projection(torch.cat([hs, spembs], dim=-1))\n else:\n raise NotImplementedError(\"support only add or concat.\")\n\n return hs\n\n def _source_mask(self, ilens):\n \"\"\"Make masks for self-attention.\n\n Args:\n ilens (LongTensor or List): Batch of lengths (B,).\n\n Returns:\n Tensor: Mask tensor for self-attention.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n >>> ilens = [5, 3]\n >>> self._source_mask(ilens)\n tensor([[[1, 1, 1, 1, 1],\n [[1, 1, 1, 0, 0]]], dtype=torch.uint8)\n\n \"\"\"\n x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)\n return x_masks.unsqueeze(-2)\n\n def _target_mask(self, olens):\n \"\"\"Make masks for masked self-attention.\n\n Args:\n olens (LongTensor or List): Batch of lengths (B,).\n\n Returns:\n Tensor: Mask tensor for masked self-attention.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n >>> olens = [5, 3]\n >>> self._target_mask(olens)\n tensor([[[1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1]],\n [[1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0],\n [1, 1, 1, 0, 0]]], dtype=torch.uint8)\n\n \"\"\"\n y_masks = make_non_pad_mask(olens).to(next(self.parameters()).device)\n s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)\n return y_masks.unsqueeze(-2) & s_masks\n\n @property\n def base_plot_keys(self):\n \"\"\"Return base key names to plot during training.\n\n keys should match what `chainer.reporter` reports.\n If you add the key `loss`, the reporter will report `main/loss`\n and `validation/main/loss` values.\n also `loss.png` will be created as a figure visulizing `main/loss`\n and `validation/main/loss` values.\n\n Returns:\n list: List of strings which are base keys to plot during training.\n\n \"\"\"\n plot_keys = [\"loss\", \"l1_loss\", \"l2_loss\", \"bce_loss\"]\n if self.use_scaled_pos_enc:\n plot_keys += [\"encoder_alpha\", \"decoder_alpha\"]\n if self.use_guided_attn_loss:\n if \"encoder\" in self.modules_applied_guided_attn:\n plot_keys += [\"enc_attn_loss\"]\n if \"decoder\" in self.modules_applied_guided_attn:\n plot_keys += [\"dec_attn_loss\"]\n if \"encoder-decoder\" in self.modules_applied_guided_attn:\n plot_keys += [\"enc_dec_attn_loss\"]\n\n return plot_keys\n",
"# coding: utf-8\n\n# Copyright 2019 Hirofumi Inaguma\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nimport argparse\nimport importlib\nimport logging\nimport numpy\nimport pytest\nimport torch\n\nfrom test.test_e2e_asr_transformer import run_transformer_copy\nfrom test.test_e2e_asr_transformer import subsequent_mask\n\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n)\n\n\[email protected](\"module\", [\"pytorch\"])\ndef test_mask(module):\n T = importlib.import_module(\n \"espnet.nets.{}_backend.e2e_mt_transformer\".format(module)\n )\n m = T.subsequent_mask(3)\n print(m)\n print(subsequent_mask(3))\n assert (m.unsqueeze(0) == subsequent_mask(3)).all()\n\n\ndef make_arg(**kwargs):\n defaults = dict(\n adim=16,\n aheads=2,\n dropout_rate=0.0,\n transformer_attn_dropout_rate=None,\n elayers=2,\n eunits=16,\n dlayers=2,\n dunits=16,\n sym_space=\"<space>\",\n sym_blank=\"<blank>\",\n transformer_decoder_selfattn_layer_type=\"selfattn\",\n transformer_encoder_selfattn_layer_type=\"selfattn\",\n transformer_init=\"pytorch\",\n transformer_input_layer=\"conv2d\",\n transformer_length_normalized_loss=True,\n report_bleu=False,\n lsm_weight=0.001,\n wshare=4,\n ldconv_encoder_kernel_length=\"21_23_25_27_29_31_33_35_37_39_41_43\",\n ldconv_decoder_kernel_length=\"11_13_15_17_19_21\",\n ldconv_usebias=False,\n char_list=[\"<blank>\", \"a\", \"e\", \"i\", \"o\", \"u\"],\n tie_src_tgt_embedding=False,\n tie_classifier=False,\n multilingual=False,\n replace_sos=False,\n )\n defaults.update(kwargs)\n return argparse.Namespace(**defaults)\n\n\ndef prepare(backend, args):\n idim = 5\n odim = 5\n T = importlib.import_module(\n \"espnet.nets.{}_backend.e2e_mt_transformer\".format(backend)\n )\n\n model = T.E2E(idim, odim, args)\n batchsize = 5\n n_token = odim - 1\n if backend == \"pytorch\":\n y_src = (torch.randn(batchsize, 10) * n_token % n_token).long() + 1\n y_tgt = (torch.randn(batchsize, 11) * n_token % n_token).long() + 1\n # NOTE: + 1 to avoid to assign idx:0\n else:\n y_src = numpy.random.randn(batchsize, 10, idim).astype(numpy.int64) + 1\n y_tgt = numpy.random.randn(batchsize, 11, idim).astype(numpy.int64) + 1\n ilens = [3, 9, 10, 2, 3]\n olens = [4, 10, 11, 3, 4]\n for i in range(batchsize):\n y_src[i, ilens[i] :] = model.pad\n y_tgt[i, olens[i] :] = model.ignore_id\n\n data = []\n for i in range(batchsize):\n data.append(\n (\n \"utt%d\" % i,\n {\"input\": [{\"shape\": [ilens[i]]}], \"output\": [{\"shape\": [olens[i]]}]},\n )\n )\n if backend == \"pytorch\":\n return model, y_src, torch.tensor(ilens), y_tgt, data\n else:\n return model, y_src, ilens, y_tgt, data\n\n\[email protected](\"module\", [\"pytorch\"])\ndef test_transformer_mask(module):\n args = make_arg()\n model, y_src, ilens, y_tgt, data = prepare(module, args)\n from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos\n from espnet.nets.pytorch_backend.transformer.mask import target_mask\n\n yi, yo = add_sos_eos(y_tgt, model.sos, model.eos, model.ignore_id)\n y_mask = target_mask(yi, model.ignore_id)\n y_tgt = model.decoder.embed(yi)\n y_tgt[0, 3:] = float(\"nan\")\n a = model.decoder.decoders[0].self_attn\n a(y_tgt, y_tgt, y_tgt, y_mask)\n assert not numpy.isnan(a.attn[0, :, :3, :3].detach().numpy()).any()\n\n\nldconv_lconv_args = dict(\n transformer_decoder_selfattn_layer_type=\"lightconv\",\n transformer_encoder_selfattn_layer_type=\"lightconv\",\n wshare=4,\n ldconv_encoder_kernel_length=\"5_7_11\",\n 
ldconv_decoder_kernel_length=\"3_7\",\n ldconv_usebias=False,\n)\n\nldconv_dconv_args = dict(\n transformer_decoder_selfattn_layer_type=\"dynamicconv\",\n transformer_encoder_selfattn_layer_type=\"dynamicconv\",\n wshare=4,\n ldconv_encoder_kernel_length=\"5_7_11\",\n ldconv_decoder_kernel_length=\"3_7\",\n ldconv_usebias=False,\n)\n\n\[email protected](\n \"module, model_dict\",\n [\n (\"pytorch\", {}),\n (\"pytorch\", ldconv_lconv_args),\n (\"pytorch\", ldconv_dconv_args),\n (\"pytorch\", {\"report_bleu\": True}),\n (\"pytorch\", {\"tie_src_tgt_embedding\": True}),\n (\"pytorch\", {\"tie_classifier\": True}),\n (\"pytorch\", {\"tie_src_tgt_embedding\": True, \"tie_classifier\": True}),\n ],\n)\ndef test_transformer_trainable_and_decodable(module, model_dict):\n args = make_arg(**model_dict)\n model, y_src, ilens, y_tgt, data = prepare(module, args)\n\n # test beam search\n trans_args = argparse.Namespace(\n beam_size=1,\n penalty=0.0,\n ctc_weight=0.0,\n maxlenratio=1.0,\n lm_weight=0,\n minlenratio=0,\n nbest=1,\n tgt_lang=False,\n )\n if module == \"pytorch\":\n # test trainable\n optim = torch.optim.Adam(model.parameters(), 0.01)\n loss = model(y_src, ilens, y_tgt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # test attention plot\n attn_dict = model.calculate_all_attentions(y_src[0:1], ilens[0:1], y_tgt[0:1])\n from espnet.nets.pytorch_backend.transformer import plot\n\n plot.plot_multi_head_attention(data, attn_dict, \"/tmp/espnet-test\")\n\n # test decodable\n with torch.no_grad():\n nbest = model.translate(\n [y_src[0, : ilens[0]].numpy()], trans_args, args.char_list\n )\n print(y_tgt[0])\n print(nbest[0][\"yseq\"][1:-1])\n else:\n raise NotImplementedError\n\n\nif __name__ == \"__main__\":\n run_transformer_copy()\n",
"from collections import defaultdict\nfrom typing import Dict\nfrom typing import List\n\nimport torch\n\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttAdd\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttCov\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttCovLoc\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttDot\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttForward\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttForwardTA\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttLoc\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttLoc2D\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttLocRec\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttMultiHeadAdd\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttMultiHeadDot\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttMultiHeadLoc\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttMultiHeadMultiResLoc\nfrom espnet.nets.pytorch_backend.rnn.attentions import NoAtt\nfrom espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention\n\n\nfrom espnet2.train.abs_espnet_model import AbsESPnetModel\n\n\[email protected]_grad()\ndef calculate_all_attentions(\n model: AbsESPnetModel, batch: Dict[str, torch.Tensor]\n) -> Dict[str, List[torch.Tensor]]:\n \"\"\"Derive the outputs from the all attention layers\n\n Args:\n model:\n batch: same as forward\n Returns:\n return_dict: A dict of a list of tensor.\n key_names x batch x (D1, D2, ...)\n\n \"\"\"\n bs = len(next(iter(batch.values())))\n assert all(len(v) == bs for v in batch.values()), {\n k: v.shape for k, v in batch.items()\n }\n\n # 1. Register forward_hook fn to save the output from specific layers\n outputs = {}\n handles = {}\n for name, modu in model.named_modules():\n\n def hook(module, input, output, name=name):\n if isinstance(module, MultiHeadedAttention):\n # NOTE(kamo): MultiHeadedAttention doesn't return attention weight\n # attn: (B, Head, Tout, Tin)\n outputs[name] = module.attn.detach().cpu()\n elif isinstance(module, AttLoc2D):\n c, w = output\n # w: previous concate attentions\n # w: (B, nprev, Tin)\n att_w = w[:, -1].detach().cpu()\n outputs.setdefault(name, []).append(att_w)\n elif isinstance(module, (AttCov, AttCovLoc)):\n c, w = output\n assert isinstance(w, list), type(w)\n # w: list of previous attentions\n # w: nprev x (B, Tin)\n att_w = w[-1].detach().cpu()\n outputs.setdefault(name, []).append(att_w)\n elif isinstance(module, AttLocRec):\n # w: (B, Tin)\n c, (w, (att_h, att_c)) = output\n att_w = w.detach().cpu()\n outputs.setdefault(name, []).append(att_w)\n elif isinstance(\n module,\n (\n AttMultiHeadDot,\n AttMultiHeadAdd,\n AttMultiHeadLoc,\n AttMultiHeadMultiResLoc,\n ),\n ):\n c, w = output\n # w: nhead x (B, Tin)\n assert isinstance(w, list), type(w)\n att_w = [_w.detach().cpu() for _w in w]\n outputs.setdefault(name, []).append(att_w)\n elif isinstance(\n module, (AttAdd, AttDot, AttForward, AttForwardTA, AttLoc, NoAtt,),\n ):\n c, w = output\n att_w = w.detach().cpu()\n outputs.setdefault(name, []).append(att_w)\n\n handle = modu.register_forward_hook(hook)\n handles[name] = handle\n\n # 2. Just forward one by one sample.\n # Batch-mode can't be used to keep requirements small for each models.\n keys = []\n for k in batch:\n if not k.endswith(\"_lengths\"):\n keys.append(k)\n\n return_dict = defaultdict(list)\n for ibatch in range(bs):\n # *: (B, L, ...) 
-> (1, L2, ...)\n _sample = {\n k: batch[k][ibatch, None, : batch[k + \"_lengths\"][ibatch]]\n if k + \"_lengths\" in batch\n else batch[k][ibatch, None]\n for k in keys\n }\n\n # *_lengths: (B,) -> (1,)\n _sample.update(\n {\n k + \"_lengths\": batch[k + \"_lengths\"][ibatch, None]\n for k in keys\n if k + \"_lengths\" in batch\n }\n )\n model(**_sample)\n\n # Derive the attention results\n for name, output in outputs.items():\n if isinstance(output, list):\n if isinstance(output[0], list):\n # output: nhead x (Tout, Tin)\n output = torch.stack(\n [\n # Tout x (1, Tin) -> (Tout, Tin)\n torch.cat([o[idx] for o in output], dim=0)\n for idx in range(len(output[0]))\n ],\n dim=0,\n )\n else:\n # Tout x (1, Tin) -> (Tout, Tin)\n output = torch.cat(output, dim=0)\n else:\n # output: (1, NHead, Tout, Tin) -> (NHead, Tout, Tin)\n output = output.squeeze(0)\n # output: (Tout, Tin) or (NHead, Tout, Tin)\n return_dict[name].append(output)\n outputs.clear()\n\n # 3. Remove all hooks\n for _, handle in handles.items():\n handle.remove()\n\n return dict(return_dict)\n",
"from distutils.version import LooseVersion\n\nimport h5py\nimport kaldiio\nimport numpy as np\nimport pytest\nimport soundfile\nimport torch\n\nfrom espnet2.fileio.npy_scp import NpyScpWriter\nfrom espnet2.fileio.sound_scp import SoundScpWriter\nfrom espnet2.train.iterable_dataset import IterableESPnetDataset\n\n\ndef preprocess(id: str, data):\n new_data = {}\n for k, v in data.items():\n if isinstance(v, str):\n if v == \"hello world\":\n new_data[k] = np.array([0])\n elif v == \"foo bar\":\n new_data[k] = np.array([1])\n else:\n new_data[k] = np.array([2])\n else:\n new_data[k] = v\n return new_data\n\n\[email protected]\ndef sound_scp(tmp_path):\n p = tmp_path / \"wav.scp\"\n w = SoundScpWriter(tmp_path / \"data\", p)\n w[\"a\"] = 16000, np.random.randint(-100, 100, (160000,), dtype=np.int16)\n w[\"b\"] = 16000, np.random.randint(-100, 100, (80000,), dtype=np.int16)\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_sound_scp(sound_scp):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(sound_scp, \"data1\", \"sound\")], preprocess=preprocess,\n )\n print(dataset)\n print(dataset.names())\n assert dataset.has_name(\"data1\")\n\n for key, data in dataset:\n if key == \"a\":\n assert data[\"data1\"].shape == (160000,)\n if key == \"b\":\n assert data[\"data1\"].shape == (80000,)\n\n\[email protected]\ndef pipe_wav(tmp_path):\n p = tmp_path / \"wav.scp\"\n soundfile.write(\n tmp_path / \"a.wav\",\n np.random.randint(-100, 100, (160000,), dtype=np.int16),\n 16000,\n )\n soundfile.write(\n tmp_path / \"b.wav\",\n np.random.randint(-100, 100, (80000,), dtype=np.int16),\n 16000,\n )\n with p.open(\"w\") as f:\n f.write(f\"a {tmp_path / 'a.wav'}\\n\")\n f.write(f\"b {tmp_path / 'b.wav'}\\n\")\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_pipe_wav(pipe_wav):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(pipe_wav, \"data1\", \"pipe_wav\")], preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert data[\"data1\"].shape == (160000,)\n if key == \"b\":\n assert data[\"data1\"].shape == (80000,)\n\n\[email protected]\ndef feats_scp(tmp_path):\n p = tmp_path / \"feats.scp\"\n p2 = tmp_path / \"feats.ark\"\n with kaldiio.WriteHelper(f\"ark,scp:{p2},{p}\") as w:\n w[\"a\"] = np.random.randn(100, 80)\n w[\"b\"] = np.random.randn(150, 80)\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_feats_scp(feats_scp,):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(feats_scp, \"data2\", \"kaldi_ark\")], preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert data[\"data2\"].shape == (100, 80,)\n if key == \"b\":\n assert data[\"data2\"].shape == (150, 80,)\n\n\[email protected]\ndef npy_scp(tmp_path):\n p = tmp_path / \"npy.scp\"\n w = NpyScpWriter(tmp_path / \"data\", p)\n w[\"a\"] = np.random.randn(100, 80)\n w[\"b\"] = np.random.randn(150, 80)\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_npy_scp(npy_scp):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(npy_scp, \"data3\", \"npy\")], preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert 
data[\"data3\"].shape == (100, 80,)\n if key == \"b\":\n assert data[\"data3\"].shape == (150, 80,)\n\n\[email protected]\ndef h5file_1(tmp_path):\n p = tmp_path / \"file.h5\"\n with h5py.File(p, \"w\") as w:\n w[\"a\"] = np.random.randn(100, 80)\n w[\"b\"] = np.random.randn(150, 80)\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_h5file_1(h5file_1):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(h5file_1, \"data4\", \"hdf5\")], preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert data[\"data4\"].shape == (100, 80,)\n if key == \"b\":\n assert data[\"data4\"].shape == (150, 80,)\n\n\[email protected]\ndef shape_file(tmp_path):\n p = tmp_path / \"shape.txt\"\n with p.open(\"w\") as f:\n f.write(\"a 100,80\\n\")\n f.write(\"b 150,80\\n\")\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_rand_float(shape_file):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(shape_file, \"data5\", \"rand_float\")],\n preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert data[\"data5\"].shape == (100, 80,)\n if key == \"b\":\n assert data[\"data5\"].shape == (150, 80,)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_rand_int(shape_file):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(shape_file, \"data6\", \"rand_int_0_10\")],\n preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert data[\"data6\"].shape == (100, 80,)\n if key == \"b\":\n assert data[\"data6\"].shape == (150, 80,)\n\n\[email protected]\ndef text(tmp_path):\n p = tmp_path / \"text\"\n with p.open(\"w\") as f:\n f.write(\"a hello world\\n\")\n f.write(\"b foo bar\\n\")\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_text(text):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(text, \"data7\", \"text\")], preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert tuple(data[\"data7\"]) == (0,)\n if key == \"b\":\n assert tuple(data[\"data7\"]) == (1,)\n\n\[email protected]\ndef text_float(tmp_path):\n p = tmp_path / \"shape.txt\"\n with p.open(\"w\") as f:\n f.write(\"a 1.4 3.4\\n\")\n f.write(\"b 0.9 9.3\\n\")\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_text_float(text_float):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(text_float, \"data8\", \"text_float\")],\n preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert all((data[\"data8\"]) == np.array([1.4, 3.4], dtype=np.float32))\n if key == \"b\":\n assert all((data[\"data8\"]) == np.array([0.9, 9.3], dtype=np.float32))\n\n\[email protected]\ndef text_int(tmp_path):\n p = tmp_path / \"shape.txt\"\n with p.open(\"w\") as f:\n f.write(\"a 0 1 2\\n\")\n f.write(\"b 2 3 4\\n\")\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_text_int(text_int):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(text_int, \"data8\", \"text_int\")], preprocess=preprocess,\n 
)\n\n for key, data in dataset:\n if key == \"a\":\n assert tuple(data[\"data8\"]) == (0, 1, 2)\n if key == \"b\":\n assert tuple(data[\"data8\"]) == (2, 3, 4)\n\n\[email protected]\ndef csv_float(tmp_path):\n p = tmp_path / \"shape.txt\"\n with p.open(\"w\") as f:\n f.write(\"a 1.4,3.4\\n\")\n f.write(\"b 0.9,9.3\\n\")\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_csv_float(csv_float):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(csv_float, \"data8\", \"csv_float\")], preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert all((data[\"data8\"]) == np.array([1.4, 3.4], dtype=np.float32))\n if key == \"b\":\n assert all((data[\"data8\"]) == np.array([0.9, 9.3], dtype=np.float32))\n\n\[email protected]\ndef csv_int(tmp_path):\n p = tmp_path / \"shape.txt\"\n with p.open(\"w\") as f:\n f.write(\"a 0,1,2\\n\")\n f.write(\"b 2,3,4\\n\")\n return str(p)\n\n\[email protected](\n LooseVersion(torch.__version__) < LooseVersion(\"1.2\"), reason=\"require pytorch>=1.2\"\n)\ndef test_ESPnetDataset_csv_int(csv_int):\n dataset = IterableESPnetDataset(\n path_name_type_list=[(csv_int, \"data8\", \"csv_int\")], preprocess=preprocess,\n )\n\n for key, data in dataset:\n if key == \"a\":\n assert tuple(data[\"data8\"]) == (0, 1, 2)\n if key == \"b\":\n assert tuple(data[\"data8\"]) == (2, 3, 4)\n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.Module.__init__",
"torch.cat",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.stack"
],
[
"numpy.random.randn",
"torch.no_grad",
"torch.randn",
"torch.tensor"
],
[
"torch.no_grad",
"torch.cat"
],
[
"numpy.array",
"numpy.random.randn",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
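The test file in the row above exercises several loader types that all share the Kaldi-style scp convention: a plain-text table mapping an utterance key to a payload (a wav path, an ark offset, an npy file). As a minimal sketch of that convention, independent of ESPnet and assuming only `soundfile`; `read_scp` and `iterate_sound_scp` are illustrative names, not ESPnet APIs:

```python
import soundfile


def read_scp(path):
    """Parse a Kaldi-style scp file into an {utterance_id: value} dict.

    Each non-empty line is "<key> <value>", e.g. "a /tmp/data/a.wav".
    """
    table = {}
    with open(path) as f:
        for line in f:
            if not line.strip():
                continue
            key, value = line.rstrip("\n").split(maxsplit=1)
            table[key] = value
    return table


def iterate_sound_scp(scp_path):
    """Yield (key, (rate, samples)) pairs, analogous to the 'sound' loader."""
    for key, wav_path in read_scp(scp_path).items():
        samples, rate = soundfile.read(wav_path, dtype="int16")
        yield key, (rate, samples)
```

Against the `wav.scp` fixtures above, this would yield key `a` with a 160000-sample array and key `b` with an 80000-sample array.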
mpsilfve/fairseq | [
"eb228ee74c6bc9803eb7dbd398d8cda16c55ccd2",
"eb228ee74c6bc9803eb7dbd398d8cda16c55ccd2"
] | [
"fairseq/optim/adam.py",
"fairseq/modules/linearized_convolution.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport math\nfrom collections.abc import Collection\nfrom dataclasses import dataclass, field\nfrom typing import List\n\nimport torch\nimport torch.distributed as dist\nimport torch.optim\nfrom fairseq.dataclass import FairseqDataclass\nfrom fairseq.optim import FairseqOptimizer, register_optimizer\nfrom fairseq.optim.fused_adam import get_fused_adam_class\nfrom omegaconf import II, DictConfig\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass FairseqAdamConfig(FairseqDataclass):\n adam_betas: str = field(\n default=\"(0.9, 0.999)\", metadata={\"help\": \"betas for Adam optimizer\"}\n )\n adam_eps: float = field(\n default=1e-8, metadata={\"help\": \"epsilon for Adam optimizer\"}\n )\n weight_decay: float = field(default=0.0, metadata={\"help\": \"weight decay\"})\n use_old_adam: bool = field(\n default=False, metadata={\"help\": \"Use fairseq.optim.adam.Adam\"}\n )\n # TODO common vars below in parent\n tpu: bool = II(\"common.tpu\")\n lr: List[float] = II(\"optimization.lr\")\n\n\n@register_optimizer(\"adam\", dataclass=FairseqAdamConfig)\nclass FairseqAdam(FairseqOptimizer):\n \"\"\"Adam optimizer for fairseq.\n\n Important note: this optimizer corresponds to the \"AdamW\" variant of\n Adam in its weight decay behavior. As such, it is most closely\n analogous to torch.optim.AdamW from PyTorch.\n \"\"\"\n\n def __init__(self, cfg: DictConfig, params):\n super().__init__(cfg)\n fused_adam_cls = get_fused_adam_class()\n use_fused_adam = (\n not getattr(cfg, \"use_old_adam\", False)\n and fused_adam_cls is not None\n and torch.cuda.is_available()\n )\n if getattr(cfg, \"tpu\", False):\n # on TPUs we use the Adam defined here, since it\n # automatically casts gradients to FP32\n self._optimizer = Adam(params, **self.optimizer_config)\n elif use_fused_adam:\n logger.info(\"using FusedAdam\")\n self._optimizer = fused_adam_cls(params, **self.optimizer_config)\n else:\n self._optimizer = Adam(params, **self.optimizer_config)\n\n @property\n def optimizer_config(self):\n \"\"\"\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. 
This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n \"\"\"\n return {\n \"lr\": self.cfg.lr[0]\n if isinstance(self.cfg.lr, Collection)\n else self.cfg.lr,\n \"betas\": eval(self.cfg.adam_betas),\n \"eps\": self.cfg.adam_eps,\n \"weight_decay\": self.cfg.weight_decay,\n }\n\n def average_params(self):\n \"\"\"Reduce Params is only used during BMUF distributed training.\"\"\"\n state_dict = self.optimizer.state_dict()\n total_gpus = float(dist.get_world_size())\n\n for _, value in state_dict[\"state\"].items():\n value[\"exp_avg\"] /= total_gpus\n value[\"exp_avg_sq\"] /= total_gpus\n dist.all_reduce(value[\"exp_avg\"], op=dist.ReduceOp.SUM)\n dist.all_reduce(value[\"exp_avg_sq\"], op=dist.ReduceOp.SUM)\n\n\nclass Adam(torch.optim.Optimizer):\n r\"\"\"Implements Adam algorithm.\n\n This implementation is modified from torch.optim.Adam based on:\n `Fixed Weight Decay Regularization in Adam`\n (see https://arxiv.org/abs/1711.05101)\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n ):\n defaults = dict(\n lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad\n )\n super(Adam, self).__init__(params, defaults)\n\n @property\n def supports_memory_efficient_fp16(self):\n return True\n\n @property\n def supports_flat_params(self):\n return True\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError(\n \"Adam does not support sparse gradients, please consider SparseAdam instead\"\n )\n amsgrad = group.get(\"amsgrad\", False)\n\n p_data_fp32 = p.data\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p_data_fp32)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p_data_fp32)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. 
values\n state[\"max_exp_avg_sq\"] = torch.zeros_like(p_data_fp32)\n else:\n state[\"exp_avg\"] = state[\"exp_avg\"].to(p_data_fp32)\n state[\"exp_avg_sq\"] = state[\"exp_avg_sq\"].to(p_data_fp32)\n if amsgrad:\n state[\"max_exp_avg_sq\"] = state[\"max_exp_avg_sq\"].to(\n p_data_fp32\n )\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n if amsgrad:\n max_exp_avg_sq = state[\"max_exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = max_exp_avg_sq.sqrt().add_(group[\"eps\"])\n else:\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n step_size = group[\"lr\"] * math.sqrt(bias_correction2) / bias_correction1\n\n if group[\"weight_decay\"] != 0:\n p_data_fp32.add_(\n p_data_fp32, alpha=-group[\"weight_decay\"] * group[\"lr\"]\n )\n\n p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)\n\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p.data.copy_(p_data_fp32)\n\n return loss\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn.functional as F\nfrom fairseq import utils\nfrom fairseq.incremental_decoding_utils import with_incremental_state\n\nfrom .conv_tbc import ConvTBC\n\nfrom typing import Dict, Optional\nfrom torch import Tensor\n\n@with_incremental_state\nclass LinearizedConvolution(ConvTBC):\n \"\"\"An optimized version of nn.Conv1d.\n\n At training time, this module uses ConvTBC, which is an optimized version\n of Conv1d. At inference time, it optimizes incremental generation (i.e.,\n one time step at a time) by replacing the convolutions with linear layers.\n Note that the input order changes from training to inference.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, **kwargs):\n super().__init__(in_channels, out_channels, kernel_size, **kwargs)\n self._linearized_weight = None\n self.register_backward_hook(self._clear_linearized_weight)\n\n def state_dict(self, destination=None, prefix=\"\", keep_vars=False):\n state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars)\n # don't store redundant _linearized_weight in checkpoints\n if prefix + \"_linearized_weight\" in state:\n del state[prefix + \"_linearized_weight\"]\n return state\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n if prefix + \"_linearized_weight\" in state_dict:\n del state_dict[prefix + \"_linearized_weight\"]\n\n @torch.jit.export\n def forward(self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None):\n \"\"\"\n Args:\n incremental_state: Used to buffer signal; if not None, then input is\n expected to contain a single frame. 
If the input order changes\n between time steps, call reorder_incremental_state.\n Input:\n Time x Batch x Channel during training\n Batch x Time x Channel during inference\n \"\"\"\n if incremental_state is None:\n output = self.conv_tbc(input)\n if self.kernel_size[0] > 1 and self.padding[0] > 0:\n # remove future timesteps added by padding\n output = output[: -self.padding[0], :, :]\n return output\n\n # reshape weight\n weight = self._get_linearized_weight()\n kw = self.kernel_size[0]\n\n bsz = input.size(0) # input: bsz x len x dim\n if kw > 1:\n input = input.data\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is None:\n input_buffer = input.new(bsz, kw, input.size(2)).zero_()\n self._set_input_buffer(incremental_state, input_buffer)\n else:\n # shift buffer\n input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()\n # append next input\n input_buffer[:, -1, :] = input[:, -1, :]\n input = input_buffer\n with torch.no_grad():\n output = F.linear(input.view(bsz, -1), weight, self.bias)\n return output.view(bsz, 1, -1)\n\n @torch.jit.unused\n def reorder_incremental_state(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order):\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n input_buffer = input_buffer.index_select(0, new_order)\n self._set_input_buffer(incremental_state, input_buffer)\n\n @torch.jit.unused\n def _get_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]):\n return utils.get_incremental_state(self, incremental_state, \"input_buffer\")\n\n @torch.jit.unused\n def _set_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer):\n return utils.set_incremental_state(\n self, incremental_state, \"input_buffer\", new_buffer\n )\n\n @torch.jit.unused\n def _get_linearized_weight(self):\n if self._linearized_weight is None:\n kw = self.kernel_size[0]\n weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()\n assert weight.size() == (self.out_channels, kw, self.in_channels)\n return weight.view(self.out_channels, -1)\n return self._linearized_weight\n\n @torch.jit.unused\n def _clear_linearized_weight(self, *args):\n self._linearized_weight = None\n"
] | [
[
"torch.max",
"torch.zeros_like",
"torch.cuda.is_available",
"torch.distributed.get_world_size",
"torch.distributed.all_reduce"
],
[
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
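The `Adam` class in `fairseq/optim/adam.py` above implements the decoupled ("AdamW") weight-decay rule: the moment estimates are built from the raw gradient, while weight decay shrinks the parameter directly by `-weight_decay * lr` rather than entering the gradient. A single-tensor sketch of that same update, assuming plain float32 tensors and using only standard in-place `torch` ops; the function name and `state` dict are illustrative:

```python
import math

import torch


def adamw_step(p, grad, state, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
               weight_decay=0.0):
    """One decoupled-weight-decay Adam update on tensor p, in place.

    p and grad are plain tensors (call under torch.no_grad() if p requires
    grad); state carries 'step', 'exp_avg', 'exp_avg_sq' between calls.
    """
    if not state:
        state["step"] = 0
        state["exp_avg"] = torch.zeros_like(p)
        state["exp_avg_sq"] = torch.zeros_like(p)

    exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
    beta1, beta2 = betas
    state["step"] += 1

    # Biased first and second moment estimates of the gradient.
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

    bias_correction1 = 1 - beta1 ** state["step"]
    bias_correction2 = 1 - beta2 ** state["step"]
    step_size = lr * math.sqrt(bias_correction2) / bias_correction1

    # Decoupled weight decay: shrink the parameter, not the gradient.
    if weight_decay != 0:
        p.add_(p, alpha=-weight_decay * lr)

    p.addcdiv_(exp_avg, exp_avg_sq.sqrt().add_(eps), value=-step_size)
```

This mirrors the `step()` body above (moment updates, bias correction, then `addcdiv_`), minus the fp16-to-fp32 casting and sparse-gradient checks.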
sdjohnson-astro/redshifting | [
"6073123bf3ea6e48de410d99521e418abc980c99",
"6073123bf3ea6e48de410d99521e418abc980c99"
] | [
"cubs_compare_spec1D.py",
"redshift.py"
] | [
"#!/usr/bin/env python\nimport glob\nimport argparse\nfrom astropy.table import Table\nimport numpy as np\n\n# Set up the command line argument parser\nparser = argparse.ArgumentParser(description='Compare two versions of spec1D files from CUBS IMACS or LDSS3')\nparser.add_argument('-d1', metavar='directory 1', type=str, help='Parent directory 1', required=True)\nparser.add_argument('-d2', metavar='directory 2', type=str, help='Parent directory 2', required=True)\nparser.add_argument('-m', metavar='maskname', type=str, help='mask name', required=True)\n\nargs = parser.parse_args()\n\n\nmask = Table.read('{}/{}_spec1D/{}_objects.fits'.format(args.d1, args.m, args.m))\nmask['maxabsDflux'] = 0.0\n\nfor object in mask:\n \n try:\n \n filename1 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d1, args.m, args.m, object['row'], object['id'])\n spec1 = Table.read(filename1)\n \n filename2 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d2, args.m, args.m, object['row'], object['id'])\n spec2 = Table.read(filename2)\n \n print(np.max(np.abs(spec1['flux'] - spec2['flux'])))\n object['maxabsDflux'] = np.max(np.abs(spec1['flux'] - spec2['flux']))\n \n except:\n \n print('file not found')\n \nprint(mask)\n\nmaxabsDiff = np.max(mask['maxabsDflux'])\n\nif maxabsDiff > 0.0:\n \n print('Differences found!!!!!!!!!!!')\n \nelse:\n \n print('No difference -- ok')",
"import os\nimport numpy as np\n#from astropy.table import Table, Column, Row\nfrom scipy.interpolate import interp1d, InterpolatedUnivariateSpline, UnivariateSpline\nfrom lmfit import Model, Parameters\nimport corner\nimport time\nimport warnings\nfrom matplotlib import pyplot as plt\n\n# Set constants\nc_kms = 299792.458\n\n# Read in the galaxy eigenspectra\neigen_galaxy = np.load(os.environ['REDSHIFTING'] + '/eigenspectra/eigen_galaxy.npy')\n\n# Interpolate the Eigenspectra for galaxies\neigen_galaxy1_interp = interp1d(eigen_galaxy['wave'], eigen_galaxy['flux1'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_galaxy2_interp = interp1d(eigen_galaxy['wave'], eigen_galaxy['flux2'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_galaxy3_interp = interp1d(eigen_galaxy['wave'], eigen_galaxy['flux3'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_galaxy4_interp = interp1d(eigen_galaxy['wave'], eigen_galaxy['flux4'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\n\n\n# Read in the quasar eigenspectra\neigen_qso = np.load(os.environ['REDSHIFTING'] + '/eigenspectra/eigen_qso.npy')\neigen_qso1_interp = interp1d(eigen_qso['wave'], eigen_qso['flux1'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_qso2_interp = interp1d(eigen_qso['wave'], eigen_qso['flux2'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_qso3_interp = interp1d(eigen_qso['wave'], eigen_qso['flux3'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_qso4_interp = interp1d(eigen_qso['wave'], eigen_qso['flux4'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\n\n\n# Read in the star eigenspectra\neigen_star = np.load(os.environ['REDSHIFTING'] + '/eigenspectra/eigen_star.npy')\neigen_star1_interp = interp1d(eigen_star['wave'], eigen_star['flux1'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star2_interp = interp1d(eigen_star['wave'], eigen_star['flux2'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star3_interp = interp1d(eigen_star['wave'], eigen_star['flux3'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star4_interp = interp1d(eigen_star['wave'], eigen_star['flux4'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star5_interp = interp1d(eigen_star['wave'], eigen_star['flux5'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star6_interp = interp1d(eigen_star['wave'], eigen_star['flux6'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star7_interp = interp1d(eigen_star['wave'], eigen_star['flux7'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star8_interp = interp1d(eigen_star['wave'], eigen_star['flux8'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star9_interp = interp1d(eigen_star['wave'], eigen_star['flux9'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star10_interp = interp1d(eigen_star['wave'], eigen_star['flux10'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_star11_interp = interp1d(eigen_star['wave'], eigen_star['flux11'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\n\n\n# Read in quasar HW templates\nquasar_HW = np.load(os.environ['REDSHIFTING'] + 
'/eigenspectra/quasar_HW.npy')\nquasar_HW1_interp = interp1d(quasar_HW['wave'], quasar_HW['flux1'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\nquasar_HW2_interp = interp1d(quasar_HW['wave'], quasar_HW['flux2'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\n\n\n\n# Read in LATIS z=2-3 galaxy \neigen_latis = np.load(os.environ['REDSHIFTING'] + '/eigenspectra/eigen_latis.npy')\neigen_latis1_interp = interp1d(eigen_latis['wave'], eigen_latis['flux1'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_latis2_interp = interp1d(eigen_latis['wave'], eigen_latis['flux2'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_latis3_interp = interp1d(eigen_latis['wave'], eigen_latis['flux3'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_latis4_interp = interp1d(eigen_latis['wave'], eigen_latis['flux4'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\neigen_latis5_interp = interp1d(eigen_latis['wave'], eigen_latis['flux5'], fill_value=(-999.0, -999.0), assume_sorted=False, bounds_error=False)\n\n\n# calculat a redshift plus a velocity in km/s\ndef dz(z, dv):\n znew = dv/c_kms*(1.0 + z) + z\n\n return znew\n\n# calculate the velocity difference in km/s between two redshfits\ndef dv(z0, z1):\n \n dv = (z1 - z0)/(1.0 + z0)*c_kms\n \n return dv\n\n\n# Return a galaxy spectrum model evaluated on wavelength grid wave\n# at redshift z with SDSS eigenspectra coefficients eigen1, eigen2, eigen3, eigen4\n# Optional low-order polynomials have coffieicnts fluxcal0, fluxcal1, fluxcal2\n# where 0, 1, and 2 are for 0th, 1st, and 2nd order polynomials\n# There are better ways to implement this last option\ndef eigensum_galaxy(wave, z, eigen1, eigen2, eigen3, eigen4,\n fluxcal0, fluxcal1, fluxcal2):\n \n wave_zp1 = wave/(1.0 + z)\n flux = eigen_galaxy1_interp(wave_zp1)*eigen1 \\\n + eigen_galaxy2_interp(wave_zp1)*eigen2 \\\n + eigen_galaxy3_interp(wave_zp1)*eigen3 \\\n + eigen_galaxy4_interp(wave_zp1)*eigen4 \\\n + fluxcal0 + fluxcal1*wave + fluxcal2*wave**2\n \n return flux\n \n\n# Same as eigensum_galaxy but with the SDSS quasar eigenspectra\ndef eigensum_qso(wave, z, eigen1, eigen2, eigen3, eigen4,\n fluxcal0, fluxcal1, fluxcal2):\n \n wave_zp1 = wave/(1.0 + z)\n flux = eigen_qso1_interp(wave_zp1)*eigen1 \\\n + eigen_qso2_interp(wave_zp1)*eigen2 \\\n + eigen_qso3_interp(wave_zp1)*eigen3 \\\n + eigen_qso4_interp(wave_zp1)*eigen4 \\\n + fluxcal0 + fluxcal1*wave + fluxcal2*wave**2\n \n return flux\n \n# Same as eigensum_galaxy but with the LATIS galaxy templates\ndef eigensum_latis(wave, z, eigen1, eigen2, eigen3, eigen4, eigen5, \n fluxcal0, fluxcal1, fluxcal2):\n \n wave_zp1 = wave/(1.0 + z)\n flux = eigen_latis1_interp(wave_zp1)*eigen1 \\\n + eigen_latis2_interp(wave_zp1)*eigen2 \\\n + eigen_latis3_interp(wave_zp1)*eigen3 \\\n + eigen_latis4_interp(wave_zp1)*eigen4 \\\n + eigen_latis5_interp(wave_zp1)*eigen5 \\\n + fluxcal0 + fluxcal1*wave + fluxcal2*wave**2\n \n return flux\n\n# Same as eigensum_galaxy but with SDSS stellar spectral templates.\ndef eigensum_star(wave, z, eigen1, eigen2, eigen3, eigen4, eigen5, eigen6, eigen7,\n eigen8, eigen9, eigen10, eigen11,\n fluxcal0, fluxcal1, fluxcal2):\n \n wave_zp1 = wave/(1.0 + z)\n flux = eigen_star1_interp(wave_zp1)*eigen1 \\\n + eigen_star2_interp(wave_zp1)*eigen2 \\\n + eigen_star3_interp(wave_zp1)*eigen3 \\\n + eigen_star4_interp(wave_zp1)*eigen4 \\\n + eigen_star5_interp(wave_zp1)*eigen5 \\\n + 
eigen_star6_interp(wave_zp1)*eigen6 \\\n + eigen_star7_interp(wave_zp1)*eigen7 \\\n + eigen_star8_interp(wave_zp1)*eigen8 \\\n + eigen_star9_interp(wave_zp1)*eigen9 \\\n + eigen_star10_interp(wave_zp1)*eigen10 \\\n + eigen_star11_interp(wave_zp1)*eigen11 \\\n + fluxcal0 + fluxcal1*wave + fluxcal2*wave**2\n \n return flux\n\n# Get best-fit galaxy model at a redshift. flux calibration polynomials not currently included\ndef fitatz_galaxy(spec, z):\n \n \n # Evaluate eigenspecra at needed redshifts\n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n wave_zp1 = spec['wave']/(1.0 + z)\n eigen1 = eigen_galaxy1_interp(wave_zp1)\n eigen2 = eigen_galaxy2_interp(wave_zp1)\n eigen3 = eigen_galaxy3_interp(wave_zp1)\n eigen4 = eigen_galaxy4_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)*spec['mask']\n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, constant, linear, square])\n A = At.transpose()\n \n one_over_sigmasquared = 1/spec['error']**2\n one_over_sigmasquared[~np.isfinite(one_over_sigmasquared)] = 0.0\n Ci = np.diag(one_over_sigmasquared)\n A = At.transpose()\n Y = np.matrix(spec['flux']).transpose()\n \n\n\n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask,\n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n\n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 \\\n + eigenvalues[4]*constant + eigenvalues[5]*linear + eigenvalues[6]*square\n \n chi2 = np.sum(np.square(spec['flux'] - model)*one_over_sigmasquared*mask)\n dof = np.sum(mask) - 7\n chi2_pdf = chi2/dof\n\n return (eigenvalues, model, chi2_pdf)\n \n \n \ndef fitatz_latis(spec, z):\n \n \n # Evaluate eigenspecra at needed redshifts\n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n wave_zp1 = spec['wave']/(1.0 + z)\n eigen1 = eigen_latis1_interp(wave_zp1)\n eigen2 = eigen_latis2_interp(wave_zp1)\n eigen3 = eigen_latis3_interp(wave_zp1)\n eigen4 = eigen_latis4_interp(wave_zp1)\n eigen5 = eigen_latis5_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)*spec['mask']\n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, eigen5, constant, linear, square])\n A = At.transpose()\n \n one_over_sigmasquared = 1/spec['error']**2\n one_over_sigmasquared[~np.isfinite(one_over_sigmasquared)] = 0.0\n Ci = np.diag(one_over_sigmasquared)\n A = At.transpose()\n Y = np.matrix(spec['flux']).transpose()\n \n\n\n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask,\n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n eigen5*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n\n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 + eigenvalues[4]*eigen5 \\\n + eigenvalues[5]*constant + eigenvalues[6]*linear + eigenvalues[7]*square\n model = model*mask \n chi2 = np.sum(np.square(spec['flux'] - model)*one_over_sigmasquared*mask)\n dof = np.sum(mask) - 8\n chi2_pdf = chi2/dof\n\n return (eigenvalues, model, chi2_pdf)\n \n \n 
\n # Same as fitatz_galaxy but for quasars \ndef fitatz_qso(spec, z):\n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n # Evaluate eigenspecra at needed redshifts\n wave_zp1 = spec['wave']/(1.0 + z)\n eigen1 = eigen_qso1_interp(wave_zp1)\n eigen2 = eigen_qso2_interp(wave_zp1)\n eigen3 = eigen_qso3_interp(wave_zp1)\n eigen4 = eigen_qso4_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)\n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, constant, linear, square])\n C = np.diag(spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n one_over_sigmasquared[~np.isfinite(one_over_sigmasquared)] = 0.0\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n Ci = np.diag(1/spec['error']**2)\n A = At.transpose()\n Y = np.matrix(spec['flux']).transpose()\n \n \n \n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask, \n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n \n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 \\\n + eigenvalues[4]*constant + eigenvalues[5]*linear + eigenvalues[6]*square\n \n chi2 = np.sum(np.square(spec['flux'] - model)*one_over_sigmasquared*mask)\n dof = np.sum(mask) - 7.0\n chi2_pdf = chi2/dof\n \n return (eigenvalues, model, chi2_pdf)\n \n # Same as fitatz_galaxy but for stars \ndef fitatz_star(spec, z):\n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n # Precompute some matrices\n C = np.diag(spec['error']**2)\n Ci = np.diag(1/spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n one_over_sigmasquared[~np.isfinite(one_over_sigmasquared)] = 0.0\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n wave = spec['wave']\n flux = spec['flux']\n error = spec['error']\n Y = np.matrix(flux).transpose()\n \n # Evaluate eigenspecra at needed redshifts\n wave_zp1 = wave/(1.0 + z)\n eigen1 = eigen_star1_interp(wave_zp1)\n eigen2 = eigen_star2_interp(wave_zp1)\n eigen3 = eigen_star3_interp(wave_zp1)\n eigen4 = eigen_star4_interp(wave_zp1)\n eigen5 = eigen_star5_interp(wave_zp1)\n eigen6 = eigen_star6_interp(wave_zp1)\n eigen7 = eigen_star7_interp(wave_zp1)\n eigen8 = eigen_star8_interp(wave_zp1)\n eigen9 = eigen_star9_interp(wave_zp1)\n eigen10 = eigen_star10_interp(wave_zp1)\n eigen11 = eigen_star11_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)\n \n \n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, eigen5, eigen6, eigen7,\n eigen8, eigen9, eigen10, eigen11, constant, linear, square])\n A = At.transpose()\n \n \n \n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask,\n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n eigen5*one_over_sigmasquared*mask,\n eigen6*one_over_sigmasquared*mask,\n eigen7*one_over_sigmasquared*mask,\n eigen8*one_over_sigmasquared*mask,\n eigen9*one_over_sigmasquared*mask,\n eigen10*one_over_sigmasquared*mask,\n eigen11*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n 
eigenvalues = eigenvalues.getA1()\n \n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 \\\n + eigenvalues[4]*eigen5 + eigenvalues[5]*eigen6 \\\n + eigenvalues[6]*eigen7 + eigenvalues[7]*eigen8 \\\n + eigenvalues[8]*eigen9 + eigenvalues[9]*eigen10 \\\n + eigenvalues[10]*eigen11 \\\n + eigenvalues[11]*constant + eigenvalues[12]*linear + eigenvalues[13]*square\n \n model = model*mask \n chi2 = np.sum(np.square(flux - model)*one_over_sigmasquared*mask)\n \n dof = np.sum(mask) - 13.0\n chi2_pdf = chi2/dof\n \n \n return (eigenvalues, model, chi2_pdf)\n \n \n # Same as fitatz_galaxy but for quasars with Hewitt and Wild cross-correlation template and flux calibration polynomials\ndef fitatz_qso_hw_poly(spec, z):\n \n \n C = np.diag(spec['error']**2)\n Ci = np.diag(1/spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n wave = spec['wave']\n flux = spec['flux']\n error = spec['error']\n Y = np.matrix(flux).transpose()\n \n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n \n wave_zp1 = wave/(1.0 + z)\n eigen1 = quasar_HW1_interp(wave_zp1)\n eigen2 = quasar_HW2_interp(wave_zp1)\n mask = (eigen1 != -999.0).astype(float)*spec['mask']\n\n\n At = np.matrix([eigen1, eigen2, constant, linear, square])\n A = At.transpose()\n \n\n\n AtCi = np.matrix([eigen1*one_over_sigmasquared,\n eigen2*one_over_sigmasquared,\n constant*one_over_sigmasquared,\n linear*one_over_sigmasquared,\n square*one_over_sigmasquared])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n\n model_qso = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2\n model_poly = eigenvalues[2]*constant + eigenvalues[3]*linear + eigenvalues[4]*square\n model = model_qso + model_poly\n \n chi2 = np.sum(np.square(flux - model)*one_over_sigmasquared)\n dof = np.sum(mask) - 5.0\n chi2_pdf = chi2/dof\n \n return (eigenvalues, model, chi2_pdf)\n \n \n\n# Return chi2, chi2pdf, and best-fit coefficients for stellar model fit to spectrum over a grid of wavelengths from zim to zmax with steps of dz\n# The spectrum *must* my a numpy nparray or equivalent\n# with wave, flux, error, and mask columns\n# wavelength in angstroms\n# flux and error in flambda\n# mask = 1 for good data, = 0 for bad data \ndef findz_star(spec, zmin=-0.005, zmax=0.005, dz=0.000025):\n \n zs = np.arange(zmin, zmax, dz)\n zeros = np.zeros(len(zs))\n indices = np.arange(0, len(zs), dtype=int)\n redshifts = np.ndarray(len(zs),\n dtype={'names':('index', 'z', 'chi2', 'chi2_pdf', 'eigen1', 'eigen2',\n 'eigen3', 'eigen4', 'eigen5', 'eigen6', 'eigen7',\n 'eigen8', 'eigen9', 'eigen10', 'eigen11',\n 'fluxcal0', 'fluxcal1', 'fluxcal2'), \n 'formats':(int, float, float, float, float, float,\n float, float, float, float, float,\n float, float, float, float,\n float, float, float)})\n redshifts['index'] = indices\n redshifts['z'] = zs \n redshifts['eigen1'] = 0.0 \n redshifts['eigen2'] = 0.0\n redshifts['eigen3'] = 0.0\n redshifts['eigen4'] = 0.0\n redshifts['eigen5'] = 0.0\n redshifts['eigen6'] = 0.0\n redshifts['eigen7'] = 0.0\n redshifts['eigen8'] = 0.0\n redshifts['eigen9'] = 0.0\n redshifts['eigen10'] = 0.0\n redshifts['eigen11'] = 0.0\n redshifts['fluxcal0'] = 0.0\n redshifts['fluxcal1'] = 0.0\n redshifts['fluxcal2'] = 0.0\n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n 
#redshifts = Table(redshifts)\n \n # Precompute some matrices\n C = np.diag(spec['error']**2)\n Ci = np.diag(1/spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n wave = spec['wave']\n flux = spec['flux']\n error = spec['error']\n Y = np.matrix(flux).transpose()\n \n def fitatz_star_precompute(z):\n \n # Evaluate eigenspectra at needed redshifts\n wave_zp1 = wave/(1.0 + z)\n eigen1 = eigen_star1_interp(wave_zp1)\n eigen2 = eigen_star2_interp(wave_zp1)\n eigen3 = eigen_star3_interp(wave_zp1)\n eigen4 = eigen_star4_interp(wave_zp1)\n eigen5 = eigen_star5_interp(wave_zp1)\n eigen6 = eigen_star6_interp(wave_zp1)\n eigen7 = eigen_star7_interp(wave_zp1)\n eigen8 = eigen_star8_interp(wave_zp1)\n eigen9 = eigen_star9_interp(wave_zp1)\n eigen10 = eigen_star10_interp(wave_zp1)\n eigen11 = eigen_star11_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)\n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, eigen5, eigen6, eigen7,\n eigen8, eigen9, eigen10, eigen11, constant, linear, square])\n A = At.transpose()\n \n \n \n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask,\n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n eigen5*one_over_sigmasquared*mask,\n eigen6*one_over_sigmasquared*mask,\n eigen7*one_over_sigmasquared*mask,\n eigen8*one_over_sigmasquared*mask,\n eigen9*one_over_sigmasquared*mask,\n eigen10*one_over_sigmasquared*mask,\n eigen11*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n \n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 \\\n + eigenvalues[4]*eigen5 + eigenvalues[5]*eigen6 \\\n + eigenvalues[6]*eigen7 + eigenvalues[7]*eigen8 \\\n + eigenvalues[8]*eigen9 + eigenvalues[9]*eigen10 \\\n + eigenvalues[10]*eigen11 \\\n + eigenvalues[11]*constant + eigenvalues[12]*linear + eigenvalues[13]*square\n model = model*mask\n \n chi2 = np.sum(np.square(flux - model)*one_over_sigmasquared*mask)\n dof = np.sum(mask)*1.0 - 14.0 # 14 fitted coefficients: 11 eigenspectra plus constant, linear, and square terms\n chi2_pdf = chi2/dof\n return (eigenvalues, chi2, chi2_pdf)\n\n for redshift in redshifts:\n \n \n eigenvalues, chi2, chi2_pdf = fitatz_star_precompute(redshift['z'])\n redshift['chi2'] = chi2\n redshift['chi2_pdf'] = chi2_pdf\n redshift['eigen1'] = eigenvalues[0]\n redshift['eigen2'] = eigenvalues[1]\n redshift['eigen3'] = eigenvalues[2]\n redshift['eigen4'] = eigenvalues[3]\n redshift['eigen5'] = eigenvalues[4]\n redshift['eigen6'] = eigenvalues[5]\n redshift['eigen7'] = eigenvalues[6]\n redshift['eigen8'] = eigenvalues[7]\n redshift['eigen9'] = eigenvalues[8]\n redshift['eigen10'] = eigenvalues[9]\n redshift['eigen11'] = eigenvalues[10]\n redshift['fluxcal0'] = eigenvalues[11]\n redshift['fluxcal1'] = eigenvalues[12]\n redshift['fluxcal2'] = eigenvalues[13]\n \n return redshifts\n \n \n# Return chi2, chi2pdf, and best-fit coefficients for quasar model fit to spectrum over a grid of redshifts from zmin to zmax with steps of dz\n# The spectrum *must* be a numpy ndarray or equivalent\n# with wave, flux, error, and mask columns\n# wavelength in angstroms\n# flux and error in flambda\n# mask = 1 for good data, = 0 for bad data \n# Does *not* include IGM attenuation\ndef findz_qso(spec, zmin=0.0, zmax=3.0, dz=0.0005):\n \n \n \n # Create 
an array of Lya absorption\n zs = np.arange(zmin, zmax, dz)\n zeros = np.zeros(len(zs))\n indices = np.arange(0, len(zs), dtype=int)\n redshifts = np.ndarray(len(zs),\n dtype={'names':('index', 'z', 'chi2', 'chi2_pdf', 'eigen1', 'eigen2',\n 'eigen3', 'eigen4', 'fluxcal0', 'fluxcal1', 'fluxcal2'), \n 'formats':(int, float, float, float, float, float,\n float, float, float, float, float)})\n redshifts['index'] = indices\n redshifts['z'] = zs \n redshifts['eigen1'] = 0.0 \n redshifts['eigen2'] = 0.0\n redshifts['eigen3'] = 0.0\n redshifts['eigen4'] = 0.0\n redshifts['fluxcal0'] = 0.0\n redshifts['fluxcal1'] = 0.0\n redshifts['fluxcal2'] = 0.0\n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n \n \n # Precompute some matrices\n C = np.diag(spec['error']**2)\n Ci = np.diag(1/spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n wave = spec['wave']\n flux = spec['flux']\n error = spec['error']\n Y = np.matrix(flux).transpose()\n \n def fitatz_qso_precompute(z):\n \n # Evaluate eigenspecra at needed redshifts\n wave_zp1 = wave/(1.0 + z)\n eigen1 = eigen_qso1_interp(wave_zp1)\n eigen2 = eigen_qso2_interp(wave_zp1)\n eigen3 = eigen_qso3_interp(wave_zp1)\n eigen4 = eigen_qso4_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)*spec['mask']\n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, constant, linear, square])\n A = At.transpose()\n \n \n \n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask,\n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n \n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 \\\n + eigenvalues[4]*constant + eigenvalues[5]*linear + eigenvalues[6]*square\n model = model*mask \n chi2 = np.sum(np.square(flux - model)*one_over_sigmasquared*mask)\n dof = np.sum(mask) - 4.0\n chi2_pdf = chi2/dof\n return (eigenvalues, chi2, chi2_pdf)\n\n for redshift in redshifts:\n \n \n eigenvalues, chi2, chi2_pdf = fitatz_qso_precompute(redshift['z'])\n redshift['chi2'] = chi2\n redshift['chi2_pdf'] = chi2_pdf\n redshift['eigen1'] = eigenvalues[0]\n redshift['eigen2'] = eigenvalues[1]\n redshift['eigen3'] = eigenvalues[2]\n redshift['eigen4'] = eigenvalues[3]\n redshifts['fluxcal0'] = eigenvalues[4]\n redshifts['fluxcal1'] = eigenvalues[5]\n redshifts['fluxcal2'] = eigenvalues[6]\n\n \n return redshifts\n \n \n \n# Return chi2, chi2pdf, and best-fit coefficients for quasar model fit to spectrum over a grid of wavelengths from zim to zmax with steps of dz\n# The spectrum *must* my a numpy nparray or equivalent\n# with wave, flux, error, and mask columns\n# wavelength in angstroms\n# flux and error in flambda\n# mask = 1 for good data, = 0 for bad data \n# Does *not* include IGM attenuation\ndef findz_qso_hw(spec, zmin=0.0, zmax=3.0, dz=0.0005):\n \n \n \n # Create an array of Lya absorption\n zs = np.arange(zmin, zmax, dz)\n zeros = np.zeros(len(zs))\n indices = np.arange(0, len(zs), dtype=int)\n redshifts = np.ndarray(len(zs),\n dtype={'names':('index', 'z', 'chi2', 'chi2_pdf', 'eigen1', 'eigen2',\n 'fluxcal0', 'fluxcal1', 'fluxcal2'), \n 'formats':(int, float, float, float, float, float,\n 
float, float, float, float, float)})\n redshifts['index'] = indices\n redshifts['z'] = zs \n redshifts['eigen1'] = 0.0 \n redshifts['eigen2'] = 0.0\n redshifts['fluxcal0'] = 0.0\n redshifts['fluxcal1'] = 0.0\n redshifts['fluxcal2'] = 0.0\n \n #redshifts = Table(redshifts)\n \n # Precompute some matrices\n C = np.diag(spec['error']**2)\n Ci = np.diag(1/spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n wave = spec['wave']\n flux = spec['flux']\n error = spec['error']\n Y = np.matrix(flux).transpose()\n \n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n def fitatz_qso_hw_precompute(z):\n \n # Evaluate eigenspecra at needed redshifts\n wave_zp1 = wave/(1.0 + z)\n eigen1 = quasar_HW1_interp(wave_zp1)\n eigen2 = quasar_HW2_interp(wave_zp1)\n mask = (eigen1 != -999.0).astype(float)*spec['mask']\n\n \n At = np.matrix([eigen1, eigen2, constant, linear, square])\n A = At.transpose()\n \n \n \n AtCi = np.matrix([eigen1*one_over_sigmasquared,\n eigen2*one_over_sigmasquared,\n constant*one_over_sigmasquared,\n linear*one_over_sigmasquared,\n square*one_over_sigmasquared])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n \n model_qso = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2\n model_poly = eigenvalues[2]*constant + eigenvalues[3]*linear + eigenvalues[4]*square\n model = model_qso + model_poly\n \n chi2 = np.sum(np.square(flux - model)*one_over_sigmasquared)\n dof = np.sum(mask) - 5.0\n chi2_pdf = chi2/dof\n return (eigenvalues, chi2, chi2_pdf)\n\n for redshift in redshifts:\n \n \n eigenvalues, chi2, chi2_pdf = fitatz_qso_hw_precompute(redshift['z'])\n redshift['chi2'] = chi2\n \n redshift['chi2_pdf'] = chi2_pdf\n \n redshift['eigen1'] = eigenvalues[0]\n redshift['eigen2'] = eigenvalues[1]\n redshift['fluxcal0'] = eigenvalues[2]\n redshift['fluxcal1'] = eigenvalues[3]\n redshift['fluxcal2'] = eigenvalues[4]\n \n return redshifts\n \n\n\n \n \n \n \n# # Evaluate eigenspecra at needed redshifts\n# wave_zp1 = spec['wave']/(1.0 + z)\n# hw1_qso = quasar_HW1_interp(wave_zp1)\n# hw2_qso = quasar_HW2_interp(wave_zp1)\n# \n# constant = np.ones(len(spec))\n# linear = np.arange(len(spec))\n# square = np.arange(len(spec))**2\n# \n# At = np.matrix([hw1_qso, hw2_qso,\n# constant, linear, square])\n# C = np.diag(spec['error']**2)\n# one_over_sigmasquared = 1/spec['error']**2\n# Ci = np.diag(1/spec['error']**2)\n# A = At.transpose()\n# Y = np.matrix(spec['flux']).transpose()\n# \n# \n# \n# AtCi = np.matrix([hw1_qso*one_over_sigmasquared,\n# hw2_qso*one_over_sigmasquared,\n# constant*one_over_sigmasquared,\n# linear*one_over_sigmasquared,\n# square*one_over_sigmasquared])#At*Ci\n# eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n# eigenvalues = eigenvalues.getA1()\n# \n# model_qso = eigenvalues[0]*hw1_qso + eigenvalues[1]*hw2_qso\n# model_poly = eigenvalues[2]*constant + eigenvalues[3]*linear + eigenvalues[4]*square\n# \n# model = model_qso + model_poly\n# \n# chi2 = np.sum(np.square(spec['flux'] - model)*one_over_sigmasquared)\n# \n# \n# mask = (hw1_qso != -999.0).astype(float)*spec['mask'] \n# \n# dof = np.sum(mask) - 13.0\n# chi2_pdf = chi2/dof\n# \n# return (eigenvalues, model, chi2_pdf)\n \n \n# Return chi2, chi2pdf, and best-fit coefficients for stellar model fit to spectrum over a grid of wavelengths from zim to zmax with steps of dz\n# The spectrum *must* my a numpy nparray or equivalent\n# with 
wave, flux, error, and mask columns\n# wavelength in angstroms\n# flux and error in flambda\n# mask = 1 for good data, = 0 for bad data \n# Appropriate for low-z galaxies observed in rest-frame optical\n# Does not include IGM attenuation\ndef findz_galaxy(spec, zmin=-0.1, zmax=1.5, dz=0.0001):\n \n \n \n # Create an array of Lya absorption\n zs = np.arange(zmin, zmax, dz)\n zeros = np.zeros(len(zs))\n indices = np.arange(0, len(zs), dtype=int)\n redshifts = np.ndarray(len(zs),\n dtype={'names':('index', 'z', 'chi2', 'chi2_pdf', 'eigen1', 'eigen2',\n 'eigen3', 'eigen4', 'fluxcal0', 'fluxcal1', 'fluxcal2'), \n 'formats':(int, float, float, float, float, float,\n float, float, float, float, float)})\n redshifts['index'] = indices\n redshifts['z'] = zs \n redshifts['eigen1'] = 0.0 \n redshifts['eigen2'] = 0.0\n redshifts['eigen3'] = 0.0\n redshifts['eigen4'] = 0.0\n redshifts['fluxcal0'] = 0.0\n redshifts['fluxcal1'] = 0.0\n redshifts['fluxcal2'] = 0.0\n \n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n #redshifts = Table(redshifts)\n \n # Precompute some matrices\n C = np.diag(spec['error']**2)\n Ci = np.diag(1/spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n \n index = np.where((np.isnan(spec['flux'])) | (np.isnan(spec['error'])))\n spec[index]['flux'] = 0.0\n spec[index]['error'] = 0.0\n spec[index]['mask'] = 0\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n index = np.where(spec['mask'] == 0)\n one_over_sigmasquared[index] = 0.0\n \n wave = spec['wave']\n flux = spec['flux']\n error = spec['error']\n Y = np.matrix(flux).transpose()\n \n def fitatz_galaxy_precompute(z):\n \n # Evaluate eigenspecra at needed redshifts\n wave_zp1 = wave/(1.0 + z)\n eigen1 = eigen_galaxy1_interp(wave_zp1)\n eigen2 = eigen_galaxy2_interp(wave_zp1)\n eigen3 = eigen_galaxy3_interp(wave_zp1)\n eigen4 = eigen_galaxy4_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)*spec['mask']\n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, constant, linear, square])\n A = At.transpose()\n \n \n \n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask,\n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n \n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 \\\n + eigenvalues[4]*constant + eigenvalues[5]*linear + eigenvalues[6]*square\n \n chi2 = np.sum(np.square(flux - model)*one_over_sigmasquared*mask)\n dof = np.sum(mask) - 7\n chi2_pdf = chi2/dof\n \n return (eigenvalues, chi2, chi2_pdf)\n\n for redshift in redshifts:\n \n \n eigenvalues, chi2, chi2_pdf = fitatz_galaxy_precompute(redshift['z'])\n redshift['chi2'] = chi2\n redshift['chi2_pdf'] = chi2_pdf\n redshift['eigen1'] = eigenvalues[0]\n redshift['eigen2'] = eigenvalues[1]\n redshift['eigen3'] = eigenvalues[2]\n redshift['eigen4'] = eigenvalues[3]\n redshifts['fluxcal0'] = eigenvalues[4]\n redshifts['fluxcal1'] = eigenvalues[5]\n redshifts['fluxcal2'] = eigenvalues[6]\n\n \n return redshifts\n \n \n\n# Return chi2, chi2pdf, and best-fit coefficients for z=2 galaxy model fit to spectrum over a grid of wavelengths from zim to zmax with steps of dz\n# The spectrum *must* my a numpy nparray or equivalent\n# with 
wave, flux, error, and mask columns\n# wavelength in angstroms\n# flux and error in flambda\n# mask = 1 for good data, = 0 for bad data \n# Appropriate for low-z galaxies observed in rest-frame optical\n# Does not include IGM attenuation?\ndef findz_latis(spec, zmin=2.5, zmax=5.0, dz=0.0005):\n \n \n \n # Create an array of Lya absorption\n zs = np.arange(zmin, zmax, dz)\n zeros = np.zeros(len(zs))\n indices = np.arange(0, len(zs), dtype=int)\n redshifts = np.ndarray(len(zs),\n dtype={'names':('index', 'z', 'chi2', 'chi2_pdf', 'eigen1', 'eigen2',\n 'eigen3', 'eigen4', 'eigen5', 'fluxcal0', 'fluxcal1', 'fluxcal2'), \n 'formats':(int, float, float, float, float, float, float,\n float, float, float, float, float)})\n redshifts['index'] = indices\n redshifts['z'] = zs \n redshifts['eigen1'] = 0.0 \n redshifts['eigen2'] = 0.0\n redshifts['eigen3'] = 0.0\n redshifts['eigen4'] = 0.0\n redshifts['eigen5'] = 0.0\n redshifts['fluxcal0'] = 0.0\n redshifts['fluxcal1'] = 0.0\n redshifts['fluxcal2'] = 0.0\n \n \n constant = np.ones(len(spec))\n linear = np.arange(len(spec))\n square = np.arange(len(spec))**2\n \n #redshifts = Table(redshifts)\n \n # Precompute some matrices\n C = np.diag(spec['error']**2)\n Ci = np.diag(1/spec['error']**2)\n one_over_sigmasquared = 1/spec['error']**2\n \n index = np.where((np.isnan(spec['flux'])) | (np.isnan(spec['error'])))\n spec[index]['flux'] = 0.0\n spec[index]['error'] = 0.0\n spec[index]['mask'] = 0\n \n index = np.where((spec['mask'] == 0) | (spec['error'] == 0.0))\n one_over_sigmasquared[index] = 0.0\n \n index = np.where(spec['mask'] == 0)\n one_over_sigmasquared[index] = 0.0\n \n wave = spec['wave']\n flux = spec['flux']\n error = spec['error']\n Y = np.matrix(flux).transpose()\n \n def fitatz_latis_precompute(z):\n \n # Evaluate eigenspecra at needed redshifts\n wave_zp1 = wave/(1.0 + z)\n eigen1 = eigen_latis1_interp(wave_zp1)\n eigen2 = eigen_latis2_interp(wave_zp1)\n eigen3 = eigen_latis3_interp(wave_zp1)\n eigen4 = eigen_latis4_interp(wave_zp1)\n eigen5 = eigen_latis5_interp(wave_zp1)\n \n mask = (eigen1 != -999.0).astype(float)*spec['mask']\n \n At = np.matrix([eigen1, eigen2, eigen3, eigen4, eigen5, constant, linear, square])\n A = At.transpose()\n \n \n \n AtCi = np.matrix([eigen1*one_over_sigmasquared*mask,\n eigen2*one_over_sigmasquared*mask,\n eigen3*one_over_sigmasquared*mask,\n eigen4*one_over_sigmasquared*mask,\n eigen5*one_over_sigmasquared*mask,\n constant*one_over_sigmasquared*mask,\n linear*one_over_sigmasquared*mask,\n square*one_over_sigmasquared*mask])#At*Ci\n eigenvalues = np.linalg.inv(AtCi*A)*AtCi*Y\n eigenvalues = eigenvalues.getA1()\n \n model = eigenvalues[0]*eigen1 + eigenvalues[1]*eigen2 \\\n + eigenvalues[2]*eigen3 + eigenvalues[3]*eigen4 + eigenvalues[4]*eigen5 \\\n + eigenvalues[5]*constant + eigenvalues[6]*linear + eigenvalues[7]*square\n \n chi2 = np.sum(np.square(flux - model)*one_over_sigmasquared*mask)\n dof = np.sum(mask) - 8\n chi2_pdf = chi2/dof\n \n return (eigenvalues, chi2, chi2_pdf)\n\n for redshift in redshifts:\n \n \n eigenvalues, chi2, chi2_pdf = fitatz_latis_precompute(redshift['z'])\n redshift['chi2'] = chi2\n redshift['chi2_pdf'] = chi2_pdf\n redshift['eigen1'] = eigenvalues[0]\n redshift['eigen2'] = eigenvalues[1]\n redshift['eigen3'] = eigenvalues[2]\n redshift['eigen4'] = eigenvalues[3]\n redshift['eigen5'] = eigenvalues[4]\n redshifts['fluxcal0'] = eigenvalues[5]\n redshifts['fluxcal1'] = eigenvalues[6]\n redshifts['fluxcal2'] = eigenvalues[7]\n\n \n return redshifts\n\n\n\n# Fit a galaxy model with a 
guess redshift and optionally low-order\n# polynomials to account for an flux calibration errors\ndef fitz_galaxy_emcee(spec, zguess, fluxpoly=True, steps=5000, burn=2000, progress=True, printReport=True, saveCorner='', zMin=None, zMax=None):\n \n flux_median = np.median(spec['flux'])\n parameters = Parameters()\n # (Name, Value, Vary, Min, Max, Expr)\n parameters.add_many(('z', zguess, True, zMin, zMax, None),\n ('eigen1', flux_median*0.4, True, None, None, None),\n ('eigen2', flux_median*0.4, True, None, None, None),\n ('eigen3', flux_median*0.1, True, None, None, None),\n ('eigen4', flux_median*0.1, True, None, None, None),\n ('fluxcal0', 0.0, fluxpoly, None, None, None),\n ('fluxcal1', 0.0, fluxpoly, None, None, None),\n ('fluxcal2', 0.0, fluxpoly, None, None, None))\n \n galaxy_model = Model(eigensum_galaxy, missing='drop')\n result = galaxy_model.fit(spec['flux'], wave=spec['wave'], weights=1/spec['error'],\n params=parameters, missing='drop')\n \n \n emcee_kws = dict(steps=steps, burn=burn, is_weighted=True,\n progress=progress)\n emcee_params = result.params.copy()\n \n result_emcee = galaxy_model.fit(spec['flux'], wave=spec['wave'], weights=1/spec['error'],\n params=emcee_params, method='emcee', nan_policy='omit',\n missing='drop', fit_kws=emcee_kws, show_titles=True)\n result_emcee.conf_interval \n \n # find the maximum likelihood solution\n highest_prob = np.argmax(result_emcee.lnprob)\n hp_loc = np.unravel_index(highest_prob, result_emcee.lnprob.shape)\n mle_soln = result_emcee.chain[hp_loc]\n \n #result_emcee.conf_interval()\n \n if printReport:\n print(result_emcee.fit_report())\n #print(result_emcee.ci_report())\n \n z_marginalized = np.percentile(result_emcee.flatchain['z'], [50])[0]\n zErrUp = np.percentile(result_emcee.flatchain['z'], [84.1])[0] - np.percentile(result_emcee.flatchain['z'], [50])[0]\n zErrDown = np.percentile(result_emcee.flatchain['z'], [50])[0] - np.percentile(result_emcee.flatchain['z'], [15.9])[0]\n print('z = {:0.5f} +{:0.5f} -{:0.5f}'.format(z_marginalized, zErrUp, zErrDown))\n \n interval68 = np.percentile(result_emcee.flatchain['z'], [15.9, 84.1])\n interval95 = np.percentile(result_emcee.flatchain['z'], [2.28, 97.7])\n print('68% C.I:')\n print(interval68)\n \n print('95% C.I:')\n print(interval95)\n \n \n \n \n if saveCorner != '':\n emcee_corner = corner.corner(result_emcee.flatchain, labels=['z', 'eigen1', 'eigen2', 'eigen3', 'eigen4', 'fluxcal0', 'fluxcal1', 'fluxcal2'],\n truths=mle_soln)\n emcee_corner.savefig(saveCorner)\n plt.close()\n \n \n return result_emcee\n\n\n\n \n# Fit a galaxy model with a guess redshift and optionally low-order\n# polynomials to account for an flux calibration errors\ndef fitz_galaxy(spec, zguess, fluxpoly=True):\n \n flux_median = np.median(spec['flux'])\n parameters = Parameters()\n # (Name, Value, Vary, Min, Max, Expr)\n parameters.add_many(('z', zguess, True, None, None, None),\n ('eigen1', flux_median*0.4, True, None, None, None),\n ('eigen2', flux_median*0.4, True, None, None, None),\n ('eigen3', flux_median*0.1, True, None, None, None),\n ('eigen4', flux_median*0.1, True, None, None, None),\n ('fluxcal0', 0.0, fluxpoly, None, None, None),\n ('fluxcal1', 0.0, fluxpoly, None, None, None),\n ('fluxcal2', 0.0, fluxpoly, None, None, None))\n \n galaxy_model = Model(eigensum_galaxy, missing='drop')\n result = galaxy_model.fit(spec['flux'], wave=spec['wave'], weights=1/spec['error'],\n params=parameters, missing='drop')\n \n \n emcee_kws = dict(steps=500, burn=200, is_weighted=True,\n progress=True)\n 
emcee_params = result.params.copy()\n \n return result\n \n\n\n# Fit a quasar model with a guess redshift and optionally low-order\n# polynomials to account for an flux calibration errors\ndef fitz_qso(spec, zguess, fluxpoly=True):\n \n flux_median = np.median(spec['flux'])\n parameters = Parameters()\n # (Name, Value, Vary, Min, Max, Expr)\n parameters.add_many(('z', zguess, True, None, None, None),\n ('eigen1', flux_median*0.4, True, None, None, None),\n ('eigen2', flux_median*0.4, True, None, None, None),\n ('eigen3', flux_median*0.1, True, None, None, None),\n ('eigen4', flux_median*0.1, True, None, None, None),\n ('fluxcal0', 0.0, fluxpoly, None, None, None),\n ('fluxcal1', 0.0, fluxpoly, None, None, None),\n ('fluxcal2', 0.0, fluxpoly, None, None, None))\n \n galaxy_model = Model(eigensum_qso, missing='drop')\n result = galaxy_model.fit(spec['flux'], wave=spec['wave'], weights=1/spec['error'],\n params=parameters, missing='drop')\n return result\n \n\n# Fit a star model with a guess redshift and optionally low-order\n# polynomials to account for an flux calibration errors \ndef fitz_star(spec, zguess, fluxpoly=True):\n \n flux_median = np.median(spec['flux'])\n parameters = Parameters()\n # (Name, Value, Vary, Min, Max, Expr)\n parameters.add_many(('z', zguess, True, None, None, None),\n ('eigen1', flux_median*0.1, True, None, None, None),\n ('eigen2', flux_median*0.1, True, None, None, None),\n ('eigen3', flux_median*0.1, True, None, None, None),\n ('eigen4', flux_median*0.1, True, None, None, None),\n ('eigen5', flux_median*0.1, True, None, None, None),\n ('eigen6', flux_median*0.1, True, None, None, None),\n ('eigen7', flux_median*0.1, True, None, None, None),\n ('eigen8', flux_median*0.1, True, None, None, None),\n ('eigen9', flux_median*0.1, True, None, None, None),\n ('eigen10', flux_median*0.1, True, None, None, None),\n ('eigen11', flux_median*0.1, True, None, None, None),\n ('fluxcal0', 0.0, fluxpoly, None, None, None),\n ('fluxcal1', 0.0, fluxpoly, None, None, None),\n ('fluxcal2', 0.0, fluxpoly, None, None, None))\n \n star_model = Model(eigensum_star, missing='drop')\n result = star_model.fit(spec['flux'], wave=spec['wave'], weights=1/spec['error'],\n params=parameters, missing='drop')\n return result\n \n \n\n\n \n"
] | [
[
"numpy.max",
"numpy.abs"
],
[
"numpy.matrix",
"numpy.diag",
"numpy.unravel_index",
"numpy.square",
"numpy.isfinite",
"numpy.linalg.inv",
"numpy.arange",
"numpy.median",
"numpy.isnan",
"numpy.percentile",
"scipy.interpolate.interp1d",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
gelijergensen/PermutationImportance | [
"7a09a407e42745c223055e0597c5226ff64b2f3c"
] | [
"PermutationImportance/abstract_runner.py"
] | [
"\"\"\"The general algorithm for all of the data-based variable importance methods\nis the same, regardless of whether the method is Sequential Selection or \nPermutation Importance or something else. This is represented in the \n``abstract_variable_importance`` function. All of the different methods we \nprovide use this function under the hood and the only difference between them is\nthe ``selection_strategy`` object, which is detailed in \n:mod:`PermutationImportance.selection_strategies`. Typically, you will not need \nto use this method but can instead use one of the methods imported directly into \nthe top package of **PermutationImportance**.\n\nIf you wish to implement your own variable importance method, you will need to\ndevise your own ``selection_strategy``. We recommend using\n:mod:`PermutationImportance.selection_strategies` as a template for implementing \nyour own variable importance method.\"\"\"\n\nimport numpy as np\nimport multiprocessing as mp\n\nfrom .data_verification import verify_data, determine_variable_names\nfrom .multiprocessing_utils import pool_imap_unordered\nfrom .result import ImportanceResult\nfrom .scoring_strategies import verify_scoring_strategy\nfrom .utils import add_ranks_to_dict, get_data_subset\n\n\ndef abstract_variable_importance(training_data, scoring_data, scoring_fn, scoring_strategy, selection_strategy, variable_names=None, nimportant_vars=None, method=None, njobs=1):\n \"\"\"Performs an abstract variable importance over data given a particular\n set of functions for scoring, determining optimal variables, and selecting\n data\n\n :param training_data: a 2-tuple ``(inputs, outputs)`` for training in the\n ``scoring_fn``\n :param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the\n ``scoring_fn``\n :param scoring_fn: a function to be used for scoring. Should be of the form\n ``(training_data, scoring_data) -> some_value``\n :param scoring_strategy: a function to be used for determining optimal\n variables. Should be of the form ``([some_value]) -> index``\n :param variable_names: an optional list for variable names. If not given,\n will use names of columns of data (if pandas dataframe) or column\n indices\n :param nimportant_vars: number of variables to compute importance for.\n Defaults to all variables\n :param method: a string for the name of the method used. Defaults to the\n name of the ``selection_strategy`` if not given\n :param njobs: an integer for the number of threads to use. If negative, will\n use ``num_cpus + njobs``. 
Defaults to 1\n    :returns: :class:`PermutationImportance.result.ImportanceResult` object \n        which contains the results for each run\n    \"\"\"\n\n    training_data = verify_data(training_data)\n    scoring_data = verify_data(scoring_data)\n    scoring_strategy = verify_scoring_strategy(scoring_strategy)\n    variable_names = determine_variable_names(scoring_data, variable_names)\n    nimportant_vars = len(\n        variable_names) if nimportant_vars is None else nimportant_vars\n    method = getattr(selection_strategy, \"name\", getattr(\n        selection_strategy, \"__name__\")) if method is None else method\n    njobs = mp.cpu_count() + njobs if njobs <= 0 else njobs\n\n    important_vars = list()\n    num_vars = len(variable_names)\n\n    # Compute the original score over all the data\n    original_score = scoring_fn(training_data, scoring_data)\n    result_obj = ImportanceResult(method, variable_names, original_score)\n    for _ in range(nimportant_vars):\n        selection_iter = selection_strategy(\n            training_data, scoring_data, num_vars, important_vars)\n        if njobs == 1:\n            result = _singlethread_iteration(\n                selection_iter, scoring_fn)\n        else:\n            result = _multithread_iteration(\n                selection_iter, scoring_fn, njobs)\n        next_result = add_ranks_to_dict(\n            result, variable_names, scoring_strategy)\n        best_var = min(\n            next_result.keys(), key=lambda key: next_result[key][0])\n        best_index = np.flatnonzero(variable_names == best_var)[0]\n        result_obj.add_new_results(\n            next_result, next_important_variable=best_var)\n        important_vars.append(best_index)\n\n    return result_obj\n\n\ndef _singlethread_iteration(selection_iterator, scoring_fn):\n    \"\"\"Handles a single pass of the abstract variable importance algorithm, \n    assuming a single worker thread\n\n    :param selection_iterator: an iterator which yields triples\n        ``(variable, training_data, scoring_data)``. Typically a \n        :class:`PermutationImportance.selection_strategies.SelectionStrategy`\n    :param scoring_fn: a function to be used for scoring. Should be of the form\n        ``(training_data, scoring_data) -> float``\n    :returns: a dict of ``{var: score}``\n    \"\"\"\n    result = dict()\n    for var, training_data, scoring_data in selection_iterator:\n        score = scoring_fn(training_data, scoring_data)\n        result[var] = score\n    return result\n\n\ndef _multithread_iteration(selection_iterator, scoring_fn, njobs):\n    \"\"\"Handles a single pass of the abstract variable importance algorithm using\n    multithreading\n\n    :param selection_iterator: an iterator which yields triples\n        ``(variable, training_data, scoring_data)``. Typically a \n        :class:`PermutationImportance.selection_strategies.SelectionStrategy`\n    :param scoring_fn: a function to be used for scoring. Should be of the form\n        ``(training_data, scoring_data) -> float``\n    :param njobs: number of processes to use\n    :returns: a dict of ``{var: score}``\n    \"\"\"\n    result = dict()\n    for index, score in pool_imap_unordered(scoring_fn, selection_iterator, njobs):\n        result[index] = score\n    return result\n"
] | [
[
"numpy.flatnonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ben15021999/fairseq_rl | [
"89f3c1123052927f67c008f01f3ffa4383f90150"
] | [
"fairseq/tasks/online_backtranslation.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport contextlib\nimport json\nimport logging\nimport math\nimport os\nfrom argparse import Namespace\nfrom collections import OrderedDict, defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Sequence, Tuple\nfrom argparse import ArgumentError\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport fairseq\nfrom fairseq import metrics, options, utils\nfrom fairseq.data import (\n FairseqDataset,\n LanguagePairDataset,\n NoisingDataset,\n PrependTokenDataset,\n RoundRobinZipDatasets,\n TransformEosLangPairDataset,\n data_utils,\n encoders,\n)\nfrom fairseq.sequence_generator_rl import SequenceGenerator\nfrom fairseq.tasks import register_task\nfrom fairseq.tasks.translation import TranslationTask, load_langpair_dataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass PiecewiseLinearFn:\n \"\"\"Piecewise linear function. Can be configured with a string.\"\"\"\n\n def __init__(self, pieces: Sequence[Tuple[int, float]]):\n assert pieces == sorted(\n pieces\n ), f\"PiecewiseLinearFn configuration should be sorted, received: {pieces}\"\n\n self.pieces = pieces\n\n def __call__(self, x: int) -> float:\n for i, (x_a, y_a) in enumerate(self.pieces[:-1]):\n x_b, y_b = self.pieces[i + 1]\n if x_a <= x <= x_b:\n return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)\n\n return self.pieces[-1][1]\n\n @staticmethod\n def from_string(configuration: str) -> \"PiecewiseLinearFn\":\n \"\"\"\n Parse the configuration of lambda coefficient (for scheduling).\n x = \"3\" # lambda will be a constant equal to x\n x = \"0:1,1000:0\" # lambda will start from 1 and linearly decrease\n # to 0 during the first 1000 iterations\n x = \"0:0,1000:0,2000:1\" # lambda will be equal to 0 for the first 1000\n # iterations, then will linearly increase to 1 until iteration 2000\n \"\"\"\n if isinstance(configuration, float):\n return PiecewiseLinearFn([(0, configuration)])\n\n try:\n parts = configuration.split(\",\")\n if len(parts) == 1:\n v = float(configuration)\n return PiecewiseLinearFn([(0, v)])\n\n split = [s.split(\":\") for s in parts]\n pieces = [(int(t), float(v)) for t, v in split]\n return PiecewiseLinearFn(pieces)\n except Exception:\n raise ValueError(\n f\"Invalid PiecewiseLinearFn configuration: {configuration!r}\"\n )\n\n @staticmethod\n def one() -> \"PiecewiseLinearFn\":\n return PiecewiseLinearFn([(0, 1.0)])\n\n\n@register_task(\"online_backtranslation\")\nclass OnlineBackTranslationTask(TranslationTask):\n @staticmethod\n def add_args(parser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n # Generic translation args\n parser.add_argument('data', help='colon separated path to data directories list, \\\n will be iterated upon during epochs in round-robin manner; \\\n however, valid and test data are always in the first directory to \\\n avoid the need for repeating them in all directories')\n parser.add_argument('--mono-langs', metavar='MONO_LANGS',\n help='monolingual languages for training')\n parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',\n help='language pairs for validation')\n parser.add_argument('--load-alignments', action='store_true',\n help='load the binarized alignments')\n parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',\n help='pad the source on the left')\n 
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',\n help='pad the target on the left')\n parser.add_argument('--upsample-primary', default=1, type=int,\n help='amount to upsample primary dataset')\n try:\n parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the source sequence')\n parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',\n help='max number of tokens in the target sequence')\n except ArgumentError:\n # this might have already been defined. Once we transition this to hydra it should be fine to add it here.\n pass\n parser.add_argument('--truncate-source', action='store_true', default=False,\n help='truncate source to max-source-positions')\n parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',\n help='if >0, then bucket source and target lengths into N '\n 'buckets and pad accordingly; this is useful on TPUs '\n 'to minimize the number of compilations')\n\n # Denoising args\n parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',\n help='maximum word shuffle distance for denoising autoencoding data generation')\n parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',\n help='word dropout probability for denoising autoencoding data generation')\n parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',\n help='word blanking probability for denoising autoencoding data generation')\n\n # Backtranslation args\n parser.add_argument('--lambda-bt', default=\"1.0\", type=str, metavar='N',\n help='back-translation weight')\n parser.add_argument('--lambda-dae', default=\"1.0\", type=str, metavar='N',\n help='denoising auto-encoder weight')\n\n # Evaluation args\n parser.add_argument('--generate-one-by-one', action='store_true',\n help='generate one sentence at a time for backtranslation')\n\n parser.add_argument('--eval-bleu', action='store_true',\n help='evaluation with BLEU scores')\n parser.add_argument('--eval-bleu-detok', type=str, default=\"space\",\n help='detokenize before computing BLEU (e.g., \"moses\"); '\n 'required if using --eval-bleu; use \"space\" to '\n 'disable detokenization; see fairseq.data.encoders '\n 'for other options')\n parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',\n help='args for building the tokenizer, if needed')\n parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,\n help='compute tokenized BLEU instead of sacrebleu')\n parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,\n help='remove BPE before computing BLEU')\n parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',\n help='generation args for BLUE scoring, '\n 'e.g., \\'{\"beam\": 4, \"lenpen\": 0.6}\\'')\n parser.add_argument('--eval-bleu-print-samples', action='store_true',\n help='print sample generations during validation')\n # fmt: on\n\n def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):\n super().__init__(args, common_dict, common_dict)\n self.common_dict = common_dict\n self.mono_langs = mono_langs\n self.valid_lang_pairs = valid_lang_pairs\n\n self.SHOW_SAMPLES_INTERVAL = 1000\n # Start by showing samples\n self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL\n self.SHOW_SAMPLES_NUMBER = 5\n self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)\n self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)\n\n self.args = args\n self.data 
= utils.split_paths(self.args.data)\n if len(self.data) == 1:\n shards = list(Path(self.data[0]).glob(\"shard*\"))\n if len(shards) > 0:\n # keep this as strings, since it can also be a manifold path\n old_data = self.data\n self.data = [str(shard) for shard in shards]\n logging.warning(f\"Expanded data directory {old_data} to {self.data}\")\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n args.left_pad_source = options.eval_bool(args.left_pad_source)\n args.left_pad_target = options.eval_bool(args.left_pad_target)\n\n paths = utils.split_paths(args.data)\n assert len(paths) > 0\n assert args.mono_langs is not None\n\n mono_langs = args.mono_langs.split(\",\")\n valid_lang_pairs = args.valid_lang_pairs.split(\",\")\n\n # load dictionary\n dict_path = os.path.join(paths[0], \"dict.txt\")\n common_dict = cls.load_dictionary(dict_path)\n\n return cls(args, common_dict, mono_langs, valid_lang_pairs)\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n if split == \"train\":\n data_path = self.data[(epoch - 1) % len(self.data)]\n dataset = self.load_train_dataset(data_path)\n else:\n # valid/test should always be the same.\n dataset = self.load_translation_dataset(split, self.data[0])\n\n self.datasets[split] = dataset\n return dataset\n\n def load_train_dataset(self, data_path: str) -> FairseqDataset:\n \"\"\"The training dataset is made of backtranslation dataset and denoising dataset.\"\"\"\n data = []\n for lang in self.mono_langs:\n train_path = os.path.join(data_path, lang, \"train\")\n # TODO: could we do the BT using denoise sample ?\n # this would half the data loading work\n data.append((f\"{lang}-BT\", self.load_bt_dataset(train_path, lang)))\n data.append(\n (f\"{lang}-DENOISE\", self.load_denoise_dataset(train_path, lang))\n )\n\n return RoundRobinZipDatasets(OrderedDict(data))\n\n def _langpair_dataset(\n self, src: FairseqDataset, tgt: FairseqDataset\n ) -> LanguagePairDataset:\n return LanguagePairDataset(\n src,\n src.sizes,\n self.dictionary,\n tgt=tgt,\n tgt_sizes=tgt.sizes,\n tgt_dict=self.dictionary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n # TODO: should we shuffle ? 
we are already sorting batch by sizes so ?\n # shuffle=True,\n )\n\n def _prepend_lang_bos_to_target(\n self, dataset: LanguagePairDataset, lang: str\n ) -> LanguagePairDataset:\n bos = _lang_token_index(self.dictionary, lang)\n return TransformEosLangPairDataset(\n dataset,\n src_eos=self.dictionary.eos(),\n new_src_eos=self.dictionary.eos(),\n tgt_bos=self.dictionary.eos(),\n new_tgt_bos=bos,\n )\n\n def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:\n \"\"\"The BT dataset is generated with (tgt, tgt) pairs.\n The actual translation to a (generated_src, tgt) pair\n is done on the fly during training.\n \"\"\"\n mono_dataset = data_utils.load_indexed_dataset(\n data_path, self.common_dict, self.args.dataset_impl\n )\n assert mono_dataset is not None, f\"No dataset found for {lang}\"\n\n mono_dataset_src = PrependTokenDataset(\n mono_dataset, _lang_token_index(self.dictionary, lang)\n )\n\n mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)\n logger.info(\n f\"mono_lang = {lang} \"\n f\"lang token index = {_lang_token_index(self.dictionary, lang)} \"\n f\"lang token = {_lang_token(lang)}\"\n )\n\n mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)\n return mono_dataset_bt\n\n def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:\n \"\"\"Classic denoising dataset\"\"\"\n dataset = data_utils.load_indexed_dataset(\n data_path, self.common_dict, self.args.dataset_impl\n )\n noisy_dataset = NoisingDataset(\n dataset,\n self.dictionary,\n seed=1,\n max_word_shuffle_distance=self.args.max_word_shuffle_distance,\n word_dropout_prob=self.args.word_dropout_prob,\n word_blanking_prob=self.args.word_blanking_prob,\n )\n noisy_dataset = PrependTokenDataset(\n noisy_dataset, _lang_token_index(self.dictionary, lang)\n )\n\n clean_dataset = data_utils.load_indexed_dataset(\n data_path, self.common_dict, self.args.dataset_impl\n )\n denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)\n denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)\n return denoising_dataset\n\n def load_translation_dataset(\n self, split: str, data_path: str, combine: bool = False\n ):\n # only judging with one language pair for the moment,\n # since ConcatDataset doesn't work as expected\n assert len(self.valid_lang_pairs) == 1, \"For now...\"\n valid_lang_pair = self.valid_lang_pairs[0]\n src, tgt = valid_lang_pair.split(\"-\")\n\n # use the same function than TranslationTask\n src_tgt_dt = load_langpair_dataset(\n data_path,\n split,\n src,\n self.common_dict,\n tgt,\n self.common_dict,\n combine=combine,\n dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=self.args.max_source_positions,\n max_target_positions=self.args.max_target_positions,\n load_alignments=self.args.load_alignments,\n truncate_source=self.args.truncate_source,\n num_buckets=self.args.num_batch_buckets,\n shuffle=(split != \"test\"),\n prepend_bos_src=_lang_token_index(self.dictionary, src),\n )\n\n src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)\n src_tgt_eos_dt.args = self.args\n return src_tgt_eos_dt\n\n def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):\n raise NotImplementedError\n\n def build_model(self, args, from_checkpoint=False):\n # torch.autograd.set_detect_anomaly(True)\n model = super().build_model(args, 
from_checkpoint)\n\n add_secial_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)\n\n self.sequence_generators = {}\n for mono_lang in self.mono_langs:\n self.sequence_generators[mono_lang] = SequenceGenerator(\n [model],\n tgt_dict=self.dictionary,\n beam_size=1,\n max_len_a=1.3,\n max_len_b=5,\n min_len=5,\n # keep 1 to be able to prepend bos\n max_len=model.max_decoder_positions() - 1,\n )\n\n if getattr(args, \"eval_bleu\", False):\n assert getattr(args, \"eval_bleu_detok\", None) is not None, (\n \"--eval-bleu-detok is required if using --eval-bleu; \"\n \"try --eval-bleu-detok=moses (or --eval-bleu-detok=space \"\n \"to disable detokenization, e.g., when using sentencepiece)\"\n )\n detok_args = json.loads(getattr(args, \"eval_bleu_detok_args\", \"{}\") or \"{}\")\n self.tokenizer = encoders.build_tokenizer(\n Namespace(\n tokenizer=getattr(args, \"eval_bleu_detok\", None), **detok_args\n )\n )\n\n gen_args = json.loads(getattr(args, \"eval_bleu_args\", \"{}\") or \"{}\")\n self.bleu_sequence_generator = self.build_generator(\n [model], Namespace(**gen_args)\n )\n\n return model\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.args.max_source_positions, self.args.max_target_positions)\n\n @property\n def dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.common_dict\n\n def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):\n self._show_samples_ctr += 1\n if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:\n return\n self._show_samples_ctr = 0\n\n ln = smp[\"net_input\"][\"src_tokens\"].shape[0]\n\n logger.info(\n f\"(r:{self.args.distributed_rank}) : \"\n f\"{other_lang} ---> {mono_lang} \"\n f\"({other_lang} was generated by back-translation.) 
{ln} samples\"\n )\n\n for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):\n src_tokens = smp[\"net_input\"][\"src_tokens\"][i]\n tgt_tokens = smp[\"target\"][i]\n\n src_str = self.dictionary.string(src_tokens, \"sentencepiece\")\n tgt_str = self.dictionary.string(tgt_tokens, \"sentencepiece\")\n logger.info(\n f\"\\n{i}\\t\\t[{other_lang} generated] {src_str}\\n\"\n f\"\\t\\t[{mono_lang} original ] {tgt_str}\\n\"\n f\"\\t\\t[ src tokens] {src_tokens}\\n\"\n )\n\n def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:\n \"\"\"\n * WARNING: smp is modified in place.\n * At the start of this function, `smp` has the same input and target:\n |--------------------------------------------------------|\n | smp['net_input']['src_tokens'] | smp['target'] |\n | (from data) __en__ hello world | __en__ hello world |\n |--------------------------------------------------------|\n\n * We call generator.generate(smp, bos_token = token(\"ro\")),\n and copy the result as input\n * At the end, `smp` has the translation to other language.\n |--------------------------------------------------------|\n | smp['net_input']['src_tokens'] | smp['target'] |\n | (generated) __ro__ salut lume | __en__ hello world |\n |--------------------------------------------------------|\n\n \"\"\"\n bos_token = _lang_token_index(self.dictionary, other_lang)\n generated = self.sequence_generators[orig_lang].generate(\n models=[], sample=smp, bos_token=bos_token\n )\n\n max_lngth = max([gn[0][\"tokens\"].size(0) for gn in generated])\n net_input = smp[\"net_input\"]\n n_src_tokens = torch.empty(\n size=(len(generated), max_lngth + 1), dtype=net_input[\"src_tokens\"].dtype\n )\n n_src_lengths = torch.empty(\n len(generated), dtype=net_input[\"src_lengths\"].dtype\n )\n\n for i, gn in enumerate(generated):\n tokens = gn[0][\"tokens\"]\n tokens_size = tokens.size(0)\n padding_needed = max_lngth - tokens_size\n tokens = torch.cat([tokens.new([bos_token]), tokens])\n tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())\n n_src_tokens[i] = tokens\n n_src_lengths[i] = tokens_size + 1\n\n device = net_input[\"src_tokens\"].device\n # This seems to be important\n del net_input[\"src_tokens\"]\n del net_input[\"src_lengths\"]\n net_input[\"src_tokens\"] = n_src_tokens.to(device)\n net_input[\"src_lengths\"] = n_src_lengths.to(device)\n\n def generate(self, smp, model):\n model.eval()\n orig_lang = (\n self.dictionary[smp[\"net_input\"][\"src_tokens\"][0][0]]\n .replace(\" \", \"\")\n .replace(\"_\", \"\")\n )\n bos_token = smp[\"net_input\"][\"prev_output_tokens\"][0][0]\n with torch.no_grad():\n generated = self.sequence_generators[orig_lang].generate(\n models=[model], sample=smp, bos_token=bos_token\n )\n return generated\n\n def get_other_lang(self, lang):\n # TODO: allow more complex mapping\n if lang != self.mono_langs[0]:\n return self.mono_langs[0]\n if len(self.mono_langs) == 2:\n return self.mono_langs[1]\n return self.mono_langs[np.random.randint(1, len(self.mono_langs))]\n\n def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False\n ):\n\n model.train()\n model.set_num_updates(update_num)\n\n agg_loss, agg_sample_size = 0.0, 0.0\n agg_logging_output: Dict[str, float] = defaultdict(float)\n\n dataset_keys = self.datasets[\"train\"].datasets.keys()\n\n weights = {\n \"BT\": self.lambda_bt(update_num),\n \"DENOISE\": self.lambda_dae(update_num),\n }\n log_keys = {\"BT\": \"bt_\", \"DENOISE\": \"dae_\"}\n\n for dataset_key in dataset_keys:\n smp = sample[dataset_key]\n 
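# dataset keys have the form \"<lang>-BT\" or \"<lang>-DENOISE\"\n            # (built in load_train_dataset), so the split below recovers the\n            # monolingual language and the task subtype\n            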
mono_lang, task_subtype = dataset_key.split(\"-\")\n if weights[task_subtype] == 0:\n continue\n\n if task_subtype == \"BT\":\n with torch.autograd.profiler.record_function(\"backtranslation\"):\n model.eval()\n # TODO: Could we translate to several language at once ?\n # this would allow to share encoder_out and maximize GPU usage.\n other_lang = self.get_other_lang(mono_lang)\n self.backtranslate_sample(smp, mono_lang, other_lang)\n self.display_samples_once_in_a_while(smp, mono_lang, other_lang)\n model.train()\n\n # Like in FairseqTask.train_step\n with torch.autograd.profiler.record_function(\"forward\"):\n loss, sample_size, logging_output = criterion(model, smp)\n loss *= weights[task_subtype]\n if ignore_grad:\n loss *= 0\n with torch.autograd.profiler.record_function(\"backward\"):\n optimizer.backward(loss)\n\n agg_loss += loss.item()\n agg_sample_size += sample_size\n for k in logging_output:\n agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]\n agg_logging_output[k] += logging_output[k]\n\n return agg_loss, agg_sample_size, agg_logging_output\n\n def get_bos_token_from_sample(self, sample):\n net_input = sample[\"net_input\"]\n source_lang_token_id = torch.unique(net_input[\"src_tokens\"][:, 0]).item()\n source_lang_token = self.dictionary[source_lang_token_id].replace(\"_\", \"\")\n target_lang_token_id = _lang_token_index(\n self.dictionary, self.get_other_lang(source_lang_token)\n )\n\n return target_lang_token_id\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n bt_sample_size = sum(x.get(\"bt_sample_size\", 0) for x in logging_outputs)\n if bt_sample_size:\n bt_loss_sum = sum(x.get(\"bt_loss\", 0) for x in logging_outputs)\n bt_loss_sum *= 1 / bt_sample_size / math.log(2)\n metrics.log_scalar(\"bt_loss\", bt_loss_sum, bt_sample_size, round=3)\n\n bt_nll_loss_sum = sum(x.get(\"bt_nll_loss\", 0) for x in logging_outputs)\n bt_ntokens = sum(x.get(\"bt_ntokens\", 0) for x in logging_outputs)\n bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)\n metrics.log_scalar(\"bt_nll_loss\", bt_nll_loss_sum, bt_ntokens, round=3)\n metrics.log_derived(\n \"bt_ppl\", lambda meters: utils.get_perplexity(meters[\"bt_nll_loss\"].avg)\n )\n\n dae_sample_size = sum(x.get(\"dae_sample_size\", 0) for x in logging_outputs)\n if dae_sample_size:\n dae_loss_sum = sum(x.get(\"dae_loss\", 0) for x in logging_outputs)\n dae_loss_sum *= 1 / dae_sample_size / math.log(2)\n metrics.log_scalar(\"dae_loss\", dae_loss_sum, dae_sample_size, round=3)\n\n dae_nll_loss_sum = sum(x.get(\"dae_nll_loss\", 0) for x in logging_outputs)\n dae_ntokens = sum(x.get(\"dae_ntokens\", 0) for x in logging_outputs)\n dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)\n metrics.log_scalar(\"dae_nll_loss\", dae_nll_loss_sum, dae_ntokens, round=3)\n metrics.log_derived(\n \"dae_ppl\",\n lambda meters: utils.get_perplexity(meters[\"dae_nll_loss\"].avg),\n )\n\n\[email protected]_grad()\ndef extend_embedding(\n emb: nn.Module, new_vocab_size: int, copy_from_token_id: int\n) -> None:\n old_emb_data = emb.weight.data\n (old_vocab_size, dim) = old_emb_data.shape\n assert new_vocab_size >= old_vocab_size\n\n if new_vocab_size > old_vocab_size:\n emb.weight.data = torch.zeros((new_vocab_size, dim))\n emb.weight.data[:old_vocab_size, :] = old_emb_data\n # initialize new embeddings\n emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id]\n if hasattr(emb, \"num_embeddings\"):\n emb.num_embeddings = new_vocab_size\n if hasattr(emb, 
\"out_features\"):\n emb.out_features = new_vocab_size\n\n if getattr(emb, \"bias\", None) is None:\n return\n\n # Fix the bias.\n # Bias shape can be different from the previous vocab size\n # if the weight matrix was shared and alread extended but not the bias.\n (old_vocab_size,) = emb.bias.shape\n assert new_vocab_size >= old_vocab_size\n if new_vocab_size > old_vocab_size:\n old_bias = emb.bias.data\n new_bias = torch.zeros(\n (new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device\n )\n new_bias[:old_vocab_size] = old_bias\n emb.bias.data = new_bias\n\n\ndef add_secial_tokens_to_dict_and_model(\n dictionary: \"fairseq.data.Dictionary\",\n model: nn.Module,\n mono_langs: Sequence[str],\n) -> None:\n embs = model.encoder.embed_tokens\n vocab_size, embedding_dim = embs.weight.shape\n\n # The model may or may not have a '<mask>' embedding yet\n assert (\n len(dictionary) <= vocab_size <= len(dictionary) + 1\n ), f\"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})\"\n # TODO: we should reuse the pretrained model dict which already has <mask>\n dictionary.add_symbol(\"<mask>\")\n\n for lang in mono_langs:\n lang_token = _lang_token(lang)\n dictionary.add_symbol(lang_token)\n logger.info(\n f\"dictionary: {len(dictionary)} -> {vocab_size} tokens \"\n f\"after adding {len(mono_langs)} lang tokens.\"\n )\n\n if len(dictionary) <= vocab_size:\n return\n\n extend_embedding(embs, len(dictionary), dictionary.bos())\n dec_embs = model.decoder.embed_tokens\n extend_embedding(dec_embs, len(dictionary), dictionary.bos())\n lm_head = model.decoder.output_projection\n extend_embedding(lm_head, len(dictionary), dictionary.bos())\n assert lm_head.weight.shape == (len(dictionary), embedding_dim)\n\n\ndef _lang_token(lang: str) -> str:\n return f\"__{lang}__\"\n\n\ndef _lang_token_index(dictionary, lang: str) -> int:\n return dictionary.index(_lang_token(lang))\n\n\[email protected]\ndef assert_weights_have_changed(model: nn.Module):\n def checksum(model: nn.Module) -> float:\n return sum(p.sum().item() for p in model.parameters())\n\n initial_checksum = checksum(model)\n yield model\n final_checksum = checksum(model)\n logger.info(\n f\"initial_checksum={initial_checksum} -> final_checksum={final_checksum}\"\n )\n assert initial_checksum != final_checksum, \"Model hasn't changed !\"\n"
] | [
[
"torch.autograd.profiler.record_function",
"torch.unique",
"torch.no_grad",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZhaoChuyang/dgreid | [
"ee1d7af74b796f2f194307ab023e43ecc3d3d525",
"ee1d7af74b796f2f194307ab023e43ecc3d3d525",
"ee1d7af74b796f2f194307ab023e43ecc3d3d525"
] | [
"reid/models/resnet_mldg_smm.py",
"examples/rsc_baseline.py",
"reid/datasets/viper.py"
] | [
"from __future__ import absolute_import\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn import init\nimport torchvision\nfrom collections import OrderedDict\n\nfrom ..models.layers.adain import SMMBlock\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet50_mldg_smm']\n\n\nclass ResNet(nn.Module):\n __factory = {\n 18: torchvision.models.resnet18,\n 34: torchvision.models.resnet34,\n 50: torchvision.models.resnet50,\n 101: torchvision.models.resnet101,\n 152: torchvision.models.resnet152,\n }\n\n def __init__(self, depth, pretrained=True, cut_at_pooling=False,\n num_features=0, norm=False, dropout=0, num_classes=None):\n super(ResNet, self).__init__()\n self.pretrained = pretrained\n self.depth = depth\n self.cut_at_pooling = cut_at_pooling\n\n # Construct base (pretrained) resnet\n if depth not in ResNet.__factory:\n raise KeyError(\"Unsupported depth:\", depth)\n resnet = ResNet.__factory[depth](pretrained=pretrained)\n resnet.layer4[0].conv2.stride = (1,1)\n resnet.layer4[0].downsample[0].stride = (1,1)\n # self.base = nn.Sequential(\n # resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool,\n # resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4)\n\n self.conv = nn.Sequential(OrderedDict([\n ('conv1', resnet.conv1),\n ('bn1', resnet.bn1),\n ('relu', resnet.relu),\n ('maxpool', resnet.maxpool)]))\n\n self.layer1 = resnet.layer1\n self.layer2 = resnet.layer2\n self.layer3 = resnet.layer3\n self.layer4 = resnet.layer4\n\n self.gap = nn.AdaptiveAvgPool2d(1)\n\n self.smm_block = SMMBlock(1, rand=False, learnable=False)\n\n if not self.cut_at_pooling:\n self.num_features = num_features\n self.norm = norm\n self.dropout = dropout\n self.has_embedding = num_features > 0\n self.num_classes = num_classes\n\n out_planes = resnet.fc.in_features\n\n # Append new layers\n if self.has_embedding:\n self.feat = nn.Linear(out_planes, self.num_features)\n self.feat_bn = nn.BatchNorm1d(self.num_features)\n init.kaiming_normal_(self.feat.weight, mode='fan_out')\n init.constant_(self.feat.bias, 0)\n else:\n # Change the num_features to CNN output channels\n self.num_features = out_planes\n self.feat_bn = nn.BatchNorm1d(self.num_features)\n self.feat_bn.bias.requires_grad_(False)\n if self.dropout > 0:\n self.drop = nn.Dropout(self.dropout)\n\n self.classifier = nn.Linear(self.num_features, self.num_classes, bias=False)\n init.normal_(self.classifier.weight, std=0.001)\n\n init.constant_(self.feat_bn.weight, 1)\n init.constant_(self.feat_bn.bias, 0)\n\n if not pretrained:\n self.reset_params()\n\n def forward(self, x, meta_train=True, output_prob=False, return_featuremaps=False):\n if self.training:\n num_domains = len(x)\n x = torch.cat(x, dim=0)\n\n x = self.conv(x)\n\n # NOTE: change to 'if self.training and meta_train:'\n if meta_train:\n mixed_x, _ = self.smm_block(x)\n if return_featuremaps:\n return [x, mixed_x]\n x = mixed_x\n\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.gap(x)\n x = x.view(x.size(0), -1)\n\n if self.cut_at_pooling:\n return x\n\n if self.has_embedding:\n bn_x = self.feat_bn(self.feat(x))\n else:\n bn_x = self.feat_bn(x)\n\n if self.training is False and output_prob is False:\n bn_x = F.normalize(bn_x)\n return bn_x\n\n if self.norm:\n norm_bn_x = F.normalize(bn_x)\n elif self.has_embedding:\n bn_x = F.relu(bn_x)\n\n if self.dropout > 0:\n bn_x = self.drop(bn_x)\n\n prob = self.classifier(bn_x)\n\n # prob, mixed_prob = torch.chunk(prob, 
2, dim=0)\n prob = torch.chunk(prob, num_domains, dim=0)\n # mixed_prob = torch.chunk(mixed_prob, num_domains, dim=0)\n # x, mixed_x = torch.chunk(x, 2, dim=0)\n x = torch.chunk(x, num_domains, dim=0)\n # mixed_x = torch.chunk(mixed_x, num_domains, dim=0)\n\n return prob, x\n\n def reset_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n def get_params(self):\n for param in self.parameters():\n if param.requires_grad:\n yield param\n\n # def train(self, mode=True):\n # \"\"\"\n # Override the default train() to freeze the BN parameters\n # \"\"\"\n # super().train(mode)\n # self.freeze_bn()\n #\n # def freeze_bn(self):\n # for m in self.modules():\n # if isinstance(m, nn.BatchNorm1d):\n # m.eval()\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n\n\ndef resnet18(**kwargs):\n return ResNet(18, **kwargs)\n\n\ndef resnet34(**kwargs):\n return ResNet(34, **kwargs)\n\n\ndef resnet50(**kwargs):\n return ResNet(50, **kwargs)\n\n\ndef resnet101(**kwargs):\n return ResNet(101, **kwargs)\n\n\ndef resnet152(**kwargs):\n return ResNet(152, **kwargs)\n\n\ndef resnet50_mde(**kwargs):\n return ResNet(50, **kwargs)\n\n\ndef resnet50_mldg_smm(**kwargs):\n return ResNet(50, **kwargs)\n\n",
"from __future__ import print_function, absolute_import\nimport argparse\nimport os.path as osp\nimport random\nimport numpy as np\nimport sys\nimport collections\nimport copy\nimport time\nfrom datetime import timedelta\n\nfrom sklearn.cluster import DBSCAN, KMeans\nfrom sklearn.preprocessing import normalize\n\nimport torch\nfrom torch import nn\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nsys.path.append(\".\")\nfrom reid import datasets\nfrom reid import models\n# from reid.models.dsbn import convert_dsbn, convert_bn\n# from reid.models.csbn import convert_csbn\n# from reid.models.idm_dsbn import convert_dsbn_idm, convert_bn_idm\n# from reid.models.xbm import XBM\nfrom reid.trainers import RSCTrainer\nfrom reid.evaluators import Evaluator, extract_features\nfrom reid.utils.data import CommDataset\nfrom reid.utils.data import IterLoader\nfrom reid.utils.data import transforms as T\nfrom reid.utils.data.sampler import RandomMultipleGallerySampler\nfrom reid.utils.data.preprocessor import Preprocessor\nfrom reid.utils.logging import Logger\nfrom reid.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict\nfrom reid.utils.rerank import compute_jaccard_distance\n\n\nstart_epoch = best_mAP = 0\n\ndef get_data(name, data_dir, combineall=False):\n # data_dir = '/data/datasets'\n root = osp.join(data_dir, name)\n dataset = datasets.create(name, root, combineall=combineall)\n return dataset\n\ndef get_train_loader(args, dataset, height, width, batch_size, workers,\n num_instances, iters, trainset=None):\n\n normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transformer = T.Compose([\n T.Resize((height, width), interpolation=3),\n T.RandomHorizontalFlip(p=0.5),\n T.Pad(10),\n T.RandomCrop((height, width)),\n T.ToTensor(),\n normalizer,\n\t # T.RandomErasing(probability=0.5, mean=[0.485, 0.456, 0.406])\n ])\n\n train_set = sorted(dataset.train) if trainset is None else sorted(trainset)\n rmgs_flag = num_instances > 0\n if rmgs_flag:\n sampler = RandomMultipleGallerySampler(train_set, num_instances)\n else:\n sampler = None\n train_loader = IterLoader(\n DataLoader(Preprocessor(train_set, root=dataset.images_dir, transform=train_transformer),\n batch_size=batch_size, num_workers=workers, sampler=sampler,\n shuffle=not rmgs_flag, pin_memory=True, drop_last=True), length=iters)\n\n return train_loader\n\n\ndef get_test_loader(dataset, height, width, batch_size, workers, testset=None):\n normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n test_transformer = T.Compose([\n T.Resize((height, width), interpolation=3),\n T.ToTensor(),\n normalizer\n ])\n\n if (testset is None):\n testset = list(set(dataset.query) | set(dataset.gallery))\n\n test_loader = DataLoader(\n Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),\n batch_size=batch_size, num_workers=workers,\n shuffle=False, pin_memory=True)\n\n return test_loader\n\n\ndef create_model(args):\n model = models.create(args.arch, num_features=args.features, norm=False, dropout=args.dropout, \n num_classes=args.nclass)\n\n # use CUDA\n model.cuda()\n model = nn.DataParallel(model)\n return model\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n\n main_worker(args)\n\n\ndef main_worker(args):\n global start_epoch, best_mAP\n start_time 
= time.monotonic()\n\n    cudnn.benchmark = True\n\n    sys.stdout = Logger(osp.join(args.logs_dir, 'log.txt'))\n    print(\"==========\\nArgs:{}\\n==========\".format(args))\n\n    # Create datasets\n    iters = args.iters if (args.iters>0) else None\n    print(\"==> Load source-domain dataset\")\n    train_items = []\n    for src in args.dataset_source.split(','):\n        dataset = get_data(src, args.data_dir, args.combine_all)\n        train_items.extend(dataset.train)\n    dataset_source = CommDataset(train_items)\n\n    print(\"==> Load target-domain dataset\")\n    dataset_target = get_data(args.dataset_target, args.data_dir)\n    test_loader_target = get_test_loader(dataset_target, args.height, args.width, args.batch_size, args.workers)\n    train_loader_source = get_train_loader(args, dataset_source, args.height, args.width,\n                                           args.batch_size, args.workers, args.num_instances, iters)\n\n    source_classes = dataset_source.num_train_pids\n\n    args.nclass = source_classes\n\n    # Create model\n    model = create_model(args)\n    print(model)\n\n    # Evaluator\n    evaluator = Evaluator(model)\n\n    # Optimizer\n    params = [{\"params\": [value]} for _, value in model.named_parameters() if value.requires_grad]\n    optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)\n    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)\n\n    # Trainer\n    trainer = RSCTrainer(model, args.nclass, margin=args.margin)\n\n    for epoch in range(args.epochs):\n\n        train_loader_source.new_epoch()\n        # train_loader_target.new_epoch()\n        trainer.train(epoch, train_loader_source, optimizer, print_freq=args.print_freq, train_iters=args.iters)\n        \n        if ((epoch+1)%args.eval_step==0 or (epoch==args.epochs-1)):\n\n            print('Test on target: ', args.dataset_target)\n            _, mAP = evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)\n            is_best = (mAP>best_mAP)\n            best_mAP = max(mAP, best_mAP)\n            save_checkpoint({\n                'state_dict': model.state_dict(),\n                'epoch': epoch + 1,\n                'best_mAP': best_mAP,\n            }, is_best, fpath=osp.join(args.logs_dir, 'checkpoint.pth.tar'))\n\n            print('\\n * Finished epoch {:3d} model mAP: {:5.1%} best: {:5.1%}{}\\n'.\n                  format(epoch, mAP, best_mAP, ' *' if is_best else ''))\n\n        lr_scheduler.step()\n\n\n    print ('==> Test with the best model on the target domain:')\n    checkpoint = load_checkpoint(osp.join(args.logs_dir, 'model_best.pth.tar'))\n    model.load_state_dict(checkpoint['state_dict'])\n    evaluator.evaluate(test_loader_target, dataset_target.query, dataset_target.gallery, cmc_flag=True)\n\n    end_time = time.monotonic()\n    print('Total running time: ', timedelta(seconds=end_time - start_time))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"Self-paced contrastive learning on UDA re-ID\")\n    # data\n    parser.add_argument('-ds', '--dataset-source', type=str, default='dukemtmc')\n    parser.add_argument('-dt', '--dataset-target', type=str, default='market1501')\n    parser.add_argument('--combine-all', action='store_true',\n                        help=\"if True: combine all train, query, gallery for training;\")\n    parser.add_argument('-b', '--batch-size', type=int, default=64)\n    parser.add_argument('-j', '--workers', type=int, default=4)\n    parser.add_argument('--height', type=int, default=256, help=\"input height\")\n    parser.add_argument('--width', type=int, default=128, help=\"input width\")\n    parser.add_argument('--num-instances', type=int, default=4,\n                        help=\"each minibatch consists of \"\n                             \"(batch_size // num_instances) identities, and \"\n                             \"each identity has num_instances 
instances, \"\n \"default: 0 (NOT USE)\")\n # cluster\n parser.add_argument('--eps', type=float, default=0.6,\n help=\"max neighbor distance for DBSCAN\")\n parser.add_argument('--k1', type=int, default=30,\n help=\"hyperparameter for jaccard distance\")\n parser.add_argument('--k2', type=int, default=6,\n help=\"hyperparameter for jaccard distance\")\n parser.add_argument('--nclass', type=int, default=1000,\n help=\"number of classes (source+target)\")\n parser.add_argument('--s-class', type=int, default=1000,\n help=\"number of classes (source)\")\n parser.add_argument('--t-class', type=int, default=1000,\n help=\"number of classes (target)\")\n # loss\n parser.add_argument('--margin', type=float, default=0.3,\n help=\"margin for triplet loss\")\n parser.add_argument('--mu1', type=float, default=0.5,\n help=\"weight for loss_bridge_pred\")\n parser.add_argument('--mu2', type=float, default=0.1,\n help=\"weight for loss_bridge_feat\")\n parser.add_argument('--mu3', type=float, default=1,\n help=\"weight for loss_div\")\n\n # model\n parser.add_argument('-a', '--arch', type=str, default='resnet50_idm',\n choices=models.names())\n parser.add_argument('--features', type=int, default=0)\n parser.add_argument('--dropout', type=float, default=0)\n\n # xbm parameters\n parser.add_argument('--memorySize', type=int, default=8192,\n help='meomory bank size')\n parser.add_argument('--ratio', type=float, default=1,\n help='memorySize=ratio*data_size')\n parser.add_argument('--featureSize', type=int, default=2048)\n parser.add_argument('--use-xbm', action='store_true', help=\"if True: strong baseline; if False: naive baseline\")\n\n # optimizer\n parser.add_argument('--lr', type=float, default=0.00035,\n help=\"learning rate\")\n parser.add_argument('--weight-decay', type=float, default=5e-4)\n parser.add_argument('--epochs', type=int, default=60)\n parser.add_argument('--iters', type=int, default=200)\n parser.add_argument('--step-size', type=int, default=30)\n # training configs\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--print-freq', type=int, default=50)\n parser.add_argument('--eval-step', type=int, default=10)\n\n # path\n working_dir = osp.dirname(osp.abspath(__file__))\n parser.add_argument('--data-dir', type=str, default='/data/datasets')\n parser.add_argument('--logs-dir', type=str, metavar='PATH',\n default=osp.join(working_dir, 'logs'))\n\n # hbchen\n parser.add_argument('--csdn', type=bool, default=False)\n main()\n\n",
"from __future__ import division, print_function, absolute_import\nimport glob\nimport numpy as np\nimport os.path as osp\n\nfrom ..utils.tools import read_json, write_json\n\nfrom ..utils.data import BaseImageDataset\n\n\nclass VIPeR(BaseImageDataset):\n \"\"\"VIPeR.\n\n Reference:\n Gray et al. Evaluating appearance models for recognition, reacquisition, and tracking. PETS 2007.\n\n URL: `<https://vision.soe.ucsc.edu/node/178>`_\n \n Dataset statistics:\n - identities: 632.\n - images: 632 x 2 = 1264.\n - cameras: 2.\n \"\"\"\n dataset_dir = 'viper'\n dataset_url = 'http://users.soe.ucsc.edu/~manduchi/VIPeR.v1.0.zip'\n dataset_name = 'viper'\n\n def __init__(self, root='', split_id=0, verbose=True, combineall=False, **kwargs):\n super(VIPeR, self).__init__()\n self.root = osp.abspath(osp.expanduser(root))\n self.dataset_dir = self.root\n self.download_dataset(self.dataset_dir, self.dataset_url)\n\n self.cam_a_dir = osp.join(self.dataset_dir, 'VIPeR', 'cam_a')\n self.cam_b_dir = osp.join(self.dataset_dir, 'VIPeR', 'cam_b')\n self.split_path = osp.join(self.dataset_dir, 'splits.json')\n\n required_files = [self.dataset_dir, self.cam_a_dir, self.cam_b_dir]\n self.check_before_run(required_files)\n\n self.prepare_split()\n splits = read_json(self.split_path)\n if split_id >= len(splits):\n raise ValueError(\n 'split_id exceeds range, received {}, '\n 'but expected between 0 and {}'.format(\n split_id,\n len(splits) - 1\n )\n )\n split = splits[split_id]\n\n train = split['train']\n query = split['query'] # query and gallery share the same images\n gallery = split['gallery']\n\n train = [tuple(item) for item in train]\n query = [tuple(item) for item in query]\n gallery = [tuple(item) for item in gallery]\n\n self.train = train\n self.query = query\n self.gallery = gallery\n\n if combineall:\n self.train = self.combine_all(train, query, gallery)\n\n if verbose:\n print(\"=> VIPeR loaded\")\n self.print_dataset_statistics(self.train, query, gallery)\n\n self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)\n self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)\n self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)\n\n\n\n def prepare_split(self):\n if not osp.exists(self.split_path):\n print('Creating 10 random splits of train ids and test ids')\n\n cam_a_imgs = sorted(glob.glob(osp.join(self.cam_a_dir, '*.bmp')))\n cam_b_imgs = sorted(glob.glob(osp.join(self.cam_b_dir, '*.bmp')))\n assert len(cam_a_imgs) == len(cam_b_imgs)\n num_pids = len(cam_a_imgs)\n print('Number of identities: {}'.format(num_pids))\n num_train_pids = num_pids // 2\n \"\"\"\n In total, there will be 20 splits because each random split creates two\n sub-splits, one using cameraA as query and cameraB as gallery\n while the other using cameraB as query and cameraA as gallery.\n Therefore, results should be averaged over 20 splits (split_id=0~19).\n \n In practice, a model trained on split_id=0 can be applied to split_id=0&1\n as split_id=0&1 share the same training data (so on and so forth).\n \"\"\"\n splits = []\n for _ in range(10):\n order = np.arange(num_pids)\n np.random.shuffle(order)\n train_idxs = order[:num_train_pids]\n test_idxs = order[num_train_pids:]\n assert not bool(set(train_idxs) & set(test_idxs)), \\\n 'Error: train and test overlap'\n\n train = []\n for pid, idx in enumerate(train_idxs):\n cam_a_img = cam_a_imgs[idx]\n cam_b_img = cam_b_imgs[idx]\n 
train.append((cam_a_img, pid, 0))\n train.append((cam_b_img, pid, 1))\n\n test_a = []\n test_b = []\n for pid, idx in enumerate(test_idxs):\n cam_a_img = cam_a_imgs[idx]\n cam_b_img = cam_b_imgs[idx]\n test_a.append((cam_a_img, pid, 0))\n test_b.append((cam_b_img, pid, 1))\n\n # use cameraA as query and cameraB as gallery\n split = {\n 'train': train,\n 'query': test_a,\n 'gallery': test_b,\n 'num_train_pids': num_train_pids,\n 'num_query_pids': num_pids - num_train_pids,\n 'num_gallery_pids': num_pids - num_train_pids\n }\n splits.append(split)\n\n # use cameraB as query and cameraA as gallery\n split = {\n 'train': train,\n 'query': test_b,\n 'gallery': test_a,\n 'num_train_pids': num_train_pids,\n 'num_query_pids': num_pids - num_train_pids,\n 'num_gallery_pids': num_pids - num_train_pids\n }\n splits.append(split)\n\n print('Totally {} splits are created'.format(len(splits)))\n write_json(splits, self.split_path)\n print('Split file saved to {}'.format(self.split_path))\n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.normal_",
"torch.chunk",
"torch.nn.init.kaiming_normal_"
],
[
"torch.optim.Adam",
"numpy.random.seed",
"torch.manual_seed",
"torch.nn.DataParallel",
"torch.optim.lr_scheduler.StepLR"
],
[
"numpy.arange",
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhanghaohit/incubator-tvm | [
"ee0af843f3c5a3429e888079afb5f30789bd9bee",
"ee0af843f3c5a3429e888079afb5f30789bd9bee",
"ee0af843f3c5a3429e888079afb5f30789bd9bee",
"ee0af843f3c5a3429e888079afb5f30789bd9bee",
"ee0af843f3c5a3429e888079afb5f30789bd9bee"
] | [
"tests/python/relay/test_op_level1.py",
"tests/python/unittest/test_custom_datatypes_mybfloat16.py",
"tests/python/relay/test_op_qnn_add.py",
"tests/python/unittest/test_codegen_device.py",
"python/tvm/autotvm/graph_tuner/base_graph_tuner.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport pytest\nimport tvm\nimport scipy\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.testing import ctx_list\nimport topi.testing\nfrom tvm.contrib.nvcc import have_fp16\n\ndef run_infer_type(expr):\n mod = relay.Module.from_expr(expr)\n mod = transform.InferType()(mod)\n entry = mod[\"main\"]\n return entry if isinstance(expr, relay.Function) else entry.body\n\ndef sigmoid(x):\n one = np.ones_like(x)\n return one / (one + np.exp(-x))\n\ndef relu(x):\n x_copy = np.copy(x)\n np.maximum(x_copy, 0, x_copy)\n return x_copy\n\ndef rsqrt(x):\n one = np.ones_like(x)\n return one / np.sqrt(x)\n\ndef test_unary_op():\n def check_single_op(opfunc, ref, dtype):\n shape = (10, 4)\n dtype = dtype\n tp = relay.TensorType(shape)\n x = relay.var(\"x\", tp, dtype=dtype)\n y = opfunc(x)\n # test printer\n assert (\"{}(%x)\".format(y.op.name)) in y.astext()\n # test type inference\n yy = run_infer_type(y)\n assert yy.checked_type == tp\n\n if ref is not None:\n data = np.random.rand(*shape).astype(dtype)\n ref_res = ref(data)\n func = relay.Function([x], y)\n for target, ctx in ctx_list():\n # use graph by execuor default for testing, as we need\n # create function explicitly to avoid constant-folding.\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\n\n for opfunc, ref in [(tvm.relay.log, np.log),\n (tvm.relay.exp, np.exp),\n (tvm.relay.erf, scipy.special.erf),\n (tvm.relay.sqrt, np.sqrt),\n (tvm.relay.rsqrt, rsqrt),\n (tvm.relay.sigmoid, sigmoid),\n (tvm.relay.tanh, np.tanh),\n (relay.nn.relu, relu),\n (tvm.relay.cos, np.cos),\n (tvm.relay.sin, np.sin),\n (tvm.relay.atan, np.arctan)]:\n for dtype in ['float16', 'float32']:\n check_single_op(opfunc, ref, dtype)\n\n\ndef test_binary_op():\n def inst(vars, sh):\n return [vars.get(s, s) for s in sh]\n\n def check_binary_op(opfunc, ref, dtype):\n # TODO(@jroesch): this piece of code improperly uses type variables.\n n = tvm.var(\"n\")\n s1 = (5, n, 5)\n s2 = (n, 1)\n t1 = relay.TensorType(s1)\n t2 = relay.TensorType(s2)\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n # test printer\n assert (\"{}(%x, %y)\".format(z.op.name)) in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == t1\n\n if ref is not None:\n t1 = relay.TensorType((5, 10, 5))\n t2 = relay.TensorType((5, 10, 5))\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n x_data = np.random.rand(5, 10, 5).astype(dtype)\n y_data = 
np.random.rand(5, 10, 5).astype(dtype)\n ref_res = ref(x_data, y_data)\n func = relay.Function([x, y], z)\n\n for target, ctx in ctx_list():\n # use graph by execuor default for testing, as we need\n # create function explicitly to avoid constant-folding.\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\n for opfunc, ref in [(relay.add, np.add),\n (relay.subtract, np.subtract),\n (relay.multiply, np.multiply),\n (relay.divide, np.divide),\n (relay.floor_divide, np.floor_divide),\n (relay.floor_mod, np.fmod)]:\n for dtype in ['float16', 'float32']:\n check_binary_op(opfunc, ref, dtype)\n\n\ndef test_expand_dims():\n # based on topi test\n def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):\n x = relay.Var(\"x\", relay.TensorType(dshape, dtype))\n func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))\n for target, ctx in ctx_list():\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n data = np.random.uniform(size=dshape).astype(dtype)\n ref_res = data.reshape(oshape)\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n for dtype in ['float16', 'float32']:\n verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)\n verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)\n\n\ndef test_bias_add():\n for dtype in ['float16', 'float32']:\n xshape=(10, 2, 3, 4)\n bshape=(2,)\n rtol = 1e-2 if dtype == 'float16' else 1e-5\n x = relay.var(\"x\", shape=xshape, dtype=dtype)\n bias = relay.var(\"bias\", dtype=dtype)\n z = relay.nn.bias_add(x, bias)\n zz = run_infer_type(z)\n assert \"axis=\" not in zz.astext()\n assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)\n\n func = relay.Function([x, bias], z)\n x_data = np.random.uniform(size=xshape).astype(dtype)\n y_data = np.random.uniform(size=bshape).astype(dtype)\n ref_res = x_data + y_data.reshape((2, 1, 1))\n for target, ctx in ctx_list():\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol)\n\n\ndef test_expand_dims_infer_type():\n for dtype in ['float16', 'float32']:\n n, t, d = tvm.size_var(\"n\"), tvm.size_var(\"t\"), 100\n x = relay.var(\"x\", shape=(n, t, d), dtype=dtype)\n y = relay.expand_dims(x, axis=2)\n assert \"axis=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, t, 1, 100), dtype)\n\n\ndef test_softmax():\n for dtype in ['float16', 'float32']:\n # Softmax accuracy for float16 is poor\n if dtype == 'float16':\n return\n shape = (10, 4)\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n y = relay.nn.softmax(x, axis=1)\n assert \"nn.softmax\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=shape).astype(dtype)\n ref_res = topi.testing.softmax_python(x_data)\n for target, ctx in ctx_list():\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n 
np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n\ndef test_log_softmax():\n for dtype in ['float16', 'float32']:\n # Softmax accuracy for float16 is poor\n if dtype == 'float16':\n return\n shape = (10, 4)\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n y = relay.nn.log_softmax(x, axis=1)\n assert \"nn.log_softmax\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=shape).astype(dtype)\n ref_res = topi.testing.log_softmax_python(x_data)\n for target, ctx in ctx_list():\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n\ndef test_concatenate():\n for dtype in ['float16', 'float32']:\n n, t, d = tvm.size_var(\"n\"), tvm.size_var(\"t\"), 100\n x = relay.var(\"x\", shape=(n, t, d))\n y = relay.var(\"y\", shape=(n, t, d))\n z = relay.concatenate((x, y), axis=-1)\n assert \"axis=\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t, 200))\n \n x = relay.exp(x)\n z = relay.concatenate((x, y), axis=2)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t, 200))\n \n z = relay.concatenate((x, y), axis=1)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t + t, 100))\n \n # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError.\n try:\n x = relay.var('p1', shape=(2, 5))\n y = relay.var('p2', shape=(2, 3))\n c = relay.concatenate([x, y], axis=0)\n func = relay.Function([x, y], c)\n zz = run_infer_type(func)\n except tvm._ffi.base.TVMError:\n pass\n else:\n assert False\n \n x = relay.var(\"x\", shape=(10, 5), dtype=dtype)\n y = relay.var(\"y\", shape=(10, 5), dtype=dtype)\n t = relay.var(\"z\", shape=(), dtype=dtype)\n z = relay.concatenate((x, y), axis=1)\n z = relay.add(z, t)\n # Check result.\n func = relay.Function([x, y, t], z)\n x_data = np.random.rand(10, 5).astype(dtype)\n y_data = np.random.rand(10, 5).astype(dtype)\n t_data = np.random.uniform(size=()).astype(dtype)\n ref_res = np.concatenate((x_data, y_data), axis=1) + t_data\n \n for target, ctx in ctx_list():\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)\n op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)\n\ndef test_dropout():\n for dtype in ['float16', 'float32']:\n n, t, d = tvm.size_var(\"n\"), tvm.size_var(\"t\"), tvm.size_var(\"d\")\n input_ty = relay.TensorType((n, t, d), dtype)\n x = relay.var(\"x\", input_ty)\n y = relay.nn.dropout(x, rate=0.75)\n assert \"rate=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == input_ty\n\n\ndef test_batch_norm():\n for dtype in ['float16', 'float32']:\n # beta and gamma ignored\n data = relay.var(\"data\", relay.TensorType((3, 2, 1), dtype))\n beta = relay.var(\"beta\", relay.TensorType((2,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((2,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((2,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((2,), dtype))\n y = 
relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,\n center=False, scale=False)\n yy = run_infer_type(y.astuple())\n assert \"center=\" in yy.astext()\n assert yy.checked_type == relay.ty.TupleType(tvm.convert([\n relay.TensorType((3, 2, 1), dtype),\n relay.TensorType((2,), dtype),\n relay.TensorType((2,), dtype)\n ]))\n\n beta = relay.var(\"beta\", relay.TensorType((3,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((3,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((3,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((3,), dtype))\n\n y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,\n axis=0, center=False, scale=False)\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == relay.ty.TupleType(tvm.convert([\n relay.ty.TensorType((3, 2, 1), dtype),\n relay.ty.TensorType((3,), dtype),\n relay.ty.TensorType((3,), dtype)\n ]))\n\n # axis=-1\n data = relay.var(\"data\", relay.TensorType((1, 2, 3), dtype))\n beta = relay.var(\"beta\", relay.TensorType((3,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((3,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((3,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((3,), dtype))\n y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,\n axis=-1, center=False, scale=False)\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == relay.ty.TupleType(tvm.convert([\n relay.ty.TensorType((1, 2, 3), dtype),\n relay.ty.TensorType((3,), dtype),\n relay.ty.TensorType((3,), dtype)\n ]))\n\[email protected]\ndef test_dense_type_check():\n dtype = 'float16'\n n, c , h, w = 2, 2 , 2 ,2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n # it should fail since it does not match with m(2)\n mismatch_w = 3\n w = relay.var(\"w\", relay.TensorType((2, mismatch_w), dtype))\n y = relay.nn.dense(x, w)\n yy = run_infer_type(y)\n\ndef test_dense():\n for dtype in ['float16', 'float32']:\n # Dense accuracy for float16 is poor\n if dtype == 'float16':\n return\n n, c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.TensorType((2, w), dtype))\n y = relay.nn.dense(x, w, units=2)\n assert \"units=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n n, c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n wh, ww = tvm.size_var(\"wh\"), tvm.size_var(\"ww\")\n w = relay.var(\"w\", relay.TensorType((ww, wh), dtype))\n y = relay.nn.dense(x, w)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)\n\n n, c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.IncompleteType())\n y = relay.nn.dense(x, w, units=2)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n x = relay.var(\"x\", shape=(10, 5), dtype=dtype)\n w = relay.var(\"w\", shape=(2, 5), dtype=dtype)\n z = relay.nn.dense(x, w)\n\n # Check result.\n func = relay.Function([x, w], z)\n x_data = np.random.rand(10, 5).astype(dtype)\n w_data = np.random.rand(2, 5).astype(dtype)\n ref_res = np.dot(x_data, w_data.T)\n\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, 
target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data, w_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data, w_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\n\ndef test_dense_dtype():\n data_dtype = 'uint8'\n weight_dtype = 'int8'\n out_dtype = 'uint8'\n n, c , h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), data_dtype))\n w = relay.var(\"w\", relay.TensorType((2, w), weight_dtype))\n y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype)\n assert \"units=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), out_dtype)\n assert run_infer_type(yy.args[0]).checked_type.dtype == 'uint8'\n assert run_infer_type(yy.args[1]).checked_type.dtype == 'int8'\n\n\ndef test_bitserial_dense():\n m, k = tvm.size_var(\"m\"), tvm.size_var(\"k\")\n x = relay.var(\"x\", relay.TensorType((m, k), \"int16\"))\n w = relay.var(\"w\", relay.TensorType((k, 32), \"int16\"))\n y = relay.nn.bitserial_dense(x, w, units=32)\n \"units=8\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((m, 32), \"int16\")\n\n\nif __name__ == \"__main__\":\n test_concatenate()\n test_bias_add()\n test_unary_op()\n test_binary_op()\n test_expand_dims_infer_type()\n test_expand_dims()\n test_softmax()\n test_log_softmax()\n test_dropout()\n test_batch_norm()\n test_dense()\n test_bitserial_dense()\n test_dense_dtype()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport tvm\nfrom ctypes import *\nimport topi\nimport tvm.ir_pass as ir_pass\nimport numpy as np\n\ntgt = \"llvm\"\n\n\ndef setup_module():\n # You must first load the library containing the datatype implementation.\n # In this case, we have built the test functions used below right into TVM.\n # CDLL(\"libmybfloat16.so\", RTLD_GLOBAL)\n\n tvm.datatype.register(\"bfloat\", 129)\n\n tvm.datatype.register_op(\n tvm.datatype.create_lower_func(\"FloatToBFloat16_wrapper\"), \"Cast\",\n \"llvm\", \"bfloat\", \"float\")\n tvm.datatype.register_op(\n tvm.datatype.create_lower_func(\"BFloat16ToFloat_wrapper\"), \"Cast\",\n \"llvm\", \"float\", \"bfloat\")\n tvm.datatype.register_op(\n tvm.datatype.create_lower_func(\"BFloat16Add_wrapper\"), \"Add\", \"llvm\",\n \"bfloat\")\n tvm.datatype.register_op(\n tvm.datatype.create_lower_func(\"FloatToBFloat16_wrapper\"), \"FloatImm\",\n \"llvm\", \"bfloat\")\n\ndef lower_datatypes_and_build(schedule, args):\n \"\"\"Create schedule and lower, manually lowering datatypes.\n\n Once datatype lowering is integrated directly into TVM's lower/build\n process, we won't need to do this manually.\n TODO(gus) integrate datatype lowering into build process; change this test\"\"\"\n flist = tvm.lower(schedule, args)\n flist = [flist]\n flist = [ir_pass.LowerCustomDatatypes(func, tgt) for func in flist]\n return tvm.build(flist[0], target=tgt)\n\ndef test_bfloat_add_and_cast_1():\n X = tvm.placeholder((3, ), name=\"X\")\n Y = tvm.placeholder((3, ), name=\"Y\")\n Z = topi.cast(\n topi.cast(X, dtype=\"custom[bfloat]16\") +\n topi.cast(Y, dtype=\"custom[bfloat]16\"),\n dtype=\"float\")\n\n s = tvm.create_schedule([Z.op])\n built_cast = lower_datatypes_and_build(s, [X,Y,Z])\n\n ctx = tvm.context(tgt, 0)\n\n # Used float32 calculator at http://www.weitz.de/ieee/. Generated float32s\n # with at most 7-bit mantissas which, when added, produce a result with at\n # most 7-bit mantissas. 
This is to ensure there are no errors due to\n # float32->bfloat16 conversions.\n x = tvm.nd.array(\n np.array([4.4103796E-32, 14942208.0, 1.78125]).astype(\"float32\"),\n ctx=ctx)\n y = tvm.nd.array(\n np.array([-3.330669E-14, 19660800.0, 2.25]).astype(\"float32\"), ctx=ctx)\n z_expected = np.array([-3.330669E-14, 34603008.0,\n 4.03125]).astype(\"float32\")\n z = tvm.nd.empty(Z.shape, dtype=Z.dtype, ctx=ctx)\n\n built_cast(x, y, z)\n\n assert np.array_equal(z_expected, z.asnumpy())\n\n\ndef test_bfloat_add_and_cast_2():\n X = tvm.placeholder((3, ), name=\"X\")\n Y = tvm.placeholder((3, ), name=\"Y\")\n Z = topi.cast(\n topi.cast(X, dtype=\"custom[bfloat]16\") +\n topi.cast(Y, dtype=\"custom[bfloat]16\"),\n dtype=\"float\")\n\n s = tvm.create_schedule([Z.op])\n built_cast = lower_datatypes_and_build(s, [X,Y,Z])\n\n ctx = tvm.context(tgt, 0)\n\n # Used float32 calculator at http://www.weitz.de/ieee/. Generated\n # unconstrained float32s for the operands and copied them in to x and y.\n # Then, to simulate float32->bfloat16 conversion implemented by the mybfloat\n # library, I cut off all but 7 bits of the mantissa. I then added the\n # numbers. To simulate bfloat16 add implemented in mybfloat, I cut off all\n # but 7 bits of the result's mantissa. I then copied that value into\n # z_expected.\n x = tvm.nd.array(\n np.array([1.2348297, -1.0298302E25, 1.2034023E-30]).astype(\"float32\"),\n ctx=ctx)\n y = tvm.nd.array(\n np.array([-2.4992788, -9.888288E19, 9.342338E-29]).astype(\"float32\"),\n ctx=ctx)\n z_expected = np.array([-1.25, -1.027587E25,\n 9.426888E-29]).astype(\"float32\")\n z = tvm.nd.empty(Z.shape, dtype=Z.dtype, ctx=ctx)\n\n built_cast(x, y, z)\n\n assert np.array_equal(z_expected, z.asnumpy())\n\n\ndef test_bfloat_add_and_cast_FloatImm():\n X = tvm.placeholder((3, ), name=\"X\")\n Z = topi.cast(\n topi.add(\n topi.cast(X, dtype=\"custom[bfloat]16\"),\n tvm.expr.FloatImm(\"custom[bfloat]16\", 1.5)),\n dtype=\"float\")\n\n s = tvm.create_schedule([Z.op])\n built_cast = lower_datatypes_and_build(s, [X,Z])\n\n ctx = tvm.context(tgt, 0)\n\n x = tvm.nd.array(np.array([0.0, 1.0, 1.5]).astype(\"float32\"), ctx=ctx)\n z_expected = np.array([1.5, 2.5, 3.0]).astype(\"float32\")\n z = tvm.nd.empty(Z.shape, dtype=Z.dtype, ctx=ctx)\n\n built_cast(x, z)\n\n assert np.array_equal(z_expected, z.asnumpy())\n\n\nif __name__ == \"__main__\":\n setup_module()\n test_bfloat_add_and_cast_1()\n test_bfloat_add_and_cast_2()\n test_bfloat_add_and_cast_FloatImm()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport tvm\nimport numpy as np\nfrom tvm import relay\nfrom tvm.contrib import graph_runtime\nimport topi.testing\n\ndef test_tflite_same_io_qnn_params():\n data_dtype = 'uint8'\n\n x = relay.var(\"x\", shape=(1, 4), dtype=data_dtype)\n y = relay.var(\"y\", shape=(1, 4), dtype=data_dtype)\n z = relay.qnn.op.add(lhs=x, rhs=y,\n lhs_scale=relay.const(0.00784314, 'float32'),\n lhs_zero_point=relay.const(127, 'int32'),\n rhs_scale=relay.const(0.00784314, 'float32'),\n rhs_zero_point=relay.const(127, 'int32'),\n output_scale=relay.const(0.00784314, 'float32'),\n output_zero_point=relay.const(127, 'int32'))\n\n func = relay.Function([x, y], z)\n mod = relay.Module.from_expr(func)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n\n x_datas = [np.array((140, 153, 165, 178)).reshape((1,4)),\n np.array((25, 153, 178, 216)).reshape((1,4)),\n np.array((25, 153, 216, 165)).reshape((1,4))]\n y_datas = [np.array((204, 178, 165, 140)).reshape((1,4)),\n np.array((204, 178, 191, 25)).reshape((1,4)),\n np.array((204, 178, 25, 191)).reshape((1,4))]\n golden_outputs = [np.array((217,204,203,191)).reshape((1, 4)),\n np.array((102, 204, 242, 114)).reshape((1,4)),\n np.array((102, 204, 114, 229)).reshape((1,4))]\n\n for i in range(0, 3):\n x_data = x_datas[i]\n y_data = y_datas[i]\n golden_output = golden_outputs[i]\n\n intrp = relay.create_executor(\"graph\", ctx=tvm.cpu(0), target=\"llvm\")\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_equal(op_res.asnumpy(), golden_output)\n\n\ndef test_tflite_different_io_qnn_params():\n data_dtype = 'uint8'\n\n x = relay.var(\"x\", shape=(1, 4), dtype=data_dtype)\n y = relay.var(\"y\", shape=(1, 4), dtype=data_dtype)\n z = relay.qnn.op.add(lhs=x, rhs=y,\n lhs_scale=relay.const(0.0156863, 'float32'),\n lhs_zero_point=relay.const(127, 'int32'),\n rhs_scale=relay.const(0.0117647, 'float32'),\n rhs_zero_point=relay.const(85, 'int32'),\n output_scale=relay.const(0.0235294, 'float32'),\n output_zero_point=relay.const(128, 'int32'))\n\n func = relay.Function([x, y], z)\n mod = relay.Module.from_expr(func)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n\n x_datas = [np.array((76, 140, 153, 172)).reshape((1,4)),\n np.array((133, 140, 146, 153)).reshape((1,4)),\n np.array((76, 140, 172, 146)).reshape((1,4))]\n y_datas = [np.array((136, 119, 128, 17)).reshape((1,4)),\n np.array((136, 119, 111, 94)).reshape((1,4)),\n np.array((136, 119, 17, 128)).reshape((1,4))]\n golden_outputs = [np.array((120, 154, 167, 124)).reshape((1, 4)),\n np.array((158, 154, 154, 150)).reshape((1,4)),\n np.array((120, 154, 124, 163)).reshape((1,4))]\n\n for i in range(0, 3):\n x_data = x_datas[i]\n y_data = y_datas[i]\n golden_output = 
golden_outputs[i]\n\n intrp = relay.create_executor(\"graph\", ctx=tvm.cpu(0), target=\"llvm\")\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_equal(op_res.asnumpy(), golden_output)\n\n\ndef test_saturation():\n # Same params\n data_dtype = 'uint8'\n x = relay.var(\"x\", shape=(1, 4), dtype=data_dtype)\n y = relay.var(\"y\", shape=(1, 4), dtype=data_dtype)\n z = relay.qnn.op.add(lhs=x, rhs=y,\n lhs_scale=relay.const(0.125, 'float32'),\n lhs_zero_point=relay.const(0, 'int32'),\n rhs_scale=relay.const(0.125, 'float32'),\n rhs_zero_point=relay.const(0, 'int32'),\n output_scale=relay.const(0.125, 'float32'),\n output_zero_point=relay.const(0, 'int32'))\n\n func = relay.Function([x, y], z)\n mod = relay.Module.from_expr(func)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n\n x_data = np.array((255, 1, 1, 0)).reshape((1,4))\n y_data = np.array((255, 255, 128, 0)).reshape((1,4))\n golden_output = np.array((255, 255, 129, 0)).reshape((1, 4))\n\n intrp = relay.create_executor(\"graph\", ctx=tvm.cpu(0), target=\"llvm\")\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_equal(op_res.asnumpy(), golden_output)\n\n # Same params, different scale\n z = relay.qnn.op.add(lhs=x, rhs=y,\n lhs_scale=relay.const(0.125, 'float32'),\n lhs_zero_point=relay.const(0, 'int32'),\n rhs_scale=relay.const(0.125, 'float32'),\n rhs_zero_point=relay.const(0, 'int32'),\n output_scale=relay.const(0.25, 'float32'),\n output_zero_point=relay.const(0, 'int32'))\n\n func = relay.Function([x, y], z)\n mod = relay.Module.from_expr(func)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n\n x_data = np.array((255, 1, 1, 0)).reshape((1,4))\n y_data = np.array((255, 255, 127, 0)).reshape((1,4))\n golden_output = np.array((255, 129, 65, 0)).reshape((1, 4))\n\n intrp = relay.create_executor(\"graph\", ctx=tvm.cpu(0), target=\"llvm\")\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_equal(op_res.asnumpy(), golden_output)\n\n # Same io params, different output scale\n z = relay.qnn.op.add(lhs=x, rhs=y,\n lhs_scale=relay.const(0.125, 'float32'),\n lhs_zero_point=relay.const(0, 'int32'),\n rhs_scale=relay.const(0.125, 'float32'),\n rhs_zero_point=relay.const(0, 'int32'),\n output_scale=relay.const(0.25, 'float32'),\n output_zero_point=relay.const(0, 'int32'))\n\n func = relay.Function([x, y], z)\n mod = relay.Module.from_expr(func)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n\n x_data = np.array((255, 1, 1, 0)).reshape((1,4))\n y_data = np.array((255, 255, 127, 0)).reshape((1,4))\n golden_output = np.array((255, 129, 65, 0)).reshape((1, 4))\n\n intrp = relay.create_executor(\"graph\", ctx=tvm.cpu(0), target=\"llvm\")\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_equal(op_res.asnumpy(), golden_output)\n\n # All params different\n z = relay.qnn.op.add(lhs=x, rhs=y,\n lhs_scale=relay.const(0.5, 'float32'),\n lhs_zero_point=relay.const(0, 'int32'),\n rhs_scale=relay.const(0.25, 'float32'),\n rhs_zero_point=relay.const(0, 'int32'),\n output_scale=relay.const(0.125, 'float32'),\n output_zero_point=relay.const(0, 'int32'))\n\n func = relay.Function([x, y], z)\n mod = relay.Module.from_expr(func)\n mod = relay.qnn.transform.CanonicalizeOps()(mod)\n func = mod[\"main\"]\n\n x_data = np.array((255, 0, 1, 0)).reshape((1,4))\n y_data = np.array((0, 128, 64, 0)).reshape((1,4))\n golden_output = np.array((255, 255, 132, 0)).reshape((1, 4))\n\n intrp = relay.create_executor(\"graph\", 
ctx=tvm.cpu(0), target=\"llvm\")\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_equal(op_res.asnumpy(), golden_output)\n\n\nif __name__ == '__main__':\n test_tflite_same_io_qnn_params()\n test_tflite_different_io_qnn_params()\n test_saturation()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport tvm\nfrom tvm.contrib import util\nimport numpy as np\n\ndef test_large_uint_imm():\n value = (1 << 63) + 123\n other = tvm.const(3, \"uint64\")\n n = 12\n num_thread = 2\n\n A = tvm.compute((n,), lambda *i: tvm.const(value, \"uint64\") + other, name='A')\n s = tvm.create_schedule(A.op)\n xo, xi = s[A].split(A.op.axis[0], factor=num_thread)\n s[A].bind(xi, tvm.thread_axis(\"threadIdx.x\"))\n s[A].bind(xo, tvm.thread_axis(\"blockIdx.x\"))\n\n def check_target(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n return\n f = tvm.build(s, [A], device)\n # launch the kernel.\n a = tvm.nd.empty((n, ), dtype=A.dtype, ctx=ctx)\n f(a)\n assert a.asnumpy()[0] == value + 3\n\n check_target(\"cuda\")\n check_target(\"vulkan\")\n\n\ndef test_add_pipeline():\n n = tvm.size_var('n')\n A = tvm.placeholder((n,), name='A')\n B = tvm.placeholder((), name='B')\n C = tvm.compute(A.shape, lambda *i: A(*i) + B(), name='C')\n D = tvm.compute(A.shape, lambda *i: C(*i) + 1, name='D')\n s = tvm.create_schedule(D.op)\n\n # GPU schedule have to split by gridIdx and threadIdx\n num_thread = 256\n xo, xi = s[C].split(C.op.axis[0], factor=num_thread)\n s[C].bind(xi, tvm.thread_axis(\"threadIdx.x\"))\n s[C].bind(xo, tvm.thread_axis(\"blockIdx.x\"))\n\n xo, xi = s[D].split(D.op.axis[0], factor=num_thread)\n s[D].bind(xi, tvm.thread_axis(\"threadIdx.x\"))\n s[D].bind(xo, tvm.thread_axis(\"blockIdx.x\"))\n\n # compile to IR\n s = s.normalize()\n bounds = tvm.schedule.InferBound(s)\n stmt = tvm.schedule.ScheduleOps(s, bounds)\n Ab = tvm.decl_buffer(A.shape, A.dtype, name='A')\n Bb = tvm.decl_buffer(B.shape, B.dtype, name='B')\n Db = tvm.decl_buffer(D.shape, D.dtype, name='D')\n stmt = tvm.ir_pass.LoopPartition(stmt, False)\n stmt = tvm.ir_pass.StorageFlatten(stmt, {A: Ab, B:Bb, D:Db}, 64)\n stmt = tvm.ir_pass.Simplify(stmt)\n fapi = tvm.ir_pass.MakeAPI(stmt, \"myadd\", [Ab, Bb, Db], 0, True)\n fsplits = [x for x in tvm.ir_pass.SplitHostDevice(fapi)]\n # lower the floordiv(use stackvm rules so it works for all targets)\n fsplits = [tvm.ir_pass.LowerIntrin(x, \"stackvm\") for x in fsplits]\n fsplits[0] = tvm.ir_pass.LowerTVMBuiltin(fsplits[0])\n\n def check_target(device, host=\"stackvm\"):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n return\n if not tvm.module.enabled(host):\n return\n mhost = tvm.codegen.build_module(fsplits[0], host)\n mdev = tvm.codegen.build_module(fsplits[1:], device)\n mhost.import_module(mdev)\n code = mdev.get_source()\n f = mhost.entry_func\n # launch the kernel.\n n = 1027\n a = tvm.nd.array(np.random.uniform(size=n).astype(Ab.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=()).astype(Bb.dtype), ctx)\n d = tvm.nd.array(np.zeros(n, dtype=Db.dtype), ctx)\n f(a, b, d)\n 
tvm.testing.assert_allclose(\n d.asnumpy(), a.asnumpy() + b.asnumpy() + 1)\n\n def check_module_save(device, host=\"stackvm\"):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n return\n if not tvm.module.enabled(host):\n return\n if device == \"cuda\":\n fmt = \"ptx\"\n elif device == \"rocm\":\n fmt = \"hsaco\"\n else:\n fmt = device\n mhost = tvm.codegen.build_module(fsplits[0], host)\n mdev = tvm.codegen.build_module(fsplits[1:], device)\n temp = util.tempdir()\n mpath = temp.relpath(\"test.%s\" % fmt)\n mdev.save(mpath)\n mdev2 = tvm.module.load(mpath)\n mhost.import_module(mdev2)\n f = mhost.entry_func\n # launch the kernel.\n n = 1027\n a = tvm.nd.array(np.random.uniform(size=n).astype(Ab.dtype), ctx)\n b = tvm.nd.array(np.random.uniform(size=()).astype(Bb.dtype), ctx)\n d = tvm.nd.array(np.zeros(n, dtype=Db.dtype), ctx)\n f(a, b, d)\n tvm.testing.assert_allclose(\n d.asnumpy(), a.asnumpy() + b.asnumpy() + 1)\n\n check_target(\"cuda\", host=\"stackvm\")\n check_target(\"cuda\", host=\"llvm\")\n check_module_save(\"cuda\", host=\"stackvm\")\n check_target(\"nvptx\", host=\"llvm\")\n check_target(\"vulkan\", host=\"llvm\")\n check_module_save(\"vulkan\", host=\"stackvm\")\n check_target(\"rocm\", host=\"llvm\")\n check_module_save(\"rocm\", host=\"llvm\")\n\n\nif __name__ == \"__main__\":\n test_large_uint_imm()\n test_add_pipeline()\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-instance-attributes,too-many-branches,too-many-nested-blocks,invalid-name,unused-argument,unused-variable,no-member,no-value-for-parameter\n\"\"\"Base class for graph tuner.\"\"\"\nimport logging\nfrom abc import abstractmethod\n\nimport numpy as np\nimport topi\n\nimport tvm\nfrom tvm import autotvm, relay\nfrom tvm.autotvm.task import get_config\nfrom tvm.autotvm.task.topi_integration import deserialize_args, serialize_args\nfrom tvm.autotvm.record import encode, load_from_file\nfrom tvm.autotvm.measure import MeasureResult, MeasureInput\n\nfrom ... import target as _target\nfrom .utils import is_boundary_node, get_in_nodes, get_out_nodes, has_multiple_inputs, \\\n bind_inputs, expr2graph\nfrom ._base import INVALID_LAYOUT_TIME\n\n\n# Setup topi_op_name -> layout function\n# NOTE: To add more ops, change the following dictionary.\nOP2LAYOUT = {\n \"topi_nn_conv2d\": topi.nn.conv2d_infer_layout,\n \"topi_nn_depthwise_conv2d_nchw\": topi.nn.depthwise_conv2d_infer_layout,\n}\n\n\[email protected]\ndef layout_transform(*args):\n \"\"\"Autotvm layout transform template.\"\"\"\n args = deserialize_args(args)\n cfg = get_config()\n cfg.add_flop(-1)\n data = args[0]\n out = topi.layout_transform(*args)\n sch = topi.generic.schedule_injective([out])\n return sch, [data, out]\n\n\nclass BaseGraphTuner(object):\n \"\"\"Class to search schedules considering both kernel execution time and\n layout transformation time.\n\n Before creating a Graph Executor instance, schedule candidates for all kernels in\n graph should be provided through tensor-level tuning.\n \"\"\"\n def __init__(self, graph, input_shapes, records, target_ops,\n target, max_sch_num=20, dtype=\"float32\", verbose=True,\n log_file=\"graph_tuner.log\", log_level=logging.DEBUG,\n name=\"graph_tuner\"):\n \"\"\"Create a GlobalTuner instance. 
Local schedule searching for all nodes with\n target_op in the input graph and layout transformation benchmark need to be\n executed before initialization.\n\n graph : tvm.relay.Expr.Function\n Input graph\n\n input_shapes : dict of str to tuple.\n Input shapes of graph\n\n records : str or iterator of (MeasureInput, MeasureResult)\n Collection of kernel level tuning records.\n If it is str, then it should be the filename of a records log file.\n Each row of this file is an encoded record pair.\n Otherwise, it is an iterator.\n\n target_ops : List of str\n Target tuning operators.\n\n target : str or tvm.target\n Compilation target.\n\n max_sch_num : int, optional\n Maximum number of schedule candidates for each workload.\n\n dtype : str, optional\n Data type.\n\n log_file : str, optional\n graph tuner log file name\n\n name : str, optional\n Name of global tuner.\n \"\"\"\n self._node_list = []\n self._layout_transform_perf_records = {}\n self._layout_transform_interlayer_cost = {}\n self._input_shapes = input_shapes\n self._target_ops = [op.__name__ for op in target_ops]\n\n self._name = name\n self._max_sch_num = max_sch_num\n self._optimal_sch_dict = {}\n self._records = records\n self._dtype = dtype\n if isinstance(target, str):\n target = _target.create(target)\n self._target = target\n self._optimal_record_dict = {}\n\n # Set up logger\n self._verbose = verbose\n self._logger = logging.getLogger(name + \"_logger\")\n need_file_handler = need_console_handler = True\n for handler in self._logger.handlers:\n if handler.__class__.__name__ == 'FileHandler':\n need_file_handler = False\n if handler.__class__.__name__ == 'StreamHandler':\n need_console_handler = False\n self._log_level = log_level\n self._log_file = log_file\n self._formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n self._logger.setLevel(log_level)\n if need_file_handler:\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(self._formatter)\n self._logger.addHandler(file_handler)\n if self._verbose and need_console_handler:\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(self._formatter)\n self._logger.addHandler(console_handler)\n self._logger.setLevel(log_level)\n self._logger.propagate = False\n\n # Generate workload and schedule dictionaries.\n if isinstance(graph, relay.Module):\n graph = graph[\"main\"]\n\n if isinstance(graph, relay.expr.Function):\n node_dict = {}\n graph = bind_inputs(graph, input_shapes, dtype)\n expr2graph(graph, self._target_ops, node_dict, self._node_list)\n else:\n raise RuntimeError(\"Unsupported graph type: %s\" % str(type(graph)))\n\n self._graph = graph\n self._in_nodes_dict = get_in_nodes(self._node_list, self._target_ops, input_shapes.keys())\n self._out_nodes_dict = get_out_nodes(self._in_nodes_dict)\n self._fetch_cfg()\n\n # Setup infer_layout for elemwise-like nodes\n # Note: graph tuner currently only supports tuning of single input and single output\n # op as target op, such as conv2d, dense and conv2d_transpose. In this case, we can\n # reuse infer_layout function from target ops for elemwise-like nodes. 
The behavior\n # is to modify the first tensor shape of input workload to the output shape of\n # elemwise-like node, and use infer_layout function from input op to generate layouts.\n input_names = self._input_shapes.keys()\n for idx in sorted(self._in_nodes_dict.keys()):\n if has_multiple_inputs(self._node_list, idx, input_names):\n node_entry = self._node_list[idx]\n node_entry[\"topi_op\"] = []\n node_entry[\"workloads\"] = []\n for input_idx in self._in_nodes_dict[idx]:\n input_node = self._node_list[input_idx]\n if not is_boundary_node(input_node, input_names):\n input_topi_op = input_node[\"topi_op\"][0]\n node_entry[\"topi_op\"].append(input_topi_op)\n # Only replace the first input tensor\n input_workload = input_node[\"workloads\"][0]\n first_tensor = input_workload[1]\n dtype = first_tensor[-1]\n new_shape = tuple([val.value for val in node_entry[\"types\"][0].shape])\n actual_workload = (input_workload[0],) + \\\n ((new_shape + (dtype,)),) + input_workload[2:]\n node_entry[\"workloads\"].append(actual_workload)\n if \"record_candidates\" not in node_entry:\n node_entry[\"record_candidates\"] = input_node[\"record_candidates\"]\n else:\n node_entry[\"topi_op\"].append(None)\n node_entry[\"workloads\"].append(None)\n\n\n def _fetch_cfg(self):\n \"\"\"Read and pre-process input schedules.\"\"\"\n if isinstance(self._records, str):\n records = load_from_file(self._records)\n else:\n records = self._records\n cfg_dict = {}\n for record in records:\n in_measure, _ = record\n workload = in_measure.task.workload\n if workload not in cfg_dict:\n cfg_dict[workload] = []\n cfg_dict[workload].append(record)\n\n cache_dict = {}\n for key in self._in_nodes_dict:\n node_entry = self._node_list[key]\n if node_entry[\"op\"] not in self._target_ops:\n continue\n workload = node_entry[\"workloads\"][0]\n if workload in cache_dict:\n node_entry[\"record_candidates\"] = cache_dict[workload]\n continue\n record_candidates = []\n infer_layout_func = OP2LAYOUT[node_entry[\"topi_op\"][0]]\n layout_tracking_dict = {}\n for record in cfg_dict[workload]:\n in_measure, out_measure = record\n workload = in_measure.task.workload\n cfg = in_measure.config\n # For multiple cfgs which produces the same in/out layouts,\n # only the most efficient one is preserved.\n with self._target:\n layouts = infer_layout_func(workload, cfg)\n if layouts in layout_tracking_dict:\n cost = out_measure.costs[0]\n current_best_cost = layout_tracking_dict[layouts][1].costs[0]\n if cost < current_best_cost:\n layout_tracking_dict[layouts] = record\n else:\n layout_tracking_dict[layouts] = record\n sorted_records = sorted(layout_tracking_dict.values(),\n key=lambda item: item[1].costs[0])\n for i in range(min(self._max_sch_num, len(sorted_records))):\n record_candidates.append(sorted_records[i])\n node_entry[\"record_candidates\"] = record_candidates\n cache_dict[workload] = record_candidates\n\n def _iterate_layout_transform(self, callback):\n \"\"\"Iterate all possible layout transformations and execute callback for each\n iteration. 
callback function accepts 6 arguments: from_node_idx, to_node_idx,\n from_sch_idx, to_sch_idx, args which represent the argument list of layout\n transformation and is_valid showing whether this is a valid layout transformation.\n \"\"\"\n input_names = self._input_shapes.keys()\n pair_tracker = set()\n for key, val in self._in_nodes_dict.items():\n node_entry = self._node_list[key]\n target_input_idx = -1\n target_input_pos = -1\n if has_multiple_inputs(self._node_list, key, input_names):\n for i, item in enumerate(val):\n node = self._node_list[item]\n if not is_boundary_node(node, input_names):\n target_input_idx = item\n target_input_pos = i\n break\n\n for i, item in enumerate(val):\n i_idx = item\n in_node_entry = self._node_list[i_idx]\n if is_boundary_node(in_node_entry, input_names):\n continue\n\n if node_entry[\"op\"] in self._target_ops:\n o_idx = key\n o_infer_layout_func = OP2LAYOUT[node_entry[\"topi_op\"][0]]\n o_wkl = node_entry[\"workloads\"][0]\n i_topi_op = in_node_entry[\"topi_op\"][0]\n i_wkl = in_node_entry[\"workloads\"][0]\n pivot = 0\n while not i_wkl:\n pivot += 1\n i_topi_op = in_node_entry[\"topi_op\"][pivot]\n i_wkl = in_node_entry[\"workloads\"][pivot]\n i_infer_layout_func = OP2LAYOUT[i_topi_op]\n else:\n o_idx = target_input_idx\n if i <= target_input_pos:\n continue\n o_infer_layout_func = OP2LAYOUT[node_entry[\"topi_op\"][0]]\n o_wkl = node_entry[\"workloads\"][target_input_pos]\n i_infer_layout_func = OP2LAYOUT[node_entry[\"topi_op\"][i]]\n i_wkl = node_entry[\"workloads\"][i]\n\n if (i_idx, o_idx) in pair_tracker:\n continue\n pair_tracker.add((i_idx, o_idx))\n\n for m, i_record in enumerate(in_node_entry[\"record_candidates\"]):\n for n, o_record in enumerate(node_entry[\"record_candidates\"]):\n i_cfg, o_cfg = i_record[0].config, o_record[0].config\n with self._target:\n i_input_info, i_output_info = i_infer_layout_func(i_wkl, i_cfg)\n o_input_info, o_output_info = o_infer_layout_func(o_wkl, o_cfg)\n if len(i_input_info) > 1 or len(i_output_info) > 1 or \\\n len(o_input_info) > 1 or len(o_output_info) > 1:\n raise RuntimeError(\"Graph tuner only supports target operator \"\n \"with single input and single output. 
\"\n \"Please check target_ops argument.\")\n\n in_shape, in_layout = i_output_info[0]\n if node_entry[\"op\"] in self._target_ops:\n _, out_layout = o_input_info[0]\n else:\n _, out_layout = o_output_info[0]\n data_placeholder = tvm.placeholder(in_shape, name=\"data\",\n dtype=self._dtype)\n args = [data_placeholder, in_layout, out_layout]\n callback(i_idx, o_idx, m, n, args)\n\n\n def _create_matrix_callback(self, from_node_idx, to_node_idx, from_sch_idx,\n to_sch_idx, args):\n \"\"\"Create dictionary containing matrix format of layout transformation\n between nodes.\"\"\"\n sargs = serialize_args(args)\n in_layout, out_layout = args[1], args[2]\n ltf_workload = ('layout_transform',) + autotvm.task.args_to_workload(sargs)\n idx_pair_key = (from_node_idx, to_node_idx)\n\n if in_layout == out_layout:\n layout_transform_time = 0\n else:\n layout_transform_time = \\\n self._layout_transform_perf_records[ltf_workload][1].costs[0]\n\n if idx_pair_key not in self._layout_transform_interlayer_cost:\n self._layout_transform_interlayer_cost[idx_pair_key] = []\n if len(self._layout_transform_interlayer_cost[idx_pair_key]) <= from_sch_idx:\n self._layout_transform_interlayer_cost[idx_pair_key].append([])\n self._layout_transform_interlayer_cost[idx_pair_key][from_sch_idx]\\\n .append(layout_transform_time)\n\n def benchmark_layout_transform(self, min_exec_num=100, timeout=10,\n use_rpc=False, device_key=None, host=\"localhost\",\n port=9190, n_parallel=1, build_func='default',\n layout_records=None, target_host=None, infer_layout=False):\n \"\"\"Benchmark all possible layout transformation in the graph,\n given a set of schedule candidates for each workload of target operator.\n\n Parameters\n ----------\n min_exec_num : int, optional\n Minimum number of execution. Final execution time is the average of\n all execution time.\n\n timeout : int, optional\n Time out for each execution.\n\n use_rpc : boolean, optional\n Whether to use rpc mode for benchmarking.\n\n device_key : str, optional\n Remote device key which can be queried by\n python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190\n\n host : str, optional\n IP address used to create RPC tracker on host machine.\n\n port : int, optional\n Port number used to create RPC tracker on host machine.\n\n n_parallel: int, optional\n The number of measurement task that can run in parallel.\n Set this according to the number of cpu cores (for compilation) and\n the number of devices you have (for measuring generate code).\n\n build_func: str or callable, optional\n 'default': call default builder. This works for normal target (llvm, cuda)\n\n 'ndk': use Android NDK to create shared library. Use this for android target.\n\n callable: customized build function for other backends (e.g. VTA).\n See autotvm/measure/measure_methods.py::default_build_func for example.\n\n layout_records : str or iterator of (MeasureInput, MeasureResult). 
optional\n Collection of layout_transform benchmarking records.\n If is str, then it should be the filename of a records log file.\n Each row of this file is an encoded record pair.\n Otherwise, it is an iterator.\n\n If this argument is set, graph tuner will first check whether layout_transform\n workload already exists in records and skip benchmarking if possible.\n\n target_host : str, optional\n str or :any:`tvm.target.Target` optional\n Host compilation target, if target is device.\n When TVM compiles device specific program such as CUDA,\n we also need host(CPU) side code to interact with the driver\n setup the dimensions and parameters correctly.\n target_host is used to specify the host side codegen target.\n By default, llvm is used if it is enabled,\n otherwise a stackvm intepreter is used.\n\n infer_layout : bool, optional\n Whether to infer layout transformation time if it doesn't exist in records, instead\n of benchmarking on target device.\n\n This might bring performance loss comparing to benchmarking layout transformation.\n \"\"\"\n self._logger.info(\"Start to benchmark layout transformation...\")\n if layout_records is None and infer_layout:\n raise RuntimeError(\"Requires some records to infer layout transformation time.\")\n\n if isinstance(layout_records, str):\n layout_records = load_from_file(layout_records)\n if not layout_records and infer_layout:\n raise RuntimeError(\"Records must be non-empty to infer layout transformation time.\")\n\n if isinstance(layout_records, str):\n layout_records = load_from_file(layout_records)\n num_flops, total_time = 0, 0\n if layout_records is not None:\n for record in layout_records:\n ltf_wkl = record[0].task.workload\n self._layout_transform_perf_records[ltf_wkl] = record\n input_shape = ltf_wkl[1][1]\n flops = np.prod(input_shape)\n num_flops += flops\n total_time += record[1].costs[0]\n avg_time = total_time / num_flops if num_flops > 0 else 0\n\n args_list = []\n def _fetch_args_callback(from_node_idx, to_node_idx, from_sch_idx,\n to_sch_idx, args):\n \"\"\"Callback function to fetch layout transform args\"\"\"\n _, in_layout, out_layout = args\n if in_layout != out_layout:\n args_list.append(args)\n\n self._iterate_layout_transform(_fetch_args_callback)\n\n def _log_to_list(record_list):\n \"\"\"Callback to log result to a list.\"\"\"\n def _callback(_, inputs, results):\n \"\"\"Callback implementation\"\"\"\n record_list.append((inputs[0], results[0]))\n return _callback\n\n builder = autotvm.LocalBuilder(n_parallel=n_parallel, build_func=build_func)\n runner = autotvm.LocalRunner(number=min_exec_num, repeat=1, timeout=timeout)\n if use_rpc:\n if device_key is None:\n raise RuntimeError(\"device_key need to be set to use rpc tracker mode.\")\n runner = autotvm.measure.RPCRunner(device_key, host, port, n_parallel=n_parallel,\n number=min_exec_num, repeat=1,\n timeout=timeout)\n measure_option = autotvm.measure_option(builder=builder, runner=runner)\n for args in args_list:\n data, in_layout, out_layout = args\n args = serialize_args(args)\n ltf_workload = ('layout_transform',) + autotvm.task.args_to_workload(args)\n if ltf_workload in self._layout_transform_perf_records:\n continue\n\n if infer_layout:\n input_shape = ltf_workload[1][1]\n flops = 1\n for i in input_shape:\n flops *= i\n\n # Rule out invalid layout transformations\n out = topi.layout_transform(data, in_layout, out_layout)\n out_flops = 1\n for i in topi.util.get_const_tuple(out.shape):\n out_flops *= i\n\n if flops != out_flops:\n inferred_time = 
INVALID_LAYOUT_TIME\n else:\n inferred_time = flops * avg_time\n\n record_input = MeasureInput(target=self._target, task=None, config=None)\n record_output = MeasureResult(costs=(inferred_time,), error_no=0,\n all_cost=-1, timestamp=-1)\n self._layout_transform_perf_records[ltf_workload] = (record_input, record_output)\n continue\n\n records = []\n task = autotvm.task.create(layout_transform, args=args, target=self._target,\n target_host=target_host)\n task.workload = ltf_workload\n tuner = autotvm.tuner.GridSearchTuner(task)\n tuner.tune(n_trial=1, measure_option=measure_option,\n callbacks=[_log_to_list(records)])\n if not isinstance(records[0][1].costs[0], float):\n records[0] = (records[0][0], records[0][1]._replace(costs=(INVALID_LAYOUT_TIME,)))\n self._layout_transform_perf_records[ltf_workload] = records[0]\n\n self._iterate_layout_transform(self._create_matrix_callback)\n self._logger.info(\"Benchmarking layout transformation successful.\")\n\n @property\n def layout_transform_perf_records(self):\n \"\"\"Get layout transformation dictionary for input graph.\n\n Returns\n -------\n layout_transform_perf_records : dict of tuple to (MeasureInput, MeasureResult)\n Layout transformation dictionary for input graph.\n \"\"\"\n return self._layout_transform_perf_records\n\n\n def get_optimal_records(self):\n \"\"\"Convert optimal record dictionary to a list of records\n with ascending order of node index in graph.\n\n Returns\n -------\n sch_list : list of tuple\n List of records with ascending order of node index in graph.\n \"\"\"\n ordered_index_list = sorted(self._optimal_record_dict.keys())\n ret = []\n for index in ordered_index_list:\n node_entry = self._node_list[index]\n if node_entry[\"op\"] not in self._target_ops:\n continue\n ret.append(node_entry[\"record_candidates\"][self._optimal_record_dict[index]])\n return ret\n\n def write_opt_sch2record_file(self, record_file=\"graph_opt_schedule.log\"):\n \"\"\"Write graph level optimal schedules into file.\n\n Parameters\n ----------\n record_file : str, optional\n Output schedule file.\n \"\"\"\n with open(record_file, \"a\") as out_file:\n records = self.get_optimal_records()\n for record in records:\n out_file.write(encode(record[0], record[1]) + \"\\n\")\n msg = \"Writing optimal schedules to %s successfully.\" % record_file\n self._logger.info(msg)\n\n @abstractmethod\n def run(self, **kwargs):\n \"\"\"Run graph tuning.\"\"\"\n pass\n"
] | [
[
"numpy.dot",
"numpy.ones_like",
"numpy.maximum",
"numpy.sqrt",
"numpy.concatenate",
"numpy.copy",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.exp"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.random.uniform",
"numpy.zeros"
],
[
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |