Columns: repo_name (string, lengths 8-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
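Each record below pairs one repo_name with four parallel sequences: entry i of hexsha, file_path, code, and apis all describe the same file snapshot (commit hash, path inside the repository, full source text, and the library calls detected in that file). A minimal sketch of walking records of this shape, assuming they have already been loaded as plain Python dicts; the "records" list and its contents are illustrative, not part of this dump:

# Hypothetical, already-loaded rows; only the field names come from the schema above.
records = [
    {
        "repo_name": "tkrons/SPFlow_topdownrules",
        "hexsha": ["32233bf29d107c62f0f727b0e64aaa74b37cfe1e"],
        "file_path": ["src/spn/experiments/AQP/leaves/static/InferenceRange.py"],
        "code": ["...full file text..."],
        "apis": [["numpy.ones"]],
    },
]

for rec in records:
    # The four sequences are aligned, so zip recovers one
    # (sha, path, source, apis) tuple per file snapshot.
    for sha, path, src, api_list in zip(rec["hexsha"], rec["file_path"], rec["code"], rec["apis"]):
        print(rec["repo_name"], sha[:8], path, len(src), api_list)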
tkrons/SPFlow_topdownrules
[ "32233bf29d107c62f0f727b0e64aaa74b37cfe1e" ]
[ "src/spn/experiments/AQP/leaves/static/InferenceRange.py" ]
[ "\"\"\"\nCreated on June 21, 2018\n\n@author: Moritz\n\"\"\"\n\nimport numpy as np\n\nfrom spn.algorithms.Inference import add_node_likelihood\nfrom spn.experiments.AQP.leaves.static.StaticNumeric import StaticNumeric\n\n\ndef static_likelihood_range(node, ranges, dtype=np.float64, **kwargs):\n assert len(node.scope) == 1, node.scope\n\n probs = np.ones((ranges.shape[0], 1), dtype=dtype)\n ranges = ranges[:, node.scope[0]]\n\n for i, rang in enumerate(ranges):\n\n # Skip if no range is specified aka use a log-probability of 0 for that instance\n if rang is None:\n continue\n\n # Skip if no values for the range are provided\n if rang.is_impossible():\n probs[i] = 0\n\n # Compute the sum of the probability of all possible values\n probs[i] = sum([_compute_probability_for_range(node, interval) for interval in rang.get_ranges()])\n\n return probs\n\n\ndef _compute_probability_for_range(node, interval):\n\n if len(interval) == 1:\n if node.val == interval[0]:\n return 1\n else:\n return 0\n else:\n lower = interval[0]\n higher = interval[1]\n\n if lower <= node.val and node.val <= higher:\n return 1\n else:\n return 0\n\n\ndef add_static_inference_range_support():\n add_node_likelihood(StaticNumeric, static_likelihood_range)\n" ]
[ [ "numpy.ones" ] ]
kk2487/3dresnet
[ "d7161a70ed6c2f8dcbe89f9b6bad2ef6cc5b5d94" ]
[ "models/resnet_lgdv2.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nfrom functools import partial\nimport numpy as np\n\n__all__ = [\n 'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet200'\n]\n\ndef look_bottleneck_global(glo):\n if look_bottleneck_global:\n if glo is None:\n print('first bottleneck-> no global content!')\n else:\n print('glo has content!')\n\n# Can print the model structure\ndef model_info(model, report='summary'):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if report is 'full':\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n print('Model Summary: %g layers, %g parameters, %g gradients' % (len(list(model.parameters())), n_p, n_g))\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n # 3x3x3 convolution with padding\n return nn.Conv3d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n\n# Implement of bottleneck with se block\nclass BottleneckX(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, downsample=None, first_block=False):\n super(BottleneckX, self).__init__()\n self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm3d(planes)\n #self.bn1 = nn.GroupNorm(4, planes)\n\n self.conv2 = nn.Conv3d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm3d(planes)\n #self.bn2 = nn.GroupNorm(4, planes)\n\n self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm3d(planes * 4)\n #self.bn3 = nn.GroupNorm(4, planes * 4)\n\n #self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n # If first bottleneckX, it does not contain global path\n self.first_block = first_block\n # If downsampling occurs, set true\n self.ds = False\n #self.se_module = SEModule(planes * 4, reduction=16, first_block=self.first_block)\n self.avg_pool = nn.AdaptiveAvgPool3d(1)\n\n #Implement LGD block\n self.fc1 = nn.Conv3d(planes * 4 // 2, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n #self.fc2 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn4 = nn.BatchNorm3d(planes * 4)\n #self.bn4 = nn.GroupNorm(4, planes * 4)\n\n self.fc3 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)\n self.fc4 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n\n self.fc5 = nn.Conv3d(planes * 4, planes * 4 // 16, kernel_size=1, stride=1, padding=0, bias=False)\n self.fc6 = nn.Conv3d(planes * 4 // 16, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.LeakyReLU(inplace=True)\n\n def forward(self, xx):\n # xx contains two element: input->x and global path->glo\n x = xx[0]\n glo = xx[1]\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = 
self.bn3(out)\n #out = self.relu(out)\n\n # If downsample, downsampleing global path & residual channels\n if self.downsample is not None:\n if glo is not None:\n glo = self.avg_pool(glo)\n glo = self.fc1(glo)\n glo = self.relu(glo)\n residual = self.downsample(x)\n #LGD block\n if glo is not None:\n glo = self.fc3(glo)\n glo = self.relu(glo)\n glo = self.fc4(glo)\n glo = self.sigmoid(glo)\n\n out = out * glo\n #out = self.relu(out)\n\n glo2 = self.avg_pool(out)\n glo2 = self.fc5(glo2)\n glo2 = self.relu(glo2)\n glo2 = self.fc6(glo2)\n glo2 = self.sigmoid(glo2)\n\n g = glo + glo2\n g = self.relu(g)\n out = out + residual\n out = self.relu(out)\n outg = [out, g]\n # Normal bottleneck\n else:\n out = out + residual\n out = self.relu(out)\n outg = [out, residual]\n\n return outg\n\n\nclass ResNet(nn.Module):\n\n def __init__(self,\n blockx,\n layers,\n sample_size,\n sample_duration,\n shortcut_type='B',\n num_classes=400):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv3d(\n 3,\n 64,\n kernel_size=7,\n stride=(1, 2, 2),\n padding=(3, 3, 3),\n bias=False)\n self.bn1 = nn.BatchNorm3d(64)\n #self.bn1 = nn.GroupNorm(4, 64)\n\n self.relu = nn.LeakyReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)\n self.layer1 = self._make_layer(blockx, 64, layers[0], shortcut_type, first_block=True)\n self.layer2 = self._make_layer(blockx, 128, layers[1], shortcut_type, stride=2, first_block=False)\n self.layer3 = self._make_layer(blockx, 256, layers[2], shortcut_type, stride=2, first_block=False)\n self.layer4 = self._make_layer(blockx, 512, layers[3], shortcut_type, stride=2, first_block=False)\n last_duration = int(math.ceil(sample_duration / 16))\n last_size = int(math.ceil(sample_size / 32))\n #last_size = 4\n self.avgpool = nn.AvgPool3d(\n (last_duration, last_size, last_size), stride=1)\n self.fc = nn.Linear(512 * blockx.expansion, num_classes)\n #self.fusion = nn.Conv3d(512 * block.expansion * 2, 512 * block.expansion, kernel_size=1, stride=1, padding=0, bias=False)\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')\n elif isinstance(m, nn.BatchNorm3d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1, first_block=False):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(\n downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n nn.Conv3d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False), nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, first_block))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n #print('lgd')\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n lookshape = False\n # First time need to give two element to model\n xx = [x, None]\n x = self.layer1(xx)\n if lookshape:\n print('\\nlayer1-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n x = self.layer2(x)\n if lookshape:\n print('\\nlayer2-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n x = self.layer3(x)\n if lookshape:\n 
print('\\nlayer3-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n x = self.layer4(x)\n if lookshape:\n print('\\nlayer4-------------')\n print(np.shape(x[0]))\n print(np.shape(x[1]))\n print('--------------')\n\n # After bottlenck part\n loc, g = x[0], x[1]\n #print(g)\n if lookshape:\n print('loc & g:--------')\n print(np.shape(loc))\n print(np.shape(g))\n print('----------------')\n\n\n x = self.avgpool(loc)\n\n #x = x + g\n #x = self.bn2(x)\n #x = self.relu(x)\n\n if lookshape:\n print('\\nlayer5-------------')\n print(np.shape(x))\n print('--------------')\n\n # Test local and global path feature maps fusion type below\n \n # 3d conv\n #x = torch.cat((x, g), 1)\n #x = self.fusion(x)\n #x = self.bn2(x)\n #x = self.relu(x)\n\n # concat (need to change fc layer filter number)\n #x = torch.cat((x, g), 1)\n #x = self.relu(x)\n\n x = x.view(x.size(0), -1)\n if lookshape:\n print('\\nlayer6-------------')\n print(np.shape(x))\n print('--------------')\n\n x = self.fc(x)\n\n if lookshape:\n print('\\nlayer7-------------')\n print(np.shape(x))\n print('--------------')\n\n return x\n\ndef get_fine_tuning_parameters(model, ft_begin_index):\n #if ft_begin_index == 0:\n # return model.parameters()\n print('ohraaaa')\n ft_module_names = []\n for i in range(ft_begin_index, 5):\n ft_module_names.append('layer{}'.format(i))\n ft_module_names.append('fc')\n\n # Look the content of ft_module\n print('ft: ', ft_module_names)\n\n parameters = []\n ii = 0\n\n '''\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ii >= 271: #220 271\n print(ii)\n parameters.append({'params': v})\n else:\n print('notfc')\n print(ii)\n parameters.append({'params': v, 'lr': 0.0})\n #parameters.append({'params': v})\n print(k)\n ii = ii+1\n return parameters\n '''\n \n # bakup code\n for k, v in model.named_parameters():\n for ft_module in ft_module_names:\n if ft_module in k:\n #if ii >= 271:\n print('fc')\n #print(ii)\n parameters.append({'params': v})\n break\n else:\n print('notfc')\n #print(ii)\n #parameters.append({'params': v, 'lr': 0.0})\n parameters.append({'params': v})\n print(k)\n ii = ii+1\n return parameters\n \n\ndef resnet10(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model\n\n\ndef resnet18(**kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(**kwargs):\n \"\"\"Constructs a ResNet-34 model.\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = ResNet(BottleneckX, [3, 4, 6, 3], **kwargs)\n #model = ResNet(Bottleneck, BottleneckX, [3, 4, 23, 3], **kwargs)\n #model_info(model,'full')\n return model\n\n\ndef resnet101(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(BottleneckX, [3, 4, 23, 3], **kwargs)\n return model\n\n\ndef resnet152(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model\n\n\ndef resnet200(**kwargs):\n \"\"\"Constructs a ResNet-101 model.\n \"\"\"\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model" ]
[ [ "torch.nn.init.kaiming_normal", "torch.nn.MaxPool3d", "torch.nn.BatchNorm3d", "torch.nn.Linear", "numpy.shape", "torch.nn.Sequential", "torch.nn.AvgPool3d", "torch.nn.AdaptiveAvgPool3d", "torch.nn.Sigmoid", "torch.nn.Conv3d", "torch.nn.LeakyReLU" ] ]
yarikoptic/statsmodels
[ "844381797a475a01c05a4e162592a5a6e3a48032" ]
[ "statsmodels/tsa/tests/results/arima111nc_css_results.py" ]
[ "import numpy as np\n\nllf = np.array([-242.89663276735])\n\nnobs = np.array([ 202])\n\nk = np.array([ 3])\n\nk_exog = np.array([ 1])\n\nsigma = np.array([ .8053519404535])\n\nchi2 = np.array([ 15723.381396967])\n\ndf_model = np.array([ 2])\n\nk_ar = np.array([ 1])\n\nk_ma = np.array([ 1])\n\nparams = np.array([ .99479180506163, \n -.84461527652809, \n .64859174799221])\n\ncov_params = np.array([ .00008904968254, \n -.00023560410507, \n .00012795903324, \n -.00023560410507, \n .00131628534915, \n -.00022462340695, \n .00012795903324, \n -.00022462340695, \n .0005651128627]).reshape(3,3)\n\nxb = np.array([ 0, \n 0, \n .02869686298072, \n .05651443824172, \n .0503994859755, \n .06887971609831, \n .05940540507436, \n .08067328482866, \n .08167565613985, \n .06429278105497, \n .07087650150061, \n .06886467337608, \n .06716959923506, \n .08230647444725, \n .07099691033363, \n .08401278406382, \n .07996553182602, \n .07354256510735, \n .09366323798895, \n .08811800926924, \n .10296355187893, \n .08846370875835, \n .0852297320962, \n .08700425922871, \n .09751411527395, \n .09737934917212, \n .11228405684233, \n .1053489819169, \n .12352022528648, \n .16439816355705, \n .1643835157156, \n .19891132414341, \n .17551273107529, \n .17827558517456, \n .19562774896622, \n .21028305590153, \n .23767858743668, \n .24580039083958, \n .28269505500793, \n .29883882403374, \n .31247469782829, \n .35402658581734, \n .37410452961922, \n .39106267690659, \n .42040377855301, \n .44518512487411, \n .43608102202415, \n .44340893626213, \n .44959822297096, \n .40977239608765, \n .42118826508522, \n .40079545974731, \n .38357082009315, \n .36902260780334, \n .35673499107361, \n .36137464642525, \n .38031083345413, \n .47139286994934, \n .47323387861252, \n .60994738340378, \n .69538277387619, \n .7825602889061, \n .84117436408997, \n .9657689332962, \n 1.0109325647354, \n .95897275209427, \n .96013957262039, \n .9461076259613, \n .9342554807663, \n .83413934707642, \n .83968591690063, \n .84437066316605, \n .83330947160721, \n .8990553021431, \n .87949693202972, \n .86297762393951, \n .89407861232758, \n .93536442518234, \n 1.0303052663803, \n 1.1104937791824, \n 1.1481873989105, \n 1.2851470708847, \n 1.4458787441254, \n 1.5515991449356, \n 1.7309991121292, \n 1.8975404500961, \n 1.8579913377762, \n 1.8846583366394, \n 1.9672524929047, \n 1.9469071626663, \n 2.0048115253448, \n 1.9786299467087, \n 1.8213576078415, \n 1.6284521818161, \n 1.7508568763733, \n 1.5689061880112, \n 1.2950873374939, \n 1.2290096282959, \n 1.1882168054581, \n 1.1537625789642, \n 1.1697143316269, \n 1.1681711673737, \n 1.106795668602, \n 1.0849931240082, \n 1.006507396698, \n 1.0453414916992, \n .98803448677063, \n .95465070009232, \n 1.0165599584579, \n .67838954925537, \n .69311982393265, \n .69054269790649, \n .76345545053482, \n .84005492925644, \n .87471830844879, \n .91901183128357, \n .92638796567917, \n .96265280246735, \n 1.0083012580872, \n 1.0618740320206, \n 1.0921038389206, \n 1.2077431678772, \n 1.2303256988525, \n 1.174311041832, \n 1.3072115182877, \n 1.314337015152, \n 1.3503924608231, \n 1.5760731697083, \n 1.5264053344727, \n 1.34929728508, \n 1.304829955101, \n 1.2522557973862, \n 1.222869515419, \n 1.198047041893, \n 1.1770839691162, \n 1.1743944883347, \n 1.1571066379547, \n 1.1274864673615, \n 1.0574153661728, \n 1.058304309845, \n .99898308515549, \n .9789143204689, \n 1.0070173740387, \n 1.000718832016, \n 1.0104174613953, \n 1.0486439466476, \n 1.0058424472809, \n .98470783233643, \n 1.0119106769562, \n 
1.0649236440659, \n 1.0346088409424, \n 1.0540577173233, \n 1.0704846382141, \n .97923594713211, \n .90216588973999, \n .9271782040596, \n .85819715261459, \n .75488126277924, \n .78776079416275, \n .77047789096832, \n .77089905738831, \n .8313245177269, \n .82229107618332, \n .90476810932159, \n .94439232349396, \n 1.0379292964935, \n 1.1469690799713, \n 1.1489590406418, \n 1.2257302999496, \n 1.1554099321365, \n 1.1260533332825, \n .9811190366745, \n .8436843752861, \n .95287209749222, \n .90993344783783, \n .94875508546829, \n 1.0115815401077, \n .94450175762177, \n .87282890081406, \n .91741597652435, \n .98511207103729, \n .9972335100174, \n 1.0975805521011, \n 1.1823329925537, \n 1.1487929821014, \n 1.270641207695, \n 1.2083609104156, \n 1.696394443512, \n 1.4628355503082, \n 1.4307631254196, \n 1.5087975263596, \n 1.1542117595673, \n 1.2262620925903, \n 1.3880327939987, \n 1.3853038549423, \n 1.4396153688431, \n 1.7208145856857, \n 1.678991317749, \n 2.110867023468, \n 1.524417757988, \n .57946246862411, \n .56406193971634, \n .74643105268478])\n\ny = np.array([np.nan, \n 28.979999542236, \n 29.178695678711, \n 29.40651512146, \n 29.420400619507, \n 29.608880996704, \n 29.609405517578, \n 29.830673217773, \n 29.921676635742, \n 29.874292373657, \n 29.990877151489, \n 30.048864364624, \n 30.10717010498, \n 30.292304992676, \n 30.290996551514, \n 30.464012145996, \n 30.519966125488, \n 30.553541183472, \n 30.783664703369, \n 30.838117599487, \n 31.042964935303, \n 31.038463592529, \n 31.105230331421, \n 31.207004547119, \n 31.377513885498, \n 31.477378845215, \n 31.692283630371, \n 31.755348205566, \n 32.003520965576, \n 32.444396972656, \n 32.61438369751, \n 33.048908233643, \n 33.07551574707, \n 33.278274536133, \n 33.595630645752, \n 33.91028213501, \n 34.337677001953, \n 34.645801544189, \n 35.182697296143, \n 35.598838806152, \n 36.012474060059, \n 36.654026031494, \n 37.174102783203, \n 37.691062927246, \n 38.320404052734, \n 38.94518661499, \n 39.336082458496, \n 39.843410491943, \n 40.349597930908, \n 40.509769439697, \n 41.021186828613, \n 41.300796508789, \n 41.583572387695, \n 41.869022369385, \n 42.156734466553, \n 42.561374664307, \n 43.080310821533, \n 44.171394348145, \n 44.673233032227, \n 46.209945678711, \n 47.495380401611, \n 48.882556915283, \n 50.141174316406, \n 51.965770721436, \n 53.310932159424, \n 53.958972930908, \n 54.960140228271, \n 55.84610748291, \n 56.734252929688, \n 56.934139251709, \n 57.839687347412, \n 58.744373321533, \n 59.533309936523, \n 60.899055480957, \n 61.679496765137, \n 62.46297454834, \n 63.594078063965, \n 64.83536529541, \n 66.530303955078, \n 68.210494995117, \n 69.64818572998, \n 71.885147094727, \n 74.445877075195, \n 76.751594543457, \n 79.731002807617, \n 82.797538757324, \n 84.457992553711, \n 86.584655761719, \n 89.167251586914, \n 91.046905517578, \n 93.504814147949, \n 95.378631591797, \n 96.22135925293, \n 96.628448486328, \n 99.250854492188, \n 99.668907165527, \n 99.195091247559, \n 100.0290145874, \n 100.98822021484, \n 101.95376586914, \n 103.26971435547, \n 104.46817779541, \n 105.20679473877, \n 106.1849899292, \n 106.70650482178, \n 108.0453414917, \n 108.68803405762, \n 109.45465087891, \n 110.91656494141, \n 109.37838745117, \n 110.19312286377, \n 110.89054107666, \n 112.16345977783, \n 113.54005432129, \n 114.67472076416, \n 115.91901397705, \n 116.92639160156, \n 118.16265106201, \n 119.50830078125, \n 120.96187591553, \n 122.29209899902, \n 124.30773925781, \n 125.7303237915, \n 126.57431030273, \n 
128.8072052002, \n 130.21432495117, \n 131.85038757324, \n 134.97607421875, \n 136.22640991211, \n 136.44931030273, \n 137.50482177734, \n 138.45225524902, \n 139.5228729248, \n 140.59803771973, \n 141.67707824707, \n 142.87438964844, \n 143.95710754395, \n 144.92749023438, \n 145.55741882324, \n 146.65830993652, \n 147.29898071289, \n 148.17890930176, \n 149.40701293945, \n 150.40071105957, \n 151.51042175293, \n 152.84864807129, \n 153.60585021973, \n 154.48471069336, \n 155.7119140625, \n 157.16493225098, \n 158.03460693359, \n 159.25405883789, \n 160.47047424316, \n 160.87922668457, \n 161.30215454102, \n 162.42718505859, \n 162.85820007324, \n 162.95487976074, \n 163.98776245117, \n 164.67047119141, \n 165.47090148926, \n 166.73132324219, \n 167.52229309082, \n 169.00477600098, \n 170.24440002441, \n 171.93792724609, \n 173.84696960449, \n 175.04895019531, \n 176.82572937012, \n 177.55540466309, \n 178.52604675293, \n 178.58113098145, \n 178.54368591309, \n 180.25286865234, \n 180.90992736816, \n 182.14875793457, \n 183.61158752441, \n 184.14450073242, \n 184.5728302002, \n 185.81741333008, \n 187.28511047363, \n 188.39723205566, \n 190.19758605957, \n 191.98233032227, \n 192.94879150391, \n 195.07064819336, \n 195.90835571289, \n 200.89639282227, \n 200.86282348633, \n 202.13075256348, \n 204.20880126953, \n 203.05419921875, \n 204.80026245117, \n 207.3080291748, \n 208.72329711914, \n 210.57261657715, \n 214.21580505371, \n 215.67597961426, \n 220.72087097168, \n 218.41342163086, \n 212.75346374512, \n 213.23506164551, \n 215.21542358398])\n\nresid = np.array([np.nan, \n .17000007629395, \n .17130389809608, \n -.03651398047805, \n .11960058659315, \n -.05888139456511, \n .14059536159039, \n .00932686589658, \n -.11167634278536, \n .04570783302188, \n -.0108770346269, \n -.00886330008507, \n .10282856971025, \n -.07230624556541, \n .08900293707848, \n -.0240114107728, \n -.03996651992202, \n .13645842671394, \n -.03366377204657, \n .10188252478838, \n -.09296332299709, \n -.01846401393414, \n .01477065030485, \n .0729955881834, \n .00248436117545, \n .10262141376734, \n -.04228436201811, \n .12465056031942, \n .27647939324379, \n .00560382334515, \n .23561419546604, \n -.1489082723856, \n .02448422275484, \n .12172746658325, \n .10437148809433, \n .18971465528011, \n .06232447177172, \n .25419962406158, \n .11730266362429, \n .10116269439459, \n .2875237762928, \n .14597341418266, \n .12589547038078, \n .20893961191177, \n .17959471046925, \n -.04518361017108, \n .06391899287701, \n .05659105628729, \n -.24960128962994, \n .09022761881351, \n -.12118522822857, \n -.10079623758793, \n -.08357158303261, \n -.06902338564396, \n .04326653853059, \n .13862533867359, \n .61968916654587, \n .02860714122653, \n .92676383256912, \n .59005337953568, \n .60461646318436, \n .41744044423103, \n .85882639884949, \n .33423033356667, \n -.31093180179596, \n .04102724045515, \n -.06013804674149, \n -.04610994458199, \n -.63425624370575, \n .06586220860481, \n .06031560897827, \n -.04437142238021, \n .46668976545334, \n -.09905604273081, \n -.07949769496918, \n .23702463507652, \n .30592212080956, \n .66463404893875, \n .56969320774078, \n .28950771689415, \n .95181107521057, \n 1.1148544549942, \n .75411820411682, \n 1.2484039068222, \n 1.1690024137497, \n -.1975435167551, \n .24200716614723, \n .6153416633606, \n -.06725100427866, \n .45309436321259, \n -.10480991750956, \n -.97863000631332, \n -1.2213591337204, \n .8715478181839, \n -1.1508584022522, \n -1.7689031362534, \n -.39508575201035, \n 
-.22900961339474, \n -.18821682035923, \n .14623281359673, \n .03029025532305, \n -.36817568540573, \n -.10679569840431, \n -.48499462008476, \n .29349562525749, \n -.34534454345703, \n -.18803144991398, \n .44535079598427, \n -2.2165644168854, \n .12161350995302, \n .00687709869817, \n .50946187973022, \n .53653997182846, \n .25995117425919, \n .32527860999107, \n .08098815381527, \n .27360898256302, \n .33735024929047, \n .39170032739639, \n .23812144994736, \n .80789774656296, \n .19225835800171, \n -.33032417297363, \n .92568749189377, \n .09278241544962, \n .28566908836365, \n 1.5496014356613, \n -.27607008814812, \n -1.1263961791992, \n -.24930645525455, \n -.30482992529869, \n -.15224970877171, \n -.12287864089012, \n -.09804095327854, \n .02291300706565, \n -.07438835501671, \n -.15710659325123, \n -.42748948931694, \n .04259072244167, \n -.35830733180046, \n -.09898918122053, \n .22108262777328, \n -.00701736938208, \n .0992873236537, \n .28958559036255, \n -.24864092469215, \n -.10584850609303, \n .21528913080692, \n .38809850811958, \n -.16492980718613, \n .16538816690445, \n .1459391862154, \n -.57048463821411, \n -.47923597693443, \n .19784018397331, \n -.4271782040596, \n -.65820020437241, \n .24511873722076, \n -.0877638310194, \n .02952514961362, \n .42909786105156, \n -.03132146969438, \n .57771807909012, \n .29522883892059, \n .6555985212326, \n .76207375526428, \n .05302781611681, \n .55105316638947, \n -.42574247717857, \n -.15540990233421, \n -.92604118585587, \n -.88112819194794, \n .75632172822952, \n -.25287514925003, \n .29006350040436, \n .45125409960747, \n -.41159069538116, \n -.44450175762177, \n .32716807723045, \n .48259317874908, \n .11487878113985, \n .70277869701385, \n .60241633653641, \n -.18233296275139, \n .85120695829391, \n -.37064728140831, \n 3.2916390895844, \n -1.4963974952698, \n -.16283248364925, \n .56923681497574, \n -2.3088004589081, \n .51979947090149, \n 1.1197309494019, \n .02996650896966, \n .40969428420067, \n 1.9223841428757, \n -.21881568431854, \n 2.9340152740479, \n -3.8318600654602, \n -6.239429473877, \n -.08245316892862, \n 1.2339268922806, \n 1.1695692539215])\n\nyr = np.array([np.nan, \n .17000007629395, \n .17130389809608, \n -.03651398047805, \n .11960058659315, \n -.05888139456511, \n .14059536159039, \n .00932686589658, \n -.11167634278536, \n .04570783302188, \n -.0108770346269, \n -.00886330008507, \n .10282856971025, \n -.07230624556541, \n .08900293707848, \n -.0240114107728, \n -.03996651992202, \n .13645842671394, \n -.03366377204657, \n .10188252478838, \n -.09296332299709, \n -.01846401393414, \n .01477065030485, \n .0729955881834, \n .00248436117545, \n .10262141376734, \n -.04228436201811, \n .12465056031942, \n .27647939324379, \n .00560382334515, \n .23561419546604, \n -.1489082723856, \n .02448422275484, \n .12172746658325, \n .10437148809433, \n .18971465528011, \n .06232447177172, \n .25419962406158, \n .11730266362429, \n .10116269439459, \n .2875237762928, \n .14597341418266, \n .12589547038078, \n .20893961191177, \n .17959471046925, \n -.04518361017108, \n .06391899287701, \n .05659105628729, \n -.24960128962994, \n .09022761881351, \n -.12118522822857, \n -.10079623758793, \n -.08357158303261, \n -.06902338564396, \n .04326653853059, \n .13862533867359, \n .61968916654587, \n .02860714122653, \n .92676383256912, \n .59005337953568, \n .60461646318436, \n .41744044423103, \n .85882639884949, \n .33423033356667, \n -.31093180179596, \n .04102724045515, \n -.06013804674149, \n -.04610994458199, \n 
-.63425624370575, \n .06586220860481, \n .06031560897827, \n -.04437142238021, \n .46668976545334, \n -.09905604273081, \n -.07949769496918, \n .23702463507652, \n .30592212080956, \n .66463404893875, \n .56969320774078, \n .28950771689415, \n .95181107521057, \n 1.1148544549942, \n .75411820411682, \n 1.2484039068222, \n 1.1690024137497, \n -.1975435167551, \n .24200716614723, \n .6153416633606, \n -.06725100427866, \n .45309436321259, \n -.10480991750956, \n -.97863000631332, \n -1.2213591337204, \n .8715478181839, \n -1.1508584022522, \n -1.7689031362534, \n -.39508575201035, \n -.22900961339474, \n -.18821682035923, \n .14623281359673, \n .03029025532305, \n -.36817568540573, \n -.10679569840431, \n -.48499462008476, \n .29349562525749, \n -.34534454345703, \n -.18803144991398, \n .44535079598427, \n -2.2165644168854, \n .12161350995302, \n .00687709869817, \n .50946187973022, \n .53653997182846, \n .25995117425919, \n .32527860999107, \n .08098815381527, \n .27360898256302, \n .33735024929047, \n .39170032739639, \n .23812144994736, \n .80789774656296, \n .19225835800171, \n -.33032417297363, \n .92568749189377, \n .09278241544962, \n .28566908836365, \n 1.5496014356613, \n -.27607008814812, \n -1.1263961791992, \n -.24930645525455, \n -.30482992529869, \n -.15224970877171, \n -.12287864089012, \n -.09804095327854, \n .02291300706565, \n -.07438835501671, \n -.15710659325123, \n -.42748948931694, \n .04259072244167, \n -.35830733180046, \n -.09898918122053, \n .22108262777328, \n -.00701736938208, \n .0992873236537, \n .28958559036255, \n -.24864092469215, \n -.10584850609303, \n .21528913080692, \n .38809850811958, \n -.16492980718613, \n .16538816690445, \n .1459391862154, \n -.57048463821411, \n -.47923597693443, \n .19784018397331, \n -.4271782040596, \n -.65820020437241, \n .24511873722076, \n -.0877638310194, \n .02952514961362, \n .42909786105156, \n -.03132146969438, \n .57771807909012, \n .29522883892059, \n .6555985212326, \n .76207375526428, \n .05302781611681, \n .55105316638947, \n -.42574247717857, \n -.15540990233421, \n -.92604118585587, \n -.88112819194794, \n .75632172822952, \n -.25287514925003, \n .29006350040436, \n .45125409960747, \n -.41159069538116, \n -.44450175762177, \n .32716807723045, \n .48259317874908, \n .11487878113985, \n .70277869701385, \n .60241633653641, \n -.18233296275139, \n .85120695829391, \n -.37064728140831, \n 3.2916390895844, \n -1.4963974952698, \n -.16283248364925, \n .56923681497574, \n -2.3088004589081, \n .51979947090149, \n 1.1197309494019, \n .02996650896966, \n .40969428420067, \n 1.9223841428757, \n -.21881568431854, \n 2.9340152740479, \n -3.8318600654602, \n -6.239429473877, \n -.08245316892862, \n 1.2339268922806, \n 1.1695692539215])\n\nmse = np.array([ 1.1112809181213, \n .6632194519043, \n .65879660844803, \n .65575885772705, \n .65364873409271, \n .65217137336731, \n .65113133192062, \n .6503963470459, \n .64987552165985, \n .64950579404831, \n .64924287796021, \n .64905577898026, \n .64892256259918, \n .64882761240005, \n .64875996112823, \n .64871168136597, \n .64867728948593, \n .64865279197693, \n .64863526821136, \n .64862281084061, \n .64861387014389, \n .64860755205154, \n .64860302209854, \n .64859980344772, \n .64859747886658, \n .64859586954117, \n .64859467744827, \n .64859384298325, \n .6485932469368, \n .64859282970428, \n .64859253168106, \n .64859229326248, \n .64859211444855, \n .64859199523926, \n .64859193563461, \n .64859187602997, \n .64859187602997, \n .64859181642532, \n .64859181642532, \n 
.64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068, \n .64859175682068])\n\nstdp = np.array([ 0, \n 0, \n .02869686298072, \n .05651443824172, \n .0503994859755, \n .06887971609831, \n .05940540507436, \n .08067328482866, \n .08167565613985, \n .06429278105497, \n .07087650150061, \n .06886467337608, \n .06716959923506, \n .08230647444725, \n 
.07099691033363, \n .08401278406382, \n .07996553182602, \n .07354256510735, \n .09366323798895, \n .08811800926924, \n .10296355187893, \n .08846370875835, \n .0852297320962, \n .08700425922871, \n .09751411527395, \n .09737934917212, \n .11228405684233, \n .1053489819169, \n .12352022528648, \n .16439816355705, \n .1643835157156, \n .19891132414341, \n .17551273107529, \n .17827558517456, \n .19562774896622, \n .21028305590153, \n .23767858743668, \n .24580039083958, \n .28269505500793, \n .29883882403374, \n .31247469782829, \n .35402658581734, \n .37410452961922, \n .39106267690659, \n .42040377855301, \n .44518512487411, \n .43608102202415, \n .44340893626213, \n .44959822297096, \n .40977239608765, \n .42118826508522, \n .40079545974731, \n .38357082009315, \n .36902260780334, \n .35673499107361, \n .36137464642525, \n .38031083345413, \n .47139286994934, \n .47323387861252, \n .60994738340378, \n .69538277387619, \n .7825602889061, \n .84117436408997, \n .9657689332962, \n 1.0109325647354, \n .95897275209427, \n .96013957262039, \n .9461076259613, \n .9342554807663, \n .83413934707642, \n .83968591690063, \n .84437066316605, \n .83330947160721, \n .8990553021431, \n .87949693202972, \n .86297762393951, \n .89407861232758, \n .93536442518234, \n 1.0303052663803, \n 1.1104937791824, \n 1.1481873989105, \n 1.2851470708847, \n 1.4458787441254, \n 1.5515991449356, \n 1.7309991121292, \n 1.8975404500961, \n 1.8579913377762, \n 1.8846583366394, \n 1.9672524929047, \n 1.9469071626663, \n 2.0048115253448, \n 1.9786299467087, \n 1.8213576078415, \n 1.6284521818161, \n 1.7508568763733, \n 1.5689061880112, \n 1.2950873374939, \n 1.2290096282959, \n 1.1882168054581, \n 1.1537625789642, \n 1.1697143316269, \n 1.1681711673737, \n 1.106795668602, \n 1.0849931240082, \n 1.006507396698, \n 1.0453414916992, \n .98803448677063, \n .95465070009232, \n 1.0165599584579, \n .67838954925537, \n .69311982393265, \n .69054269790649, \n .76345545053482, \n .84005492925644, \n .87471830844879, \n .91901183128357, \n .92638796567917, \n .96265280246735, \n 1.0083012580872, \n 1.0618740320206, \n 1.0921038389206, \n 1.2077431678772, \n 1.2303256988525, \n 1.174311041832, \n 1.3072115182877, \n 1.314337015152, \n 1.3503924608231, \n 1.5760731697083, \n 1.5264053344727, \n 1.34929728508, \n 1.304829955101, \n 1.2522557973862, \n 1.222869515419, \n 1.198047041893, \n 1.1770839691162, \n 1.1743944883347, \n 1.1571066379547, \n 1.1274864673615, \n 1.0574153661728, \n 1.058304309845, \n .99898308515549, \n .9789143204689, \n 1.0070173740387, \n 1.000718832016, \n 1.0104174613953, \n 1.0486439466476, \n 1.0058424472809, \n .98470783233643, \n 1.0119106769562, \n 1.0649236440659, \n 1.0346088409424, \n 1.0540577173233, \n 1.0704846382141, \n .97923594713211, \n .90216588973999, \n .9271782040596, \n .85819715261459, \n .75488126277924, \n .78776079416275, \n .77047789096832, \n .77089905738831, \n .8313245177269, \n .82229107618332, \n .90476810932159, \n .94439232349396, \n 1.0379292964935, \n 1.1469690799713, \n 1.1489590406418, \n 1.2257302999496, \n 1.1554099321365, \n 1.1260533332825, \n .9811190366745, \n .8436843752861, \n .95287209749222, \n .90993344783783, \n .94875508546829, \n 1.0115815401077, \n .94450175762177, \n .87282890081406, \n .91741597652435, \n .98511207103729, \n .9972335100174, \n 1.0975805521011, \n 1.1823329925537, \n 1.1487929821014, \n 1.270641207695, \n 1.2083609104156, \n 1.696394443512, \n 1.4628355503082, \n 1.4307631254196, \n 1.5087975263596, \n 1.1542117595673, \n 1.2262620925903, \n 
1.3880327939987, \n 1.3853038549423, \n 1.4396153688431, \n 1.7208145856857, \n 1.678991317749, \n 2.110867023468, \n 1.524417757988, \n .57946246862411, \n .56406193971634, \n .74643105268478])\n\nicstats = np.array([ 202, \n np.nan, \n -242.89663276735, \n 3, \n 491.79326553469, \n 501.7180686269])\n\nclass Bunch(dict):\n def __init__(self, **kw):\n dict.__init__(self, kw)\n self.__dict__ = self\n\n\nresults = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )\n\n" ]
[ [ "numpy.array" ] ]
LingxiaoShawn/pytorch_geometric
[ "50b7bfc4a59b5b6f7ec547ff862985f3b2e22798", "50b7bfc4a59b5b6f7ec547ff862985f3b2e22798" ]
[ "test/nn/conv/test_film_conv.py", "torch_geometric/transforms/remove_isolated_nodes.py" ]
[ "import torch\nfrom torch_sparse import SparseTensor\n\nfrom torch_geometric.nn import FiLMConv\n\n\ndef test_film_conv():\n x1 = torch.randn(4, 4)\n x2 = torch.randn(2, 16)\n edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])\n edge_type = torch.tensor([0, 1, 1, 0, 0, 1])\n row, col = edge_index\n adj = SparseTensor(row=row, col=col, value=edge_type, sparse_sizes=(4, 4))\n\n conv = FiLMConv(4, 32)\n assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=1)'\n out1 = conv(x1, edge_index)\n assert out1.size() == (4, 32)\n assert conv(x1, adj.t().set_value(None)).tolist() == out1.tolist()\n\n t = '(Tensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, edge_index).tolist() == out1.tolist()\n t = '(Tensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, adj.t().set_value(None)).tolist() == out1.tolist()\n\n conv = FiLMConv(4, 32, num_relations=2)\n assert conv.__repr__() == 'FiLMConv(4, 32, num_relations=2)'\n out1 = conv(x1, edge_index, edge_type)\n assert out1.size() == (4, 32)\n assert conv(x1, adj.t()).tolist() == out1.tolist()\n\n t = '(Tensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, edge_index, edge_type).tolist() == out1.tolist()\n t = '(Tensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit(x1, adj.t()).tolist() == out1.tolist()\n\n adj = adj.sparse_resize((4, 2))\n\n conv = FiLMConv((4, 16), 32)\n assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=1)'\n out1 = conv((x1, x2), edge_index)\n assert out1.size() == (2, 32)\n assert conv((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()\n\n t = '(PairTensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), edge_index).tolist() == out1.tolist()\n t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), adj.t().set_value(None)).tolist() == out1.tolist()\n\n conv = FiLMConv((4, 16), 32, num_relations=2)\n assert conv.__repr__() == 'FiLMConv((4, 16), 32, num_relations=2)'\n out1 = conv((x1, x2), edge_index, edge_type)\n assert out1.size() == (2, 32)\n assert conv((x1, x2), adj.t()).tolist() == out1.tolist()\n\n t = '(PairTensor, Tensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), edge_index, edge_type).tolist() == out1.tolist()\n t = '(PairTensor, SparseTensor, OptTensor) -> Tensor'\n jit = torch.jit.script(conv.jittable(t))\n assert jit((x1, x2), adj.t()).tolist() == out1.tolist()\n", "import re\n\nimport torch\n\nfrom torch_geometric.transforms import BaseTransform\nfrom torch_geometric.utils import remove_isolated_nodes\n\n\nclass RemoveIsolatedNodes(BaseTransform):\n r\"\"\"Removes isolated nodes from the graph.\"\"\"\n def __call__(self, data):\n num_nodes = data.num_nodes\n out = remove_isolated_nodes(data.edge_index, data.edge_attr, num_nodes)\n data.edge_index, data.edge_attr, mask = out\n\n if hasattr(data, '__num_nodes__'):\n data.num_nodes = int(mask.sum())\n\n for key, item in data:\n if bool(re.search('edge', key)):\n continue\n if torch.is_tensor(item) and item.size(0) == num_nodes:\n data[key] = item[mask]\n\n return data\n" ]
[ [ "torch.randn", "torch.tensor" ], [ "torch.is_tensor" ] ]
LucilleN/MH-Stigma-in-Masked-LMs
[ "9e30817cb32358a8e6004faf7ef88ad69d6148f6" ]
[ "main/pronouns.py" ]
[ "# Comparing He vs She\nfrom transformers import pipeline\nimport pandas as pd\nfrom utils import diagnoses, get_target_probability, get_top_k, print_stats, plot_male_and_female\nimport matplotlib.pylab as plt\nimport seaborn as sns\nimport argparse\n\ntemplates = [\n \"<mask> has [diagnosis]\",\n \"<mask> is diagnosed with [diagnosis]\",\n \"<mask> struggles with [diagnosis]\",\n \"<mask> suffers from [diagnosis]\",\n \"<mask> is in recovery from [diagnosis]\",\n \"<mask> is in treatment for [diagnosis]\",\n \"<mask> is being treated for [diagnosis]\",\n \"<mask> sees a psychiatrist for [diagnosis]\",\n \"<mask> sees a therapist for [diagnosis]\",\n \"<mask> is going to therapy for [diagnosis]\",\n \"<mask> takes medication for [diagnosis]\"\n]\n# templates = [\n# \"[MASK] has [diagnosis]\",\n# \"[MASK] is diagnosed with [diagnosis]\",\n# \"[MASK] struggles with [diagnosis]\",\n# \"[MASK] suffers from [diagnosis]\",\n# \"[MASK] is in recovery from [diagnosis]\",\n# \"[MASK] is in treatment for [diagnosis]\",\n# \"[MASK] is being treated for [diagnosis]\",\n# \"[MASK] sees a psychiatrist for [diagnosis]\",\n# \"[MASK] sees a therapist for [diagnosis]\",\n# \"[MASK] is going to therapy for [diagnosis]\",\n# \"[MASK] takes medication for [diagnosis]\"\n# ]\n\n# Aggregates for the following She/He experiments\nTOP_K = 20\nfemale_untreated, female_treated, female_total = {}, {}, {}\nmale_untreated, male_treated, male_total = {}, {}, {}\nall_df = None\n\ndef update_aggregates(male_mean, female_mean, template, treated=False):\n female_total[template] = female_mean\n male_total[template] = male_mean\n\n if treated:\n female_treated[template] = female_mean\n male_treated[template] = male_mean\n else:\n female_untreated[template] = female_mean\n male_untreated[template] = male_mean\n\n\ndef add_to_df(male, female, template): # , current_df=None):\n global all_df\n new_add = pd.DataFrame({'probability': male+female, 'gender': ['male']*11+[\n 'female']*11, 'diagnosis': diagnoses*2, 'prompt': [template]*22})\n all_df = new_add if (all_df is None) else pd.concat([all_df, new_add])\n\n\n\ndef run_experiment(template):\n male_mask = \"He\"\n female_mask = \"She\"\n\n print(f\"TOP {TOP_K} OUTPUTS FOR THE TEMPLATE {template}\")\n print(get_top_k(template, nlp_fill, TOP_K))\n\n female_outputs = get_target_probability(template, female_mask, nlp_fill)\n female_scores = [element['score'] for element in female_outputs]\n\n print(\"FEMALE SCORES:\")\n print(female_scores)\n\n male_outputs = get_target_probability(template, male_mask, nlp_fill)\n male_scores = [element['score'] for element in male_outputs]\n\n male_mean, female_mean = print_stats(male=male_scores, female=female_scores)\n\n if args.scatter_plot:\n update_aggregates(male_mean, female_mean, template, treated=False)\n plot_male_and_female(template, male_mask, female_mask, male_scores, female_scores)\n\n if args.box_plot:\n add_to_df(male_scores, female_scores, template)\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n usage=\"To run all experiments, execute this script without any additional arguments. 
\\\n To specify specific experiments, and to turn on outputting graphs, use the options below.\")\n\n parser.add_argument(\"-exp0\", \"--has\",\n help=\"Run experiment 0: She/He has X.\", action=\"store_true\")\n parser.add_argument(\"-exp1\", \"--is_diagnosed_with\",\n help=\"Run experiment 1: She/He is diagnosed with X.\", action=\"store_true\")\n parser.add_argument(\"-exp2\", \"--struggles_with\",\n help=\"Run experiment 2: She/He struggles with X.\", action=\"store_true\")\n parser.add_argument(\"-exp3\", \"--suffers_from\",\n help=\"Run experiment 3: She/He suffers from X.\", action=\"store_true\")\n parser.add_argument(\"-exp4\", \"--is_in_recovery_from\",\n help=\"Run experiment 4: She/He is in recovery from X.\", action=\"store_true\")\n parser.add_argument(\"-exp5\", \"--is_in_treatment_for\",\n help=\"Run experiment 5: She/He is in treatment for X.\", action=\"store_true\")\n parser.add_argument(\"-exp6\", \"--is_being_treated_for\",\n help=\"Run experiment 6: She/He is being treated for X.\", action=\"store_true\")\n parser.add_argument(\"-exp7\", \"--sees_a_psychiatrist_for\",\n help=\"Run experiment 7: She/He sees a psychiatrist for X.\", action=\"store_true\")\n parser.add_argument(\"-exp8\", \"--sees_a_therapist_for\",\n help=\"Run experiment 8: She/He sees a therapist for X.\", action=\"store_true\")\n parser.add_argument(\"-exp9\", \"--is_going_to_therapy_for\",\n help=\"Run experiment 9: She/He is going to therapy for X.\", action=\"store_true\")\n parser.add_argument(\"-exp10\", \"--takes_medication_for\",\n help=\"Run experiment 10: She/He takes medication for X.\", action=\"store_true\")\n parser.add_argument(\"-bp\", \"--box_plot\",\n help=\"Generate a box and whisker plot to summarize all the experiments that were run.\", action=\"store_true\")\n parser.add_argument(\"-sp\", \"--scatter_plot\",\n help=\"Generate a scatter plot for each experiment that was run.\", action=\"store_true\")\n\n args = parser.parse_args()\n\n exps_to_run = []\n i = 0\n for arg in vars(args):\n if getattr(args, arg):\n exps_to_run.append(i)\n i += 1\n if i == 10:\n break\n if len(exps_to_run) == 0:\n exps_to_run = list(range(11))\n\n nlp_fill = pipeline('fill-mask', top_k=TOP_K, model=\"roberta-large\")\n # nlp_fill = pipeline('fill-mask', model=\"mental/mental-roberta-base\")\n # nlp_fill = pipeline('fill-mask', model=\"emilyalsentzer/Bio_ClinicalBERT\")\n # nlp_fill = pipeline('fill-mask', model=\"yikuan8/Clinical-Longformer\")\n # nlp_fill = pipeline('fill-mask', model=\"Tsubasaz/clinical-pubmed-bert-base-512\")\n # nlp_fill = pipeline('fill-mask', model=\"nlp4good/psych-search\")\n\n\n for exp_number in exps_to_run:\n print(f'running experiment {exp_number}')\n template = templates[exp_number]\n run_experiment(template)\n\n if args.scatter_plot:\n female_total_sum = sum_dictionary(female_total)\n female_untreated_sum = sum_dictionary(female_untreated)\n female_treated_sum = sum_dictionary(female_treated)\n\n male_total_sum = sum_dictionary(male_total)\n male_untreated_sum = sum_dictionary(male_untreated)\n male_treated_sum = sum_dictionary(male_treated)\n\n print(\n f\"FEMALE: total={female_total_sum}, untreated={female_untreated_sum}, treated={female_treated_sum}\")\n print(\n f\"MALE: total={male_total_sum}, untreated={male_untreated_sum}, treated={male_treated_sum}\")\n\n if args.box_plot:\n ax = sns.boxplot(x=\"prompt\", y=\"probability\", hue=\"gender\",\n data=all_df, width=0.3, showfliers=False)\n sns.despine(offset=10)\n sns.set(rc={'figure.figsize': (18, 6)}, 
font_scale=1.2)\n\n plt.xticks(rotation=45, ha='right', fontsize=12)\n ax.set_ylim([0, 0.6])\n plt.title(\"Probabilities of predicting gendered pronouns\")\n plt.savefig(\"../plots/boxplot_pronouns_roberta.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_mentalroberta.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_clinicalbert.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_clinicallongformer.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_clinicalpubmedbert.pdf\", bbox_inches=\"tight\")\n # plt.savefig(\"../plots/boxplot_pronouns_psychsearch.pdf\", bbox_inches=\"tight\")" ]
[ [ "matplotlib.pylab.savefig", "matplotlib.pylab.title", "pandas.DataFrame", "matplotlib.pylab.xticks", "pandas.concat" ] ]
cww97/visual-language-grasping
[ "f96404c9997ef55ede07293ce319ca19a39ae5ec" ]
[ "envs/simulation/robot.py" ]
[ "import os\nimport time\n\nimport numpy as np\nimport yaml\n\nimport utils\nfrom . import vrep\nfrom ..robot import Robot as BaseRobot\nfrom ..robot import Reward\nfrom ..data import Data as TextData\nimport random\nfrom bisect import bisect_right\nimport cv2\nimport os\n\n\nclass SimRobot(BaseRobot):\n\tdef __init__(self, obj_mesh_dir, num_obj, *args):\n\t\tBaseRobot.__init__(self, *args)\n\t\tself.text_data = TextData()\n\n\t\t# Define colors for object meshes (Tableau palette)\n\t\tself.color_name = ['blue', 'green', 'brown', 'orange', 'yellow', 'gray', 'red', 'purple', 'cyan', 'pink']\n\t\tself.color_space = np.asarray([[78.0, 121.0, 167.0], # blue\n\t\t\t\t\t\t\t\t\t\t[89.0, 161.0, 79.0], # green\n\t\t\t\t\t\t\t\t\t\t[156, 117, 95], # brown\n\t\t\t\t\t\t\t\t\t\t[242, 142, 43], # orange\n\t\t\t\t\t\t\t\t\t\t[237.0, 201.0, 72.0], # yellow\n\t\t\t\t\t\t\t\t\t\t[186, 176, 172], # gray\n\t\t\t\t\t\t\t\t\t\t[255.0, 87.0, 89.0], # red\n\t\t\t\t\t\t\t\t\t\t[176, 122, 161], # purple\n\t\t\t\t\t\t\t\t\t\t[118, 183, 178], # cyan\n\t\t\t\t\t\t\t\t\t\t[255, 157, 167]]) / 255.0 # pink\n\n\t\t# Read files in object mesh directory\n\t\tself.obj_mesh_dir = obj_mesh_dir\n\t\tself.num_obj = num_obj\n\t\tself.mesh_list = list(filter(lambda x: x.endswith('.obj'), os.listdir(self.obj_mesh_dir)))\n\n\t\ttry:\n\t\t\twith open(os.path.join(obj_mesh_dir, 'blocks.yml')) as f:\n\t\t\t\tyaml_dict = yaml.safe_load(f)\n\t\t\tself.groups = yaml_dict['groups']\n\t\t\tself.mesh_name = yaml_dict['names']\n\t\t\tfor obj in self.mesh_list:\n\t\t\t\tif obj not in self.mesh_name.keys():\n\t\t\t\t\traise Exception\n\t\texcept Exception:\n\t\t\tprint('Failed to read block names/groups')\n\t\t\texit(1)\n\n\t\t# Make sure to have the server side running in V-REP:\n\t\t# in a child script of a V-REP scene, add following command\n\t\t# to be executed just once, at simulation start:\n\t\t#\n\t\t# simExtRemoteApiStart(19999)\n\t\t#\n\t\t# then start simulation, and run this program.\n\t\t#\n\t\t# IMPORTANT: for each successful call to simxStart, there\n\t\t# should be a corresponding call to simxFinish at the end!\n\n\t\t# MODIFY remoteApiConnections.txt\n\n\t\t# Connect to simulator\n\t\tvrep.simxFinish(-1) # Just in case, close all opened connections\n\t\t# Connect to V-REP on port 19997\n\t\tself.sim_client = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)\n\t\tif self.sim_client == -1:\n\t\t\tprint('Failed to connect to simulation (V-REP remote API server). 
Exiting.')\n\t\t\texit()\n\t\telse:\n\t\t\tprint('Connected to simulation.')\n\t\t\t# self.restart_sim()\n\t\tself.MODE = vrep.simx_opmode_blocking \n\n\t\t# Setup virtual camera in simulation\n\t\tself.setup_sim_camera()\n\t\tself.object_handles = []\n\t\tself.object_left_handles = []\n\t\tself.target_handle = None\n\n\t\t# Add objects to simulation environment\n\t\t# self.add_objects()\n\n\tdef setup_sim_camera(self):\n\n\t\t# Get handle to camera\n\t\tsim_ret, self.cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_persp', self.MODE)\n\t\t_, self.up_cam_handle = vrep.simxGetObjectHandle(self.sim_client, 'Vision_sensor_ortho', self.MODE)\n\n\t\t# Get camera pose and intrinsics in simulationo\n\t\tsim_ret, cam_position = vrep.simxGetObjectPosition(self.sim_client, self.cam_handle, -1, self.MODE)\n\t\tsim_ret, cam_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.cam_handle, -1, self.MODE)\n\t\tcam_trans = np.eye(4, 4)\n\t\tcam_trans[0:3, 3] = np.asarray(cam_position)\n\t\tcam_orientation = [-cam_orientation[0], -cam_orientation[1], -cam_orientation[2]]\n\t\tcam_rotm = np.eye(4, 4)\n\t\tcam_rotm[0:3, 0:3] = np.linalg.inv(utils.euler2rotm(cam_orientation))\n\t\t# Compute rigid transformation representating camera pose\n\t\tself.cam_pose = np.dot(cam_trans, cam_rotm)\n\t\tself.cam_intrinsics = np.asarray([[618.62, 0, 320], [0, 618.62, 240], [0, 0, 1]])\n\t\tself.cam_depth_scale = 1\n\n\t\t# Get background image\n\t\tself.bg_color_img, self.bg_depth_img = self.get_camera_data()\n\t\tself.bg_depth_img = self.bg_depth_img * self.cam_depth_scale\n\n\tdef add_objects(self, mesh_idx=-1, mesh_color=-1):\n\t\t# TODO\n\t\t# handle <-> ind <-> obj -> name\n\t\t# Just for debug\n\t\t# print([self.mesh_list[ind] for ind in self.obj_mesh_ind])\n\t\t# self.obj_mesh_ind = np.array(range(len(self.mesh_list)))\n\t\t# self.obj_mesh_color = self.color_space[np.asarray(range(self.num_obj)) % 10, :]\n\t\t# Randomly choose objects to add to scene\n\n\t\tif mesh_idx == -1:\n\t\t\tgroup_chosen = np.random.choice(self.groups, size=self.num_obj, replace=False)\n\t\t\tself.obj_mesh_ind = np.array([self.mesh_list.index(np.random.choice(obj)) for obj in group_chosen])\n\t\t\tself.obj_mesh_color = self.color_space[np.random.choice(np.arange(self.color_space.shape[0]), size=self.num_obj, replace=False)]\n\t\telse:\n\t\t\tself.obj_mesh_ind = np.array([mesh_idx])\n\t\t\tself.obj_mesh_color = np.array([mesh_color])\n\t\t\t# import pdb; pdb.set_trace()\n\t\t\n\t\t# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)\n\t\tself.object_handles = []\n\t\tfor object_idx in range(len(self.obj_mesh_ind)):\n\t\t\tcurr_mesh_file = os.path.join(self.obj_mesh_dir, self.mesh_list[self.obj_mesh_ind[object_idx]])\n\t\t\tcurr_shape_name = 'shape_%02d' % object_idx\n\t\t\tdrop_x = (self.workspace_limits[0][1] - self.workspace_limits[0][0] - 0.2) * np.random.random_sample() + self.workspace_limits[0][0] + 0.1\n\t\t\tdrop_y = (self.workspace_limits[1][1] - self.workspace_limits[1][0] - 0.2) * np.random.random_sample() + self.workspace_limits[1][0] + 0.1\n\t\t\tobject_position = [drop_x, drop_y, 0.15]\n\t\t\tobject_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]\n\t\t\tobject_color = [self.obj_mesh_color[object_idx][0], self.obj_mesh_color[object_idx][1], self.obj_mesh_color[object_idx][2]]\n\t\t\tret_resp, ret_ints, ret_floats, ret_strings, ret_buffer = 
vrep.simxCallScriptFunction(self.sim_client, 'remoteApiCommandServer', vrep.sim_scripttype_childscript, 'importShape', [0, 0, 255, 0], object_position + object_orientation + object_color, [curr_mesh_file, curr_shape_name], bytearray(), vrep.simx_opmode_blocking)\n\t\t\tif ret_resp == 8:\n\t\t\t\tprint('Failed to add new objects to simulation. Please restart.')\n\t\t\t\texit()\n\t\t\t# print(ret_ints, ret_ints[0])\n\t\t\tcurr_shape_handle = ret_ints[0]\n\t\t\tself.object_handles.append(curr_shape_handle)\n\t\t\ttime.sleep(2)\n\t\tself.object_left_handles = self.object_handles.copy()\n\t\tself.prev_obj_positions = []\n\t\tself.obj_positions = []\n\t\tself.get_instruction() # nb\n\t\t# import pdb; pdb.set_trace()\n\n\tdef restart_sim(self):\n\t\tsim_ret, self.UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_target', vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (-0.5, 0, 0.3), vrep.simx_opmode_blocking)\n\t\tvrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\tvrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\ttime.sleep(1)\n\t\tsim_ret, self.RG2_tip_handle = vrep.simxGetObjectHandle(self.sim_client, 'UR5_tip', vrep.simx_opmode_blocking)\n\t\tsim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)\n\t\t# V-REP bug requiring multiple starts and stops to restart\n\t\twhile gripper_position[2] > 0.4:\n\t\t\tvrep.simxStopSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\t\tvrep.simxStartSimulation(self.sim_client, vrep.simx_opmode_blocking)\n\t\t\ttime.sleep(1)\n\t\t\tsim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)\n\n\tdef is_stable(self):\n\t\t# Check if simulation is stable by checking if gripper is within workspace\n\t\tsim_ret, gripper_position = vrep.simxGetObjectPosition(self.sim_client, self.RG2_tip_handle, -1, vrep.simx_opmode_blocking)\n\t\tsim_is_ok = gripper_position[0] > self.workspace_limits[0][0] - 0.1 and \\\n\t\t\t\t\tgripper_position[0] < self.workspace_limits[0][1] + 0.1 and \\\n\t\t\t\t\tgripper_position[1] > self.workspace_limits[1][0] - 0.1 and \\\n\t\t\t\t\tgripper_position[1] < self.workspace_limits[1][1] + 0.1 and \\\n\t\t\t\t\tgripper_position[2] > self.workspace_limits[2][0] and \\\n\t\t\t\t\tgripper_position[2] < self.workspace_limits[2][1]\n\t\tif not sim_is_ok:\n\t\t\tprint('Simulation unstable, Reset.')\n\t\treturn sim_is_ok\n\n\tdef reset(self):\n\t\tself.restart_sim()\n\t\tself.add_objects()\n\n\t# def stop_sim(self):objects/blocks\n\t# if self.is_sim:\n\t# # Now send some data to V-REP in a non-blocking fashion:\n\t# # vrep.simxAddStatusbarMessage(sim_client,'Hello V-REP!',vrep.simx_opmode_oneshot)\n\n\t# # # Start the simulation\n\t# # vrep.simxStartSimulation(sim_client,vrep.simx_opmode_oneshot_wait)\n\n\t# # # Stop simulation:\n\t# # vrep.simxStopSimulation(sim_client,vrep.simx_opmode_oneshot_wait)\n\n\t# # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. 
You can guarantee this with (for example):\n\t# vrep.simxGetPingTime(self.sim_client)\n\n\t# # Now close the connection to V-REP:\n\t# vrep.simxFinish(self.sim_client)\n\n\tdef get_task_score(self):\n\n\t\tkey_positions = np.asarray([[-0.625, 0.125, 0.0], # red\n\t\t\t\t\t\t\t\t\t[-0.625, -0.125, 0.0], # blue\n\t\t\t\t\t\t\t\t\t[-0.375, 0.125, 0.0], # green\n\t\t\t\t\t\t\t\t\t[-0.375, -0.125, 0.0]]) # yellow\n\n\t\tobj_positions = np.asarray(self.get_obj_positions())\n\t\tobj_positions.shape = (1, obj_positions.shape[0], obj_positions.shape[1])\n\t\tobj_positions = np.tile(obj_positions, (key_positions.shape[0], 1, 1))\n\n\t\tkey_positions.shape = (key_positions.shape[0], 1, key_positions.shape[1])\n\t\tkey_positions = np.tile(key_positions, (1, obj_positions.shape[1], 1))\n\n\t\tkey_dist = np.sqrt(np.sum(np.power(obj_positions - key_positions, 2), axis=2))\n\t\tkey_nn_idx = np.argmin(key_dist, axis=0)\n\n\t\treturn np.sum(key_nn_idx == np.asarray(range(self.num_obj)) % 4)\n\n\tdef check_goal_reached(self, handle):\n\t\t# goal_reached = self.get_task_score() == self.num_obj\n\t\tgoal_reached = self.target_handle == handle\n\t\treturn goal_reached\n\n\tdef get_obj_positions(self):\n\n\t\tobj_positions = []\n\t\tfor object_handle in self.object_handles:\n\t\t\tsim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, vrep.simx_opmode_blocking)\n\t\t\tobj_positions.append(object_position)\n\n\t\treturn obj_positions\n\n\tdef get_obj_positions_and_orientations(self):\n\n\t\tobj_positions = []\n\t\tobj_orientations = []\n\t\tfor object_handle in self.object_handles:\n\t\t\tsim_ret, object_position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)\n\t\t\tsim_ret, object_orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)\n\t\t\tobj_positions.append(object_position)\n\t\t\tobj_orientations.append(object_orientation)\n\n\t\treturn obj_positions, obj_orientations\n\n\tdef reposition_objects(self, workspace_limits):\n\t\t# Move gripper out of the way\n\t\tself.move_to([-0.1, 0, 0.3], None)\n\t\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target', self.MODE)\n\t\t# vrep.simxSetObjectPosition(self.sim_client, UR5_target_handle, -1, (-0.5,0,0.3), self.MODE)\n\t\t# time.sleep(1)\n\n\t\tfor object_handle in self.object_handles:\n\t\t\t# Drop object at random x,y location and random orientation in robot workspace\n\t\t\tdrop_x = (workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample() + workspace_limits[0][0] + 0.1\n\t\t\tdrop_y = (workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample() + workspace_limits[1][0] + 0.1\n\t\t\tobject_position = [drop_x, drop_y, 0.15]\n\t\t\tobject_orientation = [2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample(), 2 * np.pi * np.random.random_sample()]\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, object_handle, -1, object_position, self.MODE)\n\t\t\tvrep.simxSetObjectOrientation(self.sim_client, object_handle, -1, object_orientation, self.MODE)\n\t\t\ttime.sleep(2)\n\n\tdef get_camera_data(self, handle=-1):\n\t\tif handle == -1:\n\t\t\thandle = self.cam_handle\n\t\t# Get color image from simulation\n\t\tsim_ret, resolution, raw_image = vrep.simxGetVisionSensorImage(self.sim_client, handle, 0, self.MODE)\n\t\tcolor_img = np.asarray(raw_image)\n\t\tcolor_img.shape = (resolution[1], resolution[0], 3)\n\t\tcolor_img = color_img.astype(np.float) / 
255\n\t\tcolor_img[color_img < 0] += 1\n\t\tcolor_img *= 255\n\t\tcolor_img = np.fliplr(color_img)\n\t\tcolor_img = color_img.astype(np.uint8)\n\n\t\t# Get depth image from simulation\n\t\tsim_ret, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(self.sim_client, handle, self.MODE)\n\t\tdepth_img = np.asarray(depth_buffer)\n\t\tdepth_img.shape = (resolution[1], resolution[0])\n\t\tdepth_img = np.fliplr(depth_img)\n\t\tzNear = 0.01\n\t\tzFar = 10\n\t\tdepth_img = depth_img * (zFar - zNear) + zNear\n\n\t\treturn color_img, depth_img\n\n\tdef get_instruction(self):\n\t\t# TODO\n\t\t# add more template\n\t\tinstruction_template = \"pick up the {color} {shape}.\"\n\t\tind = np.random.randint(0, self.num_obj)\n\t\tcolor = utils.get_mush_color_name(self.obj_mesh_color[ind])\n\t\tshape = np.random.choice(self.mesh_name[self.mesh_list[self.obj_mesh_ind[ind]]])\n\t\tself.target_handle = self.object_handles[ind]\n\t\tself.instruction_str = instruction_template.format(color=color, shape=shape) # nb\n\t\tself.instruction = self.text_data.get_tensor(self.instruction_str)\n\t\treturn self.instruction\n\n\tdef close_gripper(self, _async=False):\n\t\tgripper_motor_velocity = -0.5\n\t\tgripper_motor_force = 100\n\t\tsim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)\n\t\tsim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)\n\t\tgripper_fully_closed = False\n\t\twhile gripper_joint_position > -0.047: # Block until gripper is fully closed\n\t\t\tsim_ret, new_gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\t\t\t# print(gripper_joint_position)\n\t\t\tif new_gripper_joint_position >= gripper_joint_position:\n\t\t\t\treturn gripper_fully_closed\n\t\t\tgripper_joint_position = new_gripper_joint_position\n\t\tgripper_fully_closed = True\n\n\t\treturn gripper_fully_closed\n\n\tdef open_gripper(self, _async=False):\n\t\tgripper_motor_velocity = 0.5\n\t\tgripper_motor_force = 20\n\t\tsim_ret, RG2_gripper_handle = vrep.simxGetObjectHandle(self.sim_client, 'RG2_openCloseJoint', vrep.simx_opmode_blocking)\n\t\tsim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointForce(self.sim_client, RG2_gripper_handle, gripper_motor_force, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetJointTargetVelocity(self.sim_client, RG2_gripper_handle, gripper_motor_velocity, vrep.simx_opmode_blocking)\n\t\twhile gripper_joint_position < 0.0536: # Block until gripper is fully open\n\t\t\tsim_ret, gripper_joint_position = vrep.simxGetJointPosition(self.sim_client, RG2_gripper_handle, vrep.simx_opmode_blocking)\n\n\tdef move_to(self, tool_position, tool_orientation):\n\t\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)\n\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\n\t\tmove_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])\n\t\tmove_magnitude = 
np.linalg.norm(move_direction)\n\t\tmove_step = 0.02 * move_direction / move_magnitude\n\t\tnum_move_steps = int(np.floor(move_magnitude / 0.02))\n\n\t\tfor step_iter in range(num_move_steps):\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0], UR5_target_position[1] + move_step[1], UR5_target_position[2] + move_step[2]), vrep.simx_opmode_blocking)\n\t\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)\n\n\t# Primitives ----------------------------------------------------------\n\n\tdef random_grasp_action(self):\n\t\t'''\n\t\tangles = []\n\t\tfor i in range(8):\n\t\t\tangle = np.deg2rad(i * (360.0 / 16))\n\t\t\ttool_rotation_angle = (angle % np.pi) - np.pi / 2\n\t\t\tangles.append(tool_rotation_angle)\n\t\tprint(angles)\n\t\t'''\n\t\t# assert len(self.object_left_handles) > 0\n\t\tobject_handle = random.sample(self.object_left_handles, 1)[0]\n\t\t\n\t\t_, orientation = vrep.simxGetObjectOrientation(self.sim_client, object_handle, -1, self.MODE)\n\t\tall_angles = [-1.5708, -1.1781, -0.7854, -0.3927, 0.0, 0.3927, 0.7854, 1.1781]\n\t\tpossible_angles = [orientation[1], orientation[1] - np.pi/2.0]\n\t\tsampled_angle = random.sample(possible_angles, 1)[0]\n\t\tangle = max(0, bisect_right(all_angles, sampled_angle) - 1)  # snap the sampled grasp angle to its rotation bin\n\t\t\n\t\t_, position = vrep.simxGetObjectPosition(self.sim_client, object_handle, -1, self.MODE)\n\t\taction_x = (position[1] - self.workspace_limits[1][0]) / self.heightmap_resolution\n\t\taction_y = (position[0] - self.workspace_limits[0][0]) / self.heightmap_resolution\n\t\taction_x = min(action_x, 223)\n\t\taction_y = min(action_y, 223)\n\t\taction = (angle, int(action_x), int(action_y))\n\t\t# print(object_handle, action)\n\t\t# import pdb; pdb.set_trace()\n\t\treturn action\n\t\n\tdef step(self, action, valid_depth_heightmap, num_rotations, heightmap_resolution):\n\t\t# Compute 3D position of pixel\n\t\tangle = np.deg2rad(action[0] * (360.0 / num_rotations))\n\t\tbest_pix_x = action[2]\n\t\tbest_pix_y = action[1]\n\t\tprimitive_position = [\n\t\t\tbest_pix_x * heightmap_resolution + self.workspace_limits[0][0], \n\t\t\tbest_pix_y * heightmap_resolution + self.workspace_limits[1][0],\n\t\t\tvalid_depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]\n\t\t]\n\n\t\treward = self.grasp(primitive_position, angle)\n\t\tdone = (reward == Reward.SUCCESS)\n\t\t# print(reward, done)\n\t\treturn reward.value, done\n\n\tdef grasp(self, position, heightmap_rotation_angle):\n\t\t# print('Executing: grasp at (%f, %f, %f)' % (position[0], position[1], position[2]))\n\t\t# Compute tool orientation from heightmap rotation angle\n\t\ttool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2\n\n\t\t# Avoid collision with floor\n\t\tposition = np.asarray(position).copy()\n\t\tposition[2] = max(position[2] - 0.04, self.workspace_limits[2][0] + 0.02)\n\n\t\t# Move gripper to location above grasp target\n\t\tgrasp_location_margin = 0.15\n\t\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)\n\t\tlocation_above_grasp_target = (position[0], position[1], position[2] + grasp_location_margin)\n\n\t\t# Compute gripper position and linear movement increments\n\t\ttool_position = 
location_above_grasp_target\n\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\tmove_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])\n\t\tmove_magnitude = np.linalg.norm(move_direction)\n\t\tmove_step = 0.05 * move_direction / move_magnitude\n\t\t# if np.floor(move_direction[0] / move_step[0]) == np.nan or move_step[0] == 0: import pdb; pdb.set_trace() \n\t\tnum_move_steps = int(np.floor(move_direction[0] / move_step[0])) if move_step[0] != 0 else 1\n\n\t\t# Compute gripper orientation and rotation increments\n\t\tsim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\trotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3\n\t\tnum_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))\n\n\t\t# Simultaneously move and rotate gripper\n\t\tfor step_iter in range(max(num_move_steps, num_rotation_steps)):\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)\n\t\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)\n\n\t\t# Ensure gripper is open\n\t\tself.open_gripper()\n\n\t\t# Approach grasp target\n\t\tself.move_to(position, None)\n\n\t\t# Close gripper to grasp target\n\t\tgripper_full_closed = self.close_gripper()\n\n\t\t# Move gripper to location above grasp target\n\t\tself.move_to(location_above_grasp_target, None)\n\n\t\t# Check if grasp is successful\n\t\tgripper_full_closed = self.close_gripper()\n\t\tgrasp_sth = not gripper_full_closed\n\n\t\t# Move the grasped object elsewhere\n\t\tif grasp_sth:\n\t\t\tobject_positions = np.asarray(self.get_obj_positions())\n\t\t\tobject_positions = object_positions[:, 2]\n\t\t\tgrasped_object_ind = np.argmax(object_positions)\n\t\t\tgrasped_object_handle = self.object_handles[grasped_object_ind]\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, grasped_object_handle, -1, (-0.5, 0.5 + 0.05 * float(grasped_object_ind), 0.1), self.MODE)\n\t\t\tself.object_left_handles.remove(grasped_object_handle)\n\t\t\tif grasped_object_handle == self.target_handle:\n\t\t\t\treturn Reward.SUCCESS\n\t\t\telse:\n\t\t\t\treturn Reward.WRONG\n\t\telse:\n\t\t\treturn Reward.FAIL\n\n\tdef push(self, position, heightmap_rotation_angle, workspace_limits):\n\t\t# print('Executing: push at (%f, %f, %f)' % (position[0], position[1], position[2]))\n\t\t# Compute tool orientation from heightmap rotation angle\n\t\ttool_rotation_angle = (heightmap_rotation_angle % np.pi) - np.pi / 2\n\n\t\t# Adjust pushing point to be on tip of finger\n\t\tposition[2] = position[2] + 0.026\n\n\t\t# Compute pushing 
direction\n\t\tpush_orientation = [1.0, 0.0]\n\t\tpush_direction = np.asarray([push_orientation[0] * np.cos(heightmap_rotation_angle) - push_orientation[1] * np.sin(heightmap_rotation_angle), push_orientation[0] * np.sin(heightmap_rotation_angle) + push_orientation[1] * np.cos(heightmap_rotation_angle)])\n\n\t\t# Move gripper to location above pushing point\n\t\tpushing_point_margin = 0.1\n\t\tlocation_above_pushing_point = (position[0], position[1], position[2] + pushing_point_margin)\n\n\t\t# Compute gripper position and linear movement increments\n\t\ttool_position = location_above_pushing_point\n\t\tsim_ret, UR5_target_position = vrep.simxGetObjectPosition(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\tmove_direction = np.asarray([tool_position[0] - UR5_target_position[0], tool_position[1] - UR5_target_position[1], tool_position[2] - UR5_target_position[2]])\n\t\tmove_magnitude = np.linalg.norm(move_direction)\n\t\tmove_step = 0.05 * move_direction / move_magnitude\n\t\tnum_move_steps = int(np.floor(move_direction[0] / move_step[0]))\n\n\t\t# Compute gripper orientation and rotation increments\n\t\tsim_ret, gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t\trotation_step = 0.3 if (tool_rotation_angle - gripper_orientation[1] > 0) else -0.3\n\t\tnum_rotation_steps = int(np.floor((tool_rotation_angle - gripper_orientation[1]) / rotation_step))\n\n\t\t# Simultaneously move and rotate gripper\n\t\tfor step_iter in range(max(num_move_steps, num_rotation_steps)):\n\t\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (UR5_target_position[0] + move_step[0] * min(step_iter, num_move_steps), UR5_target_position[1] + move_step[1] * min(step_iter, num_move_steps), UR5_target_position[2] + move_step[2] * min(step_iter, num_move_steps)), vrep.simx_opmode_blocking)\n\t\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, gripper_orientation[1] + rotation_step * min(step_iter, num_rotation_steps), np.pi / 2), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectPosition(self.sim_client, self.UR5_target_handle, -1, (tool_position[0], tool_position[1], tool_position[2]), vrep.simx_opmode_blocking)\n\t\tvrep.simxSetObjectOrientation(self.sim_client, self.UR5_target_handle, -1, (np.pi / 2, tool_rotation_angle, np.pi / 2), vrep.simx_opmode_blocking)\n\n\t\t# Ensure gripper is closed\n\t\tself.close_gripper()\n\n\t\t# Approach pushing point\n\t\tself.move_to(position, None)\n\n\t\t# Compute target location (push to the right)\n\t\tpush_length = 0.1\n\t\ttarget_x = min(max(position[0] + push_direction[0] * push_length, workspace_limits[0][0]), workspace_limits[0][1])\n\t\ttarget_y = min(max(position[1] + push_direction[1] * push_length, workspace_limits[1][0]), workspace_limits[1][1])\n\t\tpush_length = np.sqrt(np.power(target_x - position[0], 2) + np.power(target_y - position[1], 2))\n\n\t\t# Move in pushing direction towards target location\n\t\tself.move_to([target_x, target_y, position[2]], None)\n\n\t\t# Move gripper to location above grasp target\n\t\tself.move_to([target_x, target_y, location_above_pushing_point[2]], None)\n\n\t\tpush_success = True\n\t\treturn push_success\n\n\t# def place(self, position, heightmap_rotation_angle, workspace_limits):\n\t# print('Executing: place at (%f, %f, %f)' % (position[0], position[1], position[2]))\n\n\t# # Compute tool orientation from heightmap rotation angle\n\t# tool_rotation_angle = 
(heightmap_rotation_angle % np.pi) - np.pi/2\n\n\t# # Avoid collision with floor\n\t# position[2] = max(position[2] + 0.04 + 0.02, workspace_limits[2][0] + 0.02)\n\n\t# # Move gripper to location above place target\n\t# place_location_margin = 0.1\n\t# sim_ret, UR5_target_handle = vrep.simxGetObjectHandle(self.sim_client,'UR5_target',vrep.simx_opmode_blocking)\n\t# location_above_place_target = (position[0], position[1], position[2] + place_location_margin)\n\t# self.move_to(location_above_place_target, None)\n\n\t# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t# if tool_rotation_angle - gripper_orientation[1] > 0:\n\t# increment = 0.2\n\t# else:\n\t# increment = -0.2\n\t# while abs(tool_rotation_angle - gripper_orientation[1]) >= 0.2:\n\t# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, gripper_orientation[1] + increment, np.pi/2), vrep.simx_opmode_blocking)\n\t# time.sleep(0.01)\n\t# sim_ret,gripper_orientation = vrep.simxGetObjectOrientation(self.sim_client, UR5_target_handle, -1, vrep.simx_opmode_blocking)\n\t# vrep.simxSetObjectOrientation(self.sim_client, UR5_target_handle, -1, (np.pi/2, tool_rotation_angle, np.pi/2), vrep.simx_opmode_blocking)\n\n\t# # Approach place target\n\t# self.move_to(position, None)\n\n\t# # Ensure gripper is open\n\t# self.open_gripper()\n\n\t# # Move gripper to location above place target\n\t# self.move_to(location_above_place_target, None)\n" ]
[ [ "numpy.random.random_sample", "numpy.eye", "numpy.tile", "numpy.argmin", "numpy.fliplr", "numpy.floor", "numpy.asarray", "numpy.random.choice", "numpy.argmax", "numpy.arange", "numpy.cos", "numpy.power", "numpy.array", "numpy.sin", "numpy.dot", "numpy.random.randint", "numpy.linalg.norm", "numpy.deg2rad" ] ]
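The `step()` method in the simulation environment above maps a discrete action tuple onto a world-frame grasp pose. Below is a minimal, self-contained sketch of just that mapping; the workspace limits, heightmap resolution, and rotation count are illustrative assumptions, not values taken from this repo (the real ones live on the class):

import numpy as np

# Assumed configuration for illustration only.
workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.4]])
heightmap_resolution = 0.002
num_rotations = 16

action = (3, 112, 112)  # (rotation index, pix_y, pix_x), as consumed by step()
angle = np.deg2rad(action[0] * (360.0 / num_rotations))
x = action[2] * heightmap_resolution + workspace_limits[0][0]
y = action[1] * heightmap_resolution + workspace_limits[1][0]
print(angle, x, y)  # z would come from the depth heightmap at (pix_y, pix_x)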
wesh92/nwnodetool
[ "1c79418d9ad0f1f22dfbc6aab390ca9603fb1596" ]
[ "nwnodedetector/nwocrsounds.py" ]
[ "\"\"\"\nNew World OCR Node Detector\nCreated: 2021-10-07\nDev: Wes H.\n\nUses OCR to get coordinates from top right of the NW game window\nand imposes that against a list of possible nodes.\nWhen you're close to one it will play a bell noise!\n\"\"\"\nimport winsound\nfrom PIL import ImageGrab, ImageOps, Image\nimport pytesseract\nimport psutil\nfrom time import sleep\nimport pathlib\nimport iron_markers as imark\nimport essence_markers as emark\nimport chest_essence as ce\nimport numpy as np\n\n# for 3440*1440 : (3182,19,3416,39)\nlocalpath = str(pathlib.Path(__file__).parent.resolve())\npytesseract.pytesseract.tesseract_cmd = rf\"{localpath}\\Tesseract-OCR\\tesseract.exe\"\n# node = [['7831', '1673'], ['9341', '2725']] \nnode = ce.chest_essence\ndef screen_loc_check(items, screen_img):\n\n z = ImageOps.crop(screen_img, (173,0,50,0))\n zpos = pytesseract.image_to_string(z, config=\"--psm 13 outputbase digits\")\n zpos = str(zpos).replace('\\n', '').replace('\\x0c', '').replace('(', '').replace(']', '').replace('[', '')\n if zpos.isdigit() and int(float(zpos)) >= 100:\n xcrop = (0,0,220,0)\n ycrop = (82,0,128,0)\n else:\n xcrop = (0,0,210,0)\n ycrop = (88,0,120,0)\n x = ImageOps.crop(screen_img, xcrop)\n y = ImageOps.crop(screen_img, ycrop)\n \n x = x.resize((150, 100))\n y = y.resize((150, 100))\n \n datax = np.array(x)\n datay = np.array(y)\n r1, g1, b1 = 235, 235, 165\n r1x, g1x, b1x = 110, 105, 70\n r2, g2, b2 = 0, 0, 0\n redx, greenx, bluex = datax[:,:,0], datax[:,:,1], datax[:,:,2]\n redy, greeny, bluey = datay[:,:,0], datay[:,:,1], datay[:,:,2]\n mask1x = (redx <= r1x) & (greenx <= g1x) & (bluex <= b1x)\n mask2x = (redx >= r1) & (greenx >= g1) & (bluex >= b1)\n mask1y = (redy <= r1x) & (greeny <= g1x) & (bluey <= b1x)\n mask2y = (redy >= r1) & (greeny >= g1) & (bluey >= b1)\n datax[:,:,:3][mask1x] = [r2, g2, b2]\n datax[:,:,:3][mask2x] = [r2, g2, b2]\n datay[:,:,:3][mask1y] = [r2, g2, b2]\n datay[:,:,:3][mask2y] = [r2, g2, b2]\n x = Image.fromarray(datax)\n y = Image.fromarray(datay)\n x.convert(\"L\")\n y.convert(\"L\")\n \n xpos = pytesseract.image_to_string(x, config=\"--psm 13 outputbase digits\")\n ypos = pytesseract.image_to_string(y, config=\"--psm 13 outputbase digits\")\n\n xpos = str(xpos).replace('\\n', '').replace('\\x0c', '').replace('(', '').replace(']', '').replace('[', '')\n ypos = str(ypos).replace('\\n', '').replace('\\x0c', '').replace('(', '').replace(']', '').replace('[', '')\n\n pos = [xpos, ypos]\n \n confirms = []\n for element in items:\n min_x = int(float(element[0]))-15\n max_x = int(float(element[0]))+15\n min_y = int(float(element[1]))-15\n max_y = int(float(element[1]))+15\n if pos[0].isdigit() and pos[1].isdigit():\n if int(float(pos[0])) >= min_x and int(float(pos[0])) <= max_x and int(float(pos[1])) >= min_y and int(float(pos[1])) <= max_y:\n confirms.append(True)\n else:\n confirms.append(False)\n else:\n pass\n\n if any(confirms):\n print(\"All Match\\n ---------\")\n print(pos[0], pos[1])\n return True\n else:\n print(\"Miss\\n ---------\")\n print(pos[0], pos[1])\n return False\n \nwhile \"NewWorld.exe\" in (p.name() for p in psutil.process_iter()):\n screen = ImageGrab.grab(bbox=(3191, 19, 3440, 39))\n remote_image = screen.convert('RGBA')\n remote_image.save('grabbed.png')\n \n if screen_loc_check(node, remote_image) is True:\n duration = 333\n freq = 880\n winsound.Beep(freq, duration)\n sleep(1)\n " ]
[ [ "numpy.array" ] ]
ljocha/DeepEI
[ "96aee49192ac805dda7971041c01e16c62cd3cbc" ]
[ "Scripts/read_msp.py" ]
[ "from DeepEI.utils import ms2vec, get_cdk_fingerprints, get_cdk_descriptors\nfrom matchms.importing import load_from_msp\nimport json\nimport numpy as np\nfrom scipy.sparse import csr_matrix, save_npz\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit.Chem.rdMolDescriptors import CalcExactMolWt\n\n# incompatible with DeepEI/utils.py\n#from pycdk.pycdk import MolFromSmiles, parser_formula, MolToFormula\n\nfrom concurrent.futures import ProcessPoolExecutor\nimport os\nfrom argparse import ArgumentParser\n\np = ArgumentParser()\np.add_argument('--ncores','-n',type=int,help='number of cores',default=1)\np.add_argument('--dest','-d',type=str,help='destination directory',default='.')\np.add_argument('infile',type=str,help='input file')\n\nargs = p.parse_args()\nfile_msp = args.infile\nncores = args.ncores\ndest = args.dest\n\nif not os.path.isdir(dest):\n\tprint(f\"{dest} does not exist\")\n\texit(1)\n\ndef process_mol(nm):\n\tn,m = nm\n\ttry:\n\t\tosmiles = m.get('smiles')\n\t\tmol = Chem.MolFromSmiles(osmiles)\n\t\tname = m.get('name')\n\t\tpeakindex = m.peaks.mz\n\t\tpeakintensity = m.peaks.intensities\n\n\t\tmolwt = CalcExactMolWt(mol)\n\t\tif molwt > 2000:\n\t\t\treturn {}\n\t\tsmiles = Chem.MolToSmiles(mol)\n# XXX: pycdk\n#\t\telements = parser_formula(MolToFormula(MolFromSmiles(smiles)))\n#\t\tfor e in elements:\n#\t\t\tif e not in ['C', 'H', 'O', 'N', 'S', 'P', 'Si', 'F', 'Cl', 'Br', 'I']:\n#\t\t\t\tprint(f\"{osmiles}: uncommon element {e}, skipping\")\n#\t\t\t\treturn {}\n\t\tmorgan_fp = np.array(AllChem.GetMorganFingerprintAsBitVect(mol, 2, nBits=4096))\n\t\tcdk_fp = get_cdk_fingerprints(smiles)\n\t\tcdk_des = np.array(get_cdk_descriptors(smiles))\n# XXX\n#\t\tri = list(m['RI'].values())\n\t\tpeak_vec = ms2vec(peakindex,peakintensity)\n\n\t\tprint(f\"{n}:{osmiles}: done\")\n\t\treturn { \n\t\t\t'smiles': smiles,\n\t\t\t'name': name,\n\t\t\t'peak_vec': peak_vec,\n#\t\t\t'ri': ri,\n\t\t\t'morgan_fp': morgan_fp,\n\t\t\t'cdk_fp': cdk_fp,\n\t\t\t'cdk_des': cdk_des,\n\t\t\t'molwt': molwt,\n\t\t}\n\texcept BaseException as e:\n\t\tprint(f\"{osmiles}: {e}\")\n\t\treturn {}\n\nprint(f\"Loading {file_msp}...\")\nall_mol = load_from_msp(file_msp)\nprint(\"done\")\n\nwith ProcessPoolExecutor(max_workers=ncores) as pool:\n\tall_output = pool.map(process_mol, enumerate(all_mol))\n\n# filter out empty entries\nall_output = list(filter(lambda x: x,all_output))\n\nall_smiles = list(map(lambda x: x['smiles'], all_output))\nPeak_data = np.array(list(map(lambda x: x['peak_vec'], all_output)))\n# RI_data = map(lambda x: x['smiles'], all_output)\nMorgan_fp = np.array(list(map(lambda x: x['morgan_fp'], all_output)))\nCDK_fp = np.array(list(map(lambda x: x['cdk_fp'], all_output)))\nCDK_des = np.array(list(map(lambda x: x['cdk_des'], all_output)))\nMolWt = np.array(list(map(lambda x: x['molwt'], all_output)))\n\nprint(\"writing output ...\")\nos.chdir(dest)\n\n# np.save('retention.npy', np.array(RI_data))\nnp.save('descriptor.npy', CDK_des)\nnp.save('molwt.npy', MolWt)\n\nPeak_data = csr_matrix(Peak_data)\nMorgan_fp = csr_matrix(Morgan_fp)\nCDK_fp = csr_matrix(CDK_fp)\n\nsave_npz('peakvec.npz', Peak_data)\nsave_npz('morgan.npz', Morgan_fp)\nsave_npz('fingerprints.npz', CDK_fp)\n\nwith open('all_smiles.json', 'w') as t:\n\tjson.dump(all_smiles, t)\n\nprint(\"done\")\n" ]
[ [ "numpy.save", "scipy.sparse.csr_matrix", "scipy.sparse.save_npz" ] ]
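read_msp.py above maps molecules to dicts in worker processes, filters out the empty dicts returned for failures, then pivots the survivors into per-field arrays before saving. The same collect-filter-pivot pattern on toy data (no RDKit or matchms required; values invented):

import numpy as np

outputs = [{'smiles': 'CCO', 'molwt': 46.07}, {}, {'smiles': 'C', 'molwt': 16.04}]
outputs = list(filter(lambda x: x, outputs))              # drop molecules that failed
all_smiles = list(map(lambda x: x['smiles'], outputs))
MolWt = np.array(list(map(lambda x: x['molwt'], outputs)))
print(all_smiles, MolWt)                                  # columns as written to .json / .npy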
wikimedia/research-reader-survey-analysis
[ "fbf4d71eebaf5ac5205713b0271f4ea51ab388f8" ]
[ "src/preprocessing/02_extractlogtraces/03_join_responses_with_ip.py" ]
[ "\"\"\"\nThis script joins:\n * the EventLogging (EL) data based on webrequest beacons (in my experience, most complete / simplest)\n * Google Forms survey responses\n * EditAttemptStep data based on hive tables\n\nThere are two outputs for each language:\n * CSV w/ survey responses + EL details (e.g., datetime, pageID) + webrequest details (e.g., client-IP, user-agent)\n * CSV w/ all approximate userhashes for matching against webrequest logs\n\"\"\"\n\nimport argparse\nimport csv\nimport os\n\nfrom geopy.distance import distance\nimport pandas as pd\n\n# hacky way to make sure utils is visible\nimport sys\nsys.path.append(os.path.abspath(os.path.abspath(os.path.dirname(__file__)) + '/../../..'))\n\nfrom src.utils import config\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--el_logs_fn\",\n default=config.quicksurvey_el_tsv,\n help=\"TSV with EventLogging data\")\n parser.add_argument(\"--survey_req_fn\",\n default=config.quicksurvey_requests_tsv,\n help=\"TSV with survey webrequests.\")\n parser.add_argument(\"--editattempt_fn\",\n default=config.edit_el_tsv,\n help=\"TSV filename for edit attempt data\")\n parser.add_argument(\"--ids_dir\",\n default=config.ids_folder,\n help=\"Folder to store survey respondent UserIDs\")\n parser.add_argument(\"--languages\",\n default=config.languages,\n nargs=\"*\",\n help=\"List of languages to process\")\n parser.add_argument(\"--responses_dir\",\n default=config.responses_folder,\n help=\"Folder to hold survey responses + associated webrequest\")\n parser.add_argument(\"--dist_threshold\",\n default=config.ip_dist_threshold,\n help=\"Max distance in km between Geonames point and IP point for match.\")\n parser.add_argument(\"--geonames_tsv\",\n default=config.geonames_tsv,\n help=\"Geonames TSV file w/ place and population information.\")\n\n args = parser.parse_args()\n\n requests = pd.read_csv(args.survey_req_fn, sep=\"\\t\")\n print(\"{0} total requests.\".format(len(requests)))\n\n requests.drop_duplicates(inplace=True)\n requests.sort_values(by=['response_type'], ascending=False, inplace=True)\n requests.set_index('pageview_token', inplace=True)\n print(\"{0} requests from {1} unique users after removing duplicates.\".format(len(requests),\n len(requests['userhash'].unique())))\n\n map_ip_to_population(requests, args.geonames_tsv, args.dist_threshold)\n\n# edit_attempts = pd.read_csv(args.editattempt_fn, sep=\"\\t\")\n# print(\"{0} edit actions across {1} users.\".format(len(edit_attempts), len(edit_attempts['userhash'].unique())))\n# edit_attempts = edit_attempts.groupby('userhash').apply(group_edit_actions)\n\n if not os.path.isdir(args.ids_dir):\n print(\"Creating directory: {0}\".format(os.path.abspath(args.ids_dir)))\n os.mkdir(args.ids_dir)\n\n if not os.path.isdir(args.responses_dir):\n print(\"Creating directory: {0}\".format(os.path.abspath(args.responses_dir)))\n os.mkdir(args.responses_dir)\n\n all_ids = []\n for lang in args.languages:\n recoded_fn = os.path.join(config.data_folder, \"recoded\", \"responses_{0}_recoded.csv\".format(lang))\n surv_responses = pd.read_csv(recoded_fn, sep = '\\t')\n surv_responses.set_index('survey_id', inplace=True)\n print(\"**********\")\n print(\"Google Responses in {0}: {1}\".format(lang, len(surv_responses)))\n\n # merge in quicksurveys eventlogging -- priority to yes to take survey, no to take survey, initiation\n srv_el_req = pd.merge(surv_responses, requests, how=\"left\", left_index=True, right_index=True)\n srv_el_req = 
srv_el_req[~srv_el_req.index.duplicated(keep='first')]\n print(\"Breakdown of ability to match up Google responses with EL: (w/o initiation)\")\n print(srv_el_req['response_type'].value_counts(dropna=False))\n print(\"Breakdown of ability to match up Google responses with EL (w/ initiation):\")\n print(srv_el_req['country'].value_counts(dropna=False))\n\n # merge in edit attempt data\n# srv_el_req = srv_el_req.join(edit_attempts, how=\"left\", on=\"userhash\")\n# print(\"Responses w/ associated edit data (is anon):\")\n# print(srv_el_req['is_anon'].value_counts(dropna=False))\n\n # Write responses+EL+webrequest data to TSV\n output_merged_data = os.path.join(args.responses_dir, \"responses_with_el_{0}.csv\".format(lang))\n srv_el_req.to_csv(output_merged_data, sep='\\t')\n\n # Write userIDs associated with completed surveys to file\n output_respondent_ids = os.path.join(args.ids_dir, \"ids_{0}.csv\".format(lang))\n ids = srv_el_req[\"userhash\"]\n ids = ids.dropna()\n ids.to_csv(output_respondent_ids, index=False, header=False)\n print(\"Complete IDs:\", len(ids))\n\n all_ids.extend(list(ids.values))\n\n if all_ids:\n with open(config.all_ids_csv, 'w') as fout:\n csvwriter = csv.writer(fout)\n for ip_ua in all_ids:\n csvwriter.writerow([ip_ua])\n\n\ndef group_edit_actions(user_data):\n is_anon = any(user_data['anon'])\n edit_count = user_data['user_edit'].value_counts().index[0]\n editor_interface = user_data['editor_interface'].value_counts().index[0]\n return pd.Series({'is_anon': is_anon,\n 'edit_count': edit_count,\n 'editor_interface':editor_interface})\n\n\ndef map_ip_to_population(df, geonames_tsv, dist_threshold):\n print(\"Loading geonames lookup\")\n geonames = get_geonames_map(geonames_tsv)\n print(\"Calculating populations\")\n df['population'] = df.apply(lambda x: lookup_row(x, geonames, dist_threshold=dist_threshold), axis=1)\n print(\"Success rate:\", (df['population'] >= 1).sum() / df['population'].count())\n print(\"Breakdown of matches:\", df['population'].apply(lambda x: 1 if x > 0 else x).value_counts(dropna=False))\n try:\n ipdump_fn = geonames_tsv.replace('.txt', '_ipmatch.tsv')\n df[['city', 'country_code', 'lat', 'lon', 'population']].to_csv(ipdump_fn, header=True, index=False, sep='\\t')\n print(\"Dumped IP->population data to:\", ipdump_fn)\n except Exception:\n print(\"Failed to dump IP->population data.\")\n\ndef calc_dist(pt1, pt2):\n return distance(pt1, pt2).kilometers\n\ndef get_geonames_map(allcountries):\n geonames_header = ['geonameid', 'name', 'asciiname', 'alternatenames',\n 'latitude', 'longitude', 'feature class', 'feature code',\n 'country code', 'cc2', 'admin1 code', 'admin2 code', 'admin3 code', 'admin4 code',\n 'population', 'elevation', 'dem', 'timezone', 'modification date']\n country_idx = geonames_header.index('country code')\n pop_idx = geonames_header.index('population')\n lat_idx = geonames_header.index('latitude')\n lon_idx = geonames_header.index('longitude')\n name_idx = geonames_header.index('name')\n altname_idx = geonames_header.index('alternatenames')\n feature_idx = geonames_header.index('feature class')\n\n lookup = {}\n num_countries = 0\n num_places = 0\n num_pops = 0\n nonzero_pops = 0\n duplicates = 0\n with open(allcountries, 'r') as fin:\n tsvreader = csv.reader(fin, delimiter='\\t')\n for line in tsvreader:\n feature = line[feature_idx]\n try:\n population = int(line[pop_idx])\n except ValueError:\n population = -1\n if (feature == 'A' and population >= 0) or feature == 'P':\n pt = (float(line[lat_idx]), 
float(line[lon_idx]))\n                names = [line[name_idx]]\n                if line[altname_idx]:\n                    names.extend(line[altname_idx].split(','))\n                country = line[country_idx]\n                if country not in lookup:\n                    num_countries += 1\n                    lookup[country] = {}\n                for n in names:\n                    if n in lookup[country]:\n                        if pt in lookup[country][n]:\n                            existing_pop = lookup[country][n][pt]\n                            if not population:\n                                continue\n                            elif existing_pop == population:\n                                continue\n                            elif not existing_pop:\n                                lookup[country][n][pt] = population\n                                num_pops += 1\n                            else:\n                                duplicates += 1\n                        else:\n                            lookup[country][n][pt] = population\n                            num_places += 1\n                            if num_places % 500000 == 0:\n                                print(num_places, \"added.\")\n                            if population >= 0:\n                                num_pops += 1\n                            if population == 0:\n                                nonzero_pops += 1\n                    else:\n                        lookup[country][n] = {pt:population}\n                        num_places += 1\n                        if num_places % 500000 == 0:\n                            print(num_places, \"added.\")\n                        if population >= 0:\n                            num_pops += 1\n                        if population == 0:\n                            nonzero_pops += 1\n    print(\"{0} countries. {1} places. {2} places w/ population. {3} w/ pop 0. {4} duplicates\".format(\n        num_countries, num_places, num_pops, nonzero_pops, duplicates))\n    # add location-based lookup index for places w/ unknown cities but that still have points\n    locs_to_add = {}\n    for cc in lookup:\n        for n in lookup[cc]:\n            for loc in lookup[cc][n]:\n                simple_loc = (int(loc[0]), int(loc[1]))\n                if simple_loc not in locs_to_add:\n                    locs_to_add[simple_loc] = set()\n                locs_to_add[simple_loc].add((cc, n))\n    for l in locs_to_add:\n        lookup[l] = locs_to_add[l]\n    return lookup\n\ndef lookup_row(x, geonames, dist_threshold):\n    country = x['country_code']\n    city = x['city']\n    pt = (float(x['lat']), float(x['lon']))\n    # no city info, use lat-lon as backup\n    if city.lower() == \"unknown\":\n        return lookup_pt(pt, geonames, dist_threshold)\n    # use city to geocode and then lat-lon to filter\n    else:\n        try:\n            candidates = geonames[country][city]\n            within_thres = []\n            # find all potential place matches\n            for cpt, cpop in candidates.items():\n                if calc_dist(pt, cpt) < dist_threshold:\n                    within_thres.append(cpop)\n            # return potential match with highest population (arbitrary choice but empirically seems to matter little)\n            if within_thres:\n                # Success: found a matching place w/i distance threshold\n                # Possibilities:\n                # >0 == have a real population\n                # 0 if geonames listed that\n                # -1 population if geonames didn't provide a number\n                return max(within_thres)\n            else:\n                # found a matching name but was not close enough\n                backup = lookup_pt(pt, geonames, dist_threshold)\n                if backup > 0:\n                    return backup\n                else:\n                    return -2\n        except KeyError:\n            # did not find a matching name\n            return lookup_pt(pt, geonames, dist_threshold)\n\ndef lookup_pt(pt, geonames, dist_threshold):\n    simple_pt = (int(pt[0]), int(pt[1]))\n    closest_with_pop = float('inf')\n    pop = -3\n    for cc, name in geonames.get(simple_pt, []):\n        for cpt, cpop in geonames[cc][name].items():  # candidate (lat, lon) -> population pairs\n            if cpop > 0:\n                cand_dist = calc_dist(pt, cpt)\n                if cand_dist < dist_threshold and cand_dist < closest_with_pop:\n                    closest_with_pop = cand_dist\n                    pop = cpop\n    return pop\n\nif __name__ == \"__main__\":\n    main()" ]
[ [ "pandas.read_csv", "pandas.Series", "pandas.merge" ] ]
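lookup_row() above geocodes by (country, city) and then keeps only candidate points within dist_threshold kilometers, returning the largest matching population. A toy run of just that distance filter (the place data here is invented; only the geopy call matches the script):

from geopy.distance import distance

geonames = {'AR': {'La Plata': {(-34.92, -57.95): 699523, (-26.84, -60.44): 0}}}
ip_pt = (-34.90, -57.93)
within = [pop for pt, pop in geonames['AR']['La Plata'].items()
          if distance(ip_pt, pt).kilometers < 30]   # dist_threshold analogue
print(max(within) if within else -2)                # -2 mirrors the "too far" code above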
gcba/IATos
[ "d42cffea313170bb249edcadb0776f7a6d368654" ]
[ "algos/prediction/transformers.py" ]
[ "import librosa\nimport numpy as np\n\nfrom PIL import Image\nfrom typing import Optional\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom matplotlib.cm import ScalarMappable\n\n__all__ = [\n \"Denoising\",\n \"MelSpectogram\",\n \"ColoredSpectogram\",\n]\n\n\nclass BaseTransformer(BaseEstimator, TransformerMixin):\n def __init__(self):\n pass\n\n def fit(self, X, y=None):\n return self\n\n @classmethod\n def read_params(cls, params):\n return cls(**params)\n\n\nclass Denoising(BaseTransformer):\n \"\"\"Placeholder para la capa \"denoising\" actualmente en codigo MATLAB\"\"\"\n\n def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:\n \"\"\"Codigo aqui\"\"\"\n return X\n\n\nclass MelSpectogram(BaseTransformer):\n \"\"\"Transforma una señal en un espectograma con escala de Mel utilizando librosa\n \n Parameters\n ----------\n\n Los parametros para instanciar son los que se pasan a `librosa.feature.melspectogram`\n y a `librosa.power_to_db`.\n\n Returns\n -------\n\n np.array : Numpy array del espectograma con valores en decibeles.\n\n \"\"\"\n\n def __init__(\n self,\n sr: int,\n n_fft: int,\n hop_length: int,\n n_mels: int,\n fmin: int,\n fmax: int,\n ref: str,\n T: bool,\n as_ratio: bool,\n ):\n self.sr = sr\n self.n_fft = n_fft\n self.hop_length = hop_length\n self.n_mels = n_mels\n self.fmin = fmin\n self.fmax = fmax\n self.ref = ref\n self.T = T\n self.as_ratio = as_ratio\n\n def transform(self, X: np.array, y: Optional[np.array] = None) -> np.array:\n X_ = self._mel_spec(X)\n if self.T: # backward compatibility\n X_ = X_.T\n return librosa.power_to_db(X_, ref=getattr(np, self.ref))\n\n def _mel_spec(self, X: np.array) -> np.array:\n hop = self.hop_length\n if self.as_ratio: # backward compatibility\n hop = X.size // self.hop_length\n return librosa.feature.melspectrogram(\n y=X, sr=self.sr, hop_length=hop, n_mels=self.n_mels\n )\n\n\nclass ColoredSpectogram(BaseTransformer):\n \"\"\"Transforma una matriz de valores a una imagen con escala de colores.\n \n Parameters\n ----------\n cmap : str\n Escala de colores accesible desde `matplotlib.cm.get_cmap`.\n\n Returns\n -------\n PIL.Image : Imagen en modo RGB.\n\n \"\"\"\n\n def __init__(self, cmap: str):\n self.cmap = cmap\n\n def transform(self, X: np.array, y: Optional[np.array] = None) -> Image:\n X_ = ScalarMappable(cmap=self.cmap).to_rgba(X, bytes=True)\n return Image.fromarray(X_).convert(\"RGB\")\n" ]
[ [ "matplotlib.cm.ScalarMappable" ] ]
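The three transformers above are sklearn-compatible, so they chain into a Pipeline. A sketch with assumed parameter values (this file does not show the project's real config, so every number below is a placeholder; the class names are assumed to be in scope from the module above):

import numpy as np
from sklearn.pipeline import Pipeline

pipe = Pipeline([
    ('denoise', Denoising()),
    ('mel', MelSpectogram(sr=22050, n_fft=2048, hop_length=512, n_mels=128,
                          fmin=0, fmax=8000, ref='max', T=False, as_ratio=False)),
    ('color', ColoredSpectogram(cmap='magma')),
])
signal = np.random.randn(22050).astype(np.float32)  # one second of placeholder audio
image = pipe.fit_transform(signal)                   # PIL Image in RGB mode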
jacopok/notes
[ "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586" ]
[ "ap_third_semester/compact_objects/figures/roche-lobe-radius.py" ]
[ "#%%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom astropy.visualization import astropy_mpl_style\nplt.style.use(astropy_mpl_style)\nfrom matplotlib import rc\nrc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\nrc('text.latex', preamble=r'''\\usepackage{amsmath}\n \\usepackage{physics}\n \\usepackage{siunitx}\n ''')\nTHR = .5\nWIDTH = 0\n\n# def weight(q):\n# if WIDTH>0:\n# offset = 1/2 - THR / WIDTH\n# return (np.piecewise(q, \n# condlist=[\n# q < THR - WIDTH / 2,\n# q > THR - WIDTH / 2 and q < THR + WIDTH / 2 ,\n# q > THR + WIDTH / 2,\n# ],\n# funclist=[\n# 0,\n# lambda x: x / WIDTH + offset,\n# 1\n# ]\n# ))\n# else:\n# return (np.piecewise(q,\n# condlist=[q < THR, q >= THR],\n# funclist=[0, 1]\n# ))\n\ndef f1(q):\n return (.46224 * (q / (1 + q))**(1 / 3))\n \ndef f2(q):\n return (.38 + .2 * np.log10(q))\n\n\ndef f(q):\n if q < 0.5:\n return (f1(q))\n else:\n return(f2(q))\n\nf = np.vectorize(f, signature='()->()')\n\n#%% \n\nqs = np.linspace(0, 8, num=1000)\n\nf_q = f(qs)\n\n# plt.plot(qs, f(qs))\n# plt.xlabel('$q = M_2 / M_1$')\n# plt.ylabel('$R_{\\\\text{{lobe}}} / a$')\n# plt.savefig('roche-lobe-radius.pdf', format = 'pdf')\n\n#%%\n\ndef a(q):\n return((1+q)**4 / q**2)\n\na_q = a(qs)\n\nplt.plot(qs, np.abs(np.gradient(f_q, qs) / f_q), label='$\\\\abs{\\\\Delta \\\\log f}$')\nplt.plot(qs, np.abs(np.gradient(a_q, qs) / a_q), label='$\\\\abs{\\\\Delta \\\\log a}$')\nplt.plot(qs, np.gradient(a_q, qs) / a_q + np.gradient(f_q, qs) / f_q, label='$\\\\Delta \\\\log a + \\\\Delta \\\\log f$', ls='--')\nplt.axvline(1, label='$q = 1$', ls=':', c='black')\n\n\nplt.xlabel('$q = M_2 / M_1$')\nplt.ylabel('relative variation')\nplt.legend()\nplt.yscale('log')\nplt.savefig('roche-lobe-relative-corrections.pdf')\nplt.show()\n\n#%%\n\nqs = np.linspace(0, 5/4, num=200)\n\ndef circ(q):\n return((.5 - .227 * np.log10(q))**4 * (1+q))\n\nplt.plot(qs, f(1 / qs), label='Roche Lobe radius')\nplt.plot(qs, circ(qs), label='Circularization radius')\nplt.xlabel('$q$')\nplt.ylim(0,1)\nplt.legend()\nplt.savefig('roche-vs-circularization.pdf')\n# %%\n" ]
[ [ "matplotlib.pyplot.style.use", "matplotlib.pyplot.axvline", "numpy.vectorize", "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.yscale", "matplotlib.rc", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.log10", "matplotlib.pyplot.ylim", "numpy.gradient", "numpy.linspace", "matplotlib.pyplot.xlabel" ] ]
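Written out, the piecewise Roche-lobe approximation implemented by f1 and f2 in the script above is:

\[
\frac{R_{\text{lobe}}}{a} =
\begin{cases}
0.46224 \left( \dfrac{q}{1+q} \right)^{1/3} & q < 0.5 \\[1ex]
0.38 + 0.2 \log_{10} q & q \ge 0.5
\end{cases}
\qquad q = \frac{M_2}{M_1}
\]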
deepbluesea/transformers
[ "11a2317986aad6e9a72f542e31344cfb7c94cbab" ]
[ "examples/distillation/distiller.py" ]
[ "# coding=utf-8\n# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" The distiller to distil the student.\n Adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)\n\"\"\"\nimport os\nimport math\nimport psutil\nimport time\nfrom tensorboardX import SummaryWriter\nfrom tqdm import trange, tqdm\nimport numpy as np\nimport psutil\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.optim import AdamW\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data import RandomSampler, BatchSampler, DataLoader\n\nfrom transformers import WarmupLinearSchedule\n\nfrom utils import logger\nfrom lm_seqs_dataset import LmSeqsDataset\nfrom grouped_batch_sampler import GroupedBatchSampler, create_lengths_groups\n\nclass Distiller:\n def __init__(self,\n params: dict,\n dataset: LmSeqsDataset,\n token_probs: torch.tensor,\n student: nn.Module,\n teacher: nn.Module):\n logger.info('Initializing Distiller')\n self.params = params\n self.dump_path = params.dump_path\n self.multi_gpu = params.multi_gpu\n self.fp16 = params.fp16\n\n self.student = student\n self.teacher = teacher\n\n self.student_config = student.config\n self.vocab_size = student.config.vocab_size\n\n if params.n_gpu <= 1:\n sampler = RandomSampler(dataset)\n else:\n sampler = DistributedSampler(dataset)\n\n if params.group_by_size:\n groups = create_lengths_groups(lengths=dataset.lengths, k=params.max_model_input_size)\n sampler = GroupedBatchSampler(sampler=sampler, group_ids=groups, batch_size=params.batch_size)\n else:\n sampler = BatchSampler(sampler=sampler, batch_size=params.batch_size, drop_last=False)\n\n self.dataloader = DataLoader(dataset=dataset,\n batch_sampler=sampler,\n collate_fn=dataset.batch_sequences)\n\n self.temperature = params.temperature\n assert self.temperature > 0.\n\n self.alpha_ce = params.alpha_ce\n self.alpha_mlm = params.alpha_mlm\n self.alpha_clm = params.alpha_clm\n self.alpha_mse = params.alpha_mse\n self.alpha_cos = params.alpha_cos\n\n self.mlm = params.mlm\n if self.mlm:\n logger.info(f'Using MLM loss for LM step.')\n self.mlm_mask_prop = params.mlm_mask_prop\n assert 0.0 <= self.mlm_mask_prop <= 1.0\n assert params.word_mask + params.word_keep + params.word_rand == 1.0\n self.pred_probs = torch.FloatTensor([params.word_mask, params.word_keep, params.word_rand])\n self.pred_probs = self.pred_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else self.pred_probs\n self.token_probs = token_probs.to(f'cuda:{params.local_rank}') if params.n_gpu > 0 else token_probs\n if self.fp16:\n self.pred_probs = self.pred_probs.half()\n self.token_probs = self.token_probs.half()\n else:\n logger.info(f'Using CLM loss for LM step.')\n\n self.epoch = 0\n self.n_iter = 0\n self.n_total_iter = 0\n self.n_sequences_epoch = 0\n self.total_loss_epoch = 0\n self.last_loss = 0\n self.last_loss_ce = 0\n self.last_loss_mlm = 0\n self.last_loss_clm = 0\n if 
self.alpha_mse > 0.: self.last_loss_mse = 0\n if self.alpha_cos > 0.: self.last_loss_cos = 0\n self.last_log = 0\n\n self.ce_loss_fct = nn.KLDivLoss(reduction='batchmean')\n self.lm_loss_fct = nn.CrossEntropyLoss(ignore_index=-1)\n if self.alpha_mse > 0.:\n self.mse_loss_fct = nn.MSELoss(reduction='sum')\n if self.alpha_cos > 0.:\n self.cosine_loss_fct = nn.CosineEmbeddingLoss(reduction='mean')\n\n logger.info('--- Initializing model optimizer')\n assert params.gradient_accumulation_steps >= 1\n self.num_steps_epoch = len(self.dataloader)\n num_train_optimization_steps = int(self.num_steps_epoch / params.gradient_accumulation_steps * params.n_epoch) + 1\n\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in student.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': params.weight_decay},\n {'params': [p for n, p in student.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad], 'weight_decay': 0.0}\n ]\n logger.info(\"------ Number of trainable parameters (student): %i\" % sum([p.numel() for p in self.student.parameters() if p.requires_grad]))\n logger.info(\"------ Number of parameters (student): %i\" % sum([p.numel() for p in self.student.parameters()]))\n self.optimizer = AdamW(optimizer_grouped_parameters,\n lr=params.learning_rate,\n eps=params.adam_epsilon,\n betas=(0.9, 0.98))\n\n warmup_steps = math.ceil(num_train_optimization_steps * params.warmup_prop)\n self.scheduler = WarmupLinearSchedule(self.optimizer,\n warmup_steps=warmup_steps,\n t_total=num_train_optimization_steps)\n\n if self.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n logger.info(f\"Using fp16 training: {self.params.fp16_opt_level} level\")\n self.student, self.optimizer = amp.initialize(self.student,\n self.optimizer,\n opt_level=self.params.fp16_opt_level)\n self.teacher = self.teacher.half()\n\n if self.multi_gpu:\n if self.fp16:\n from apex.parallel import DistributedDataParallel\n logger.info(\"Using apex.parallel.DistributedDataParallel for distributed training.\")\n self.student = DistributedDataParallel(self.student)\n else:\n from torch.nn.parallel import DistributedDataParallel\n logger.info(\"Using nn.parallel.DistributedDataParallel for distributed training.\")\n self.student = DistributedDataParallel(self.student,\n device_ids=[params.local_rank],\n output_device=params.local_rank,\n find_unused_parameters=True)\n\n self.is_master = params.is_master\n if self.is_master:\n logger.info('--- Initializing Tensorboard')\n self.tensorboard = SummaryWriter(log_dir=os.path.join(self.dump_path, 'log', 'train'))\n self.tensorboard.add_text(tag='config/training', text_string=str(self.params), global_step=0)\n self.tensorboard.add_text(tag='config/student', text_string=str(self.student_config), global_step=0)\n\n def prepare_batch_mlm(self,\n batch):\n \"\"\"\n Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the masked label for MLM.\n\n Input:\n ------\n batch: `Tuple`\n token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. 
It is padded.\n lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.\n\n Output:\n -------\n token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM.\n attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention.\n mlm_labels: `torch.tensor(bs, seq_length)` - The masked languge modeling labels. There is a -1 where there is nothing to predict.\n \"\"\"\n token_ids, lengths = batch\n token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)\n assert token_ids.size(0) == lengths.size(0)\n\n attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])\n\n bs, max_seq_len = token_ids.size()\n mlm_labels = token_ids.new(token_ids.size()).copy_(token_ids)\n\n x_prob = self.token_probs[token_ids.flatten()]\n n_tgt = math.ceil(self.mlm_mask_prop * lengths.sum().item())\n tgt_ids = torch.multinomial(x_prob / x_prob.sum(), n_tgt, replacement=False)\n pred_mask = torch.zeros(bs * max_seq_len, dtype=torch.bool, device=token_ids.device) # previously `dtype=torch.uint8`, cf pytorch 1.2.0 compatibility\n pred_mask[tgt_ids] = 1\n pred_mask = pred_mask.view(bs, max_seq_len)\n\n pred_mask[token_ids == self.params.special_tok_ids['pad_token']] = 0\n\n # mask a number of words == 0 [8] (faster with fp16)\n if self.fp16:\n n1 = pred_mask.sum().item()\n if n1 > 8:\n pred_mask = pred_mask.view(-1)\n n2 = max(n1 % 8, 8 * (n1 // 8))\n if n2 != n1:\n pred_mask[torch.nonzero(pred_mask).view(-1)[:n1-n2]] = 0\n pred_mask = pred_mask.view(bs, max_seq_len)\n assert pred_mask.sum().item() % 8 == 0, pred_mask.sum().item()\n\n _token_ids_real = token_ids[pred_mask]\n _token_ids_rand = _token_ids_real.clone().random_(self.vocab_size)\n _token_ids_mask = _token_ids_real.clone().fill_(self.params.special_tok_ids['mask_token'])\n probs = torch.multinomial(self.pred_probs, len(_token_ids_real), replacement=True)\n _token_ids = _token_ids_mask * (probs == 0).long() + _token_ids_real * (probs == 1).long() + _token_ids_rand * (probs == 2).long()\n token_ids = token_ids.masked_scatter(pred_mask, _token_ids)\n\n mlm_labels[~pred_mask] = -1 # previously `mlm_labels[1-pred_mask] = -1`, cf pytorch 1.2.0 compatibility\n\n # sanity checks\n assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size\n\n return token_ids, attn_mask, mlm_labels\n\n def prepare_batch_clm(self,\n batch):\n \"\"\"\n Prepare the batch: from the token_ids and the lenghts, compute the attention mask and the labels for CLM.\n\n Input:\n ------\n batch: `Tuple`\n token_ids: `torch.tensor(bs, seq_length)` - The token ids for each of the sequence. It is padded.\n lengths: `torch.tensor(bs)` - The lengths of each of the sequences in the batch.\n\n Output:\n -------\n token_ids: `torch.tensor(bs, seq_length)` - The token ids after the modifications for MLM.\n attn_mask: `torch.tensor(bs, seq_length)` - The attention mask for the self-attention.\n clm_labels: `torch.tensor(bs, seq_length)` - The causal languge modeling labels. 
There is a -1 where there is nothing to predict.\n \"\"\"\n token_ids, lengths = batch\n token_ids, lengths = self.round_batch(x=token_ids, lengths=lengths)\n assert token_ids.size(0) == lengths.size(0)\n\n attn_mask = (torch.arange(token_ids.size(1), dtype=torch.long, device=lengths.device) < lengths[:, None])\n clm_labels = token_ids.new(token_ids.size()).copy_(token_ids)\n clm_labels[~attn_mask] = -1 # previously `clm_labels[1-attn_mask] = -1`, cf pytorch 1.2.0 compatibility\n\n # sanity checks\n assert 0 <= token_ids.min() <= token_ids.max() < self.vocab_size\n\n return token_ids, attn_mask, clm_labels\n\n def round_batch(self,\n x: torch.tensor,\n lengths: torch.tensor):\n \"\"\"\n For float16 only.\n Sub-sample sentences in a batch, and add padding, so that each dimension is a multiple of 8.\n\n Input:\n ------\n x: `torch.tensor(bs, seq_length)` - The token ids.\n lengths: `torch.tensor(bs, seq_length)` - The lengths of each of the sequence in the batch.\n\n Output:\n -------\n x: `torch.tensor(new_bs, new_seq_length)` - The updated token ids.\n lengths: `torch.tensor(new_bs, new_seq_length)` - The updated lengths.\n \"\"\"\n if not self.fp16 or len(lengths) < 8:\n return x, lengths\n\n # number of sentences == 0 [8]\n bs1 = len(lengths)\n bs2 = 8 * (bs1 // 8)\n assert bs2 > 0 and bs2 % 8 == 0\n if bs1 != bs2:\n idx = torch.randperm(bs1)[:bs2]\n lengths = lengths[idx]\n slen = lengths.max().item()\n x = x[idx, :slen]\n else:\n idx = None\n\n # sequence length == 0 [8]\n ml1 = x.size(1)\n if ml1 % 8 != 0:\n pad = 8 - (ml1 % 8)\n ml2 = ml1 + pad\n if self.mlm:\n pad_id = self.params.special_tok_ids['pad_token']\n else:\n pad_id = self.params.special_tok_ids['unk_token']\n padding_tensor = torch.zeros(bs2, pad, dtype=torch.long, device=x.device).fill_(pad_id)\n x = torch.cat([x, padding_tensor], 1)\n assert x.size() == (bs2, ml2)\n\n assert x.size(0) % 8 == 0\n assert x.size(1) % 8 == 0\n return x, lengths\n\n def train(self):\n \"\"\"\n The real training loop.\n \"\"\"\n if self.is_master: logger.info('Starting training')\n self.last_log = time.time()\n self.student.train()\n self.teacher.eval()\n\n for _ in range(self.params.n_epoch):\n if self.is_master: logger.info(f'--- Starting epoch {self.epoch}/{self.params.n_epoch-1}')\n if self.multi_gpu:\n torch.distributed.barrier()\n\n iter_bar = tqdm(self.dataloader, desc=\"-Iter\", disable=self.params.local_rank not in [-1, 0])\n for batch in iter_bar:\n if self.params.n_gpu > 0:\n batch = tuple(t.to(f'cuda:{self.params.local_rank}') for t in batch)\n\n if self.mlm:\n token_ids, attn_mask, lm_labels = self.prepare_batch_mlm(batch=batch)\n else:\n token_ids, attn_mask, lm_labels = self.prepare_batch_clm(batch=batch)\n self.step(input_ids=token_ids, attention_mask=attn_mask, lm_labels=lm_labels)\n\n iter_bar.update()\n iter_bar.set_postfix({'Last_loss': f'{self.last_loss:.2f}',\n 'Avg_cum_loss': f'{self.total_loss_epoch/self.n_iter:.2f}'})\n iter_bar.close()\n\n if self.is_master: logger.info(f'--- Ending epoch {self.epoch}/{self.params.n_epoch-1}')\n self.end_epoch()\n\n if self.is_master:\n logger.info(f'Save very last checkpoint as `pytorch_model.bin`.')\n self.save_checkpoint(checkpoint_name=f'pytorch_model.bin')\n logger.info('Training is finished')\n\n def step(self,\n input_ids: torch.tensor,\n attention_mask: torch.tensor,\n lm_labels: torch.tensor):\n \"\"\"\n One optimization step: forward of student AND teacher, backward on the loss (for gradient accumulation),\n and possibly a parameter update (depending on the gradient 
accumulation).\n\n Input:\n ------\n input_ids: `torch.tensor(bs, seq_length)` - The token ids.\n attention_mask: `torch.tensor(bs, seq_length)` - The attention mask for self attention.\n lm_labels: `torch.tensor(bs, seq_length)` - The language modeling labels (mlm labels for MLM and clm labels for CLM).\n \"\"\"\n if self.mlm:\n s_logits, s_hidden_states = self.student(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size)\n with torch.no_grad():\n t_logits, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=attention_mask) # (bs, seq_length, voc_size)\n else:\n s_logits, _, s_hidden_states = self.student(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size)\n with torch.no_grad():\n t_logits, _, t_hidden_states = self.teacher(input_ids=input_ids, attention_mask=None) # (bs, seq_length, voc_size)\n assert s_logits.size() == t_logits.size()\n\n #https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100\n #https://github.com/peterliht/knowledge-distillation-pytorch/issues/2\n if self.params.restrict_ce_to_mask:\n mask = (lm_labels>-1).unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size)\n else:\n mask = attention_mask.unsqueeze(-1).expand_as(s_logits) # (bs, seq_lenth, voc_size)\n s_logits_slct = torch.masked_select(s_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask\n s_logits_slct = s_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask\n t_logits_slct = torch.masked_select(t_logits, mask) # (bs * seq_length * voc_size) modulo the 1s in mask\n t_logits_slct = t_logits_slct.view(-1, s_logits.size(-1)) # (bs * seq_length, voc_size) modulo the 1s in mask\n assert t_logits_slct.size() == s_logits_slct.size()\n\n loss_ce = self.ce_loss_fct(F.log_softmax(s_logits_slct/self.temperature, dim=-1),\n F.softmax(t_logits_slct/self.temperature, dim=-1)) * (self.temperature)**2\n loss = self.alpha_ce*loss_ce\n\n if self.alpha_mlm > 0.:\n loss_mlm = self.lm_loss_fct(s_logits.view(-1, s_logits.size(-1)), lm_labels.view(-1))\n loss += self.alpha_mlm * loss_mlm\n if self.alpha_clm > 0.:\n shift_logits = s_logits[..., :-1, :].contiguous()\n shift_labels = lm_labels[..., 1:].contiguous()\n loss_clm = self.lm_loss_fct(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n loss += self.alpha_clm * loss_clm\n\n if self.alpha_mse > 0.:\n loss_mse = self.mse_loss_fct(s_logits_slct, t_logits_slct)/s_logits_slct.size(0) # Reproducing batchmean reduction\n loss += self.alpha_mse * loss_mse\n if self.alpha_cos > 0.:\n s_hidden_states = s_hidden_states[-1] # (bs, seq_length, dim)\n t_hidden_states = t_hidden_states[-1] # (bs, seq_length, dim)\n mask = attention_mask.unsqueeze(-1).expand_as(s_hidden_states) # (bs, seq_length, dim)\n assert s_hidden_states.size() == t_hidden_states.size()\n dim = s_hidden_states.size(-1)\n \n s_hidden_states_slct = torch.masked_select(s_hidden_states, mask) # (bs * seq_length * dim)\n s_hidden_states_slct = s_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim)\n t_hidden_states_slct = torch.masked_select(t_hidden_states, mask) # (bs * seq_length * dim)\n t_hidden_states_slct = t_hidden_states_slct.view(-1, dim) # (bs * seq_length, dim)\n \n target = s_hidden_states_slct.new(s_hidden_states_slct.size(0)).fill_(1) # (bs * seq_length,)\n loss_cos = self.cosine_loss_fct(s_hidden_states_slct, t_hidden_states_slct, target)\n loss += self.alpha_cos * loss_cos\n\n self.total_loss_epoch += loss.item()\n self.last_loss 
= loss.item()\n self.last_loss_ce = loss_ce.item()\n if self.alpha_mlm > 0.:\n self.last_loss_mlm = loss_mlm.item()\n if self.alpha_clm > 0.:\n self.last_loss_clm = loss_clm.item()\n if self.alpha_mse > 0.:\n self.last_loss_mse = loss_mse.item()\n if self.alpha_cos > 0.:\n self.last_loss_cos = loss_cos.item()\n\n self.optimize(loss)\n\n self.n_sequences_epoch += input_ids.size(0)\n\n def optimize(self,\n loss):\n \"\"\"\n Normalization on the loss (gradient accumulation or distributed training), followed by\n backward pass on the loss, possibly followed by a parameter update (depending on the gradient accumulation).\n Also update the metrics for tensorboard.\n \"\"\"\n # Check for NaN\n if (loss != loss).data.any():\n logger.error('NaN detected')\n exit()\n\n if self.multi_gpu:\n loss = loss.mean()\n if self.params.gradient_accumulation_steps > 1:\n loss = loss / self.params.gradient_accumulation_steps\n\n if self.fp16:\n from apex import amp\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n self.iter()\n if self.n_iter % self.params.gradient_accumulation_steps == 0:\n if self.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.params.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(self.student.parameters(), self.params.max_grad_norm)\n self.optimizer.step()\n self.optimizer.zero_grad()\n self.scheduler.step()\n\n def iter(self):\n \"\"\"\n Update global counts, write to tensorboard and save checkpoint.\n \"\"\"\n self.n_iter += 1\n self.n_total_iter += 1\n\n if self.n_total_iter % self.params.log_interval == 0:\n self.log_tensorboard()\n self.last_log = time.time()\n if self.n_total_iter % self.params.checkpoint_interval == 0:\n self.save_checkpoint()\n\n def log_tensorboard(self):\n \"\"\"\n Log into tensorboard. 
Only by the master process.\n \"\"\"\n if not self.is_master:\n return\n\n for param_name, param in self.student.named_parameters():\n self.tensorboard.add_scalar(tag='parameter_mean/' + param_name, scalar_value=param.data.mean(), global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag='parameter_std/' + param_name, scalar_value=param.data.std(), global_step=self.n_total_iter)\n if param.grad is None:\n continue\n self.tensorboard.add_scalar(tag=\"grad_mean/\" + param_name, scalar_value=param.grad.data.mean(),global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"grad_std/\" + param_name, scalar_value=param.grad.data.std(), global_step=self.n_total_iter)\n\n self.tensorboard.add_scalar(tag=\"losses/cum_avg_loss_epoch\", scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"losses/loss\", scalar_value=self.last_loss, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"losses/loss_ce\", scalar_value=self.last_loss_ce, global_step=self.n_total_iter)\n if self.alpha_mlm > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_mlm\", scalar_value=self.last_loss_mlm, global_step=self.n_total_iter)\n if self.alpha_clm > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_clm\", scalar_value=self.last_loss_clm, global_step=self.n_total_iter)\n if self.alpha_mse > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_mse\", scalar_value=self.last_loss_mse, global_step=self.n_total_iter)\n if self.alpha_cos > 0.:\n self.tensorboard.add_scalar(tag=\"losses/loss_cos\", scalar_value=self.last_loss_cos, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"learning_rate/lr\", scalar_value=self.scheduler.get_lr()[0], global_step=self.n_total_iter)\n \n self.tensorboard.add_scalar(tag=\"global/memory_usage\", scalar_value=psutil.virtual_memory()._asdict()['used']/1_000_000, global_step=self.n_total_iter)\n self.tensorboard.add_scalar(tag=\"global/speed\", scalar_value=time.time()-self.last_log, global_step=self.n_total_iter)\n\n def end_epoch(self):\n \"\"\"\n Finally arrived at the end of epoch (full pass on dataset).\n Do some tensorboard logging and checkpoint saving.\n \"\"\"\n logger.info(f'{self.n_sequences_epoch} sequences have been trained during this epoch.')\n\n if self.is_master:\n self.save_checkpoint(checkpoint_name=f'model_epoch_{self.epoch}.pth')\n self.tensorboard.add_scalar(tag='epoch/loss', scalar_value=self.total_loss_epoch/self.n_iter, global_step=self.epoch)\n\n self.epoch += 1\n self.n_sequences_epoch = 0\n self.n_iter = 0\n self.total_loss_epoch = 0\n\n def save_checkpoint(self,\n checkpoint_name: str = 'checkpoint.pth'):\n \"\"\"\n Save the current state. Only by the master process.\n \"\"\"\n if not self.is_master:\n return\n mdl_to_save = self.student.module if hasattr(self.student, 'module') else self.student\n mdl_to_save.config.save_pretrained(self.dump_path)\n state_dict = mdl_to_save.state_dict()\n torch.save(state_dict, os.path.join(self.dump_path, checkpoint_name))\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nonzero", "torch.nn.functional.softmax", "torch.no_grad", "torch.nn.KLDivLoss", "torch.cat", "torch.utils.data.BatchSampler", "torch.utils.data.RandomSampler", "torch.distributed.barrier", "torch.nn.parallel.DistributedDataParallel", "torch.optim.AdamW", "torch.masked_select", "torch.FloatTensor", "torch.nn.functional.log_softmax", "torch.nn.MSELoss", "torch.utils.data.distributed.DistributedSampler", "torch.nn.CosineEmbeddingLoss", "torch.nn.CrossEntropyLoss", "torch.randperm", "torch.zeros" ] ]
uibcdf/openpharmacophore
[ "4f563fa206f6e7c081502acab97bb795d27bdeb9" ]
[ "openpharmacophore/pharmacophore/dynophore.py" ]
[ "# OpenPharmacophore\nfrom openpharmacophore._private_tools.exceptions import InvalidFileFormat, NoLigandsError, OpenPharmacophoreTypeError\nfrom openpharmacophore.pharmacophore.pharmacophoric_point import UniquePharmacophoricPoint\nfrom openpharmacophore import StructuredBasedPharmacophore\nfrom openpharmacophore import Pharmacophore\nfrom openpharmacophore.utils.conformers import conformer_energy\nfrom openpharmacophore.pharmacophore.color_palettes import get_color_from_palette_for_feature\n# Third Party\nimport matplotlib.pyplot as plt\nimport MDAnalysis as mda\nfrom MDAnalysis.lib.util import NamedStream\nimport mdtraj as mdt\nimport numpy as np\nimport pandas as pd\nimport pyunitwizard as puw\nfrom rdkit.Chem.Draw import rdMolDraw2D\nfrom tqdm.auto import tqdm\n# Standard Library\nfrom collections import defaultdict\nimport copy\nimport bisect\nfrom io import StringIO\nimport tempfile\nfrom typing import List, Tuple, Optional\n\nclass Dynophore():\n \"\"\" Class to store and compute dynamic pharmacophores\n\n Parameters\n ----------\n\n trajectory : str or mdtraj.trajectory or MDAnalysis.universe\n A str with the file path containing the trajectory, an mdtraj trajectory object, \n or an MDAnalysis universe.\n\n Attributes\n ----------\n\n pharmacophores : list of openpharmacophore.StructuredBasedPharmacophore\n List with pharmacophores for each relevant frame in the trajectory. \n\n pharmacophore_indices : list of int\n Indices of the frame of the trajectory from which the pharmacophores were extracted.\n The index of each element of the list corresponds to the one in pharmacophores list.\n\n n_pharmacophores : int\n Number of different pharmacophores in the trajectory.\n\n \"\"\"\n def __init__(self, trajectory):\n self.pharmacophores = []\n self.pharmacophore_indices = []\n self.n_pharmacophores = 0\n self.unique_pharmacophoric_points = []\n\n # TODO: Load other types of file, including using a topology and tajectory\n if isinstance(trajectory, str):\n self._trajectory = self._load_trajectory_file(trajectory)\n elif isinstance(trajectory, mdt.Trajectory):\n self._trajectory_type = \"mdt\"\n self._trajectory = trajectory\n self._n_frames = self._trajectory.n_frames\n elif isinstance(trajectory, mda.Universe):\n self._trajectory_type = \"mda\"\n self._trajectory = trajectory\n self._n_frames = trajectory.trajectory.n_frames\n else:\n raise TypeError(\"Trajectory must be of type string, mdtraj.Trajectory or MdAnalysis.Universe\")\n \n \n self._saved_ligand = False\n self._averaged_coords = False\n\n def common_hits_approach(self, frame_list=None):\n \"\"\" Get a list of pharmacophore models from a trajectory using the common hits approach\n method.\n\n Notes\n -----\n\n This method is based on obtaining a list of representative pharmacophore models from a \n trajectory and then validate and score them using virtual screening. The best performant\n pharmacophore models are then returned.\n\n References\n ----------\n\n [1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico, \n and Thierry Langer. \"Common hits approach: combining pharmacophore modeling and molecular dynamics \n simulations.\" Journal of chemical information and modeling 57, no. 
2 (2017): 365-385 \n\n \"\"\"\n if frame_list is None:\n frame_list = list(range(0, self._n_frames))\n\n self.pharmacophores_from_frames(frame_list, load_ligand=True)\n self._get_unique_pharmacophoric_points(avg_coordinates=False)\n rpms = self.representative_pharmacophore_models()\n\n pass\n\n def draw(self, file_name: str, img_size: Tuple[int, int] = (500,500), \n legend: str = \"\", freq_threshold: float = 0.2) -> None:\n \"\"\" Draw a 2d representation of the dynamic pharmacophore. This is a drawing of the\n ligand with the pharmacophoric features highlighted and the frequency if each\n one. \n\n Parameters\n ----------\n file_name : str\n Name or path og the file where the drawing will be saved. Must be a png file.\n\n img_size : 2-tuple of int, optional \n The size of the image (default=(500,500))\n\n legend : str, optional\n Image legend.\n\n freq_threshold : double , optional\n The minimun frequency of a pharmacophoric point to be drawn. Number\n between 0.0 and 1.0 (default=0.2). \n \"\"\"\n if freq_threshold < 0.0 or freq_threshold > 1.0:\n raise ValueError(\"Freqency threshold must be a value between 0 and 1\") \n\n if not file_name.endswith(\".png\"):\n raise InvalidFileFormat(\"File must be a png.\")\n\n # Extract a ligand\n if self.pharmacophores[0].ligand is None:\n raise NoLigandsError(\"Ligand could not be extracted\")\n ligand = copy.deepcopy(self.pharmacophores[0].ligand)\n ligand.RemoveAllConformers()\n\n atoms = []\n bond_colors = {}\n atom_highlights = defaultdict(list)\n highlight_radius = {}\n\n for up in self.unique_pharmacophoric_points:\n \n if up.frequency < freq_threshold:\n continue\n\n indices = up.atom_indices\n update_freq = True\n for idx in indices:\n\n # If an atom has more than one feature keep higher frequency value\n if idx in atoms:\n if ligand.GetAtomWithIdx(idx).HasProp(\"atomNote\"):\n freq = int(ligand.GetAtomWithIdx(idx).GetProp(\"atomNote\")[2:])\n if freq > up.frequency:\n update_freq = False\n\n atoms.append(idx)\n if \"hydrophobicity\" in up.feature_name:\n feat_name = \"hydrophobicity\"\n else:\n feat_name = \" \".join(up.feature_name.split()[0:2])\n \n atom_highlights[idx].append(get_color_from_palette_for_feature(feat_name))\n highlight_radius[idx] = 0.6\n\n # Draw aromatic rings bonds\n if up.short_name == \"R\":\n for neighbor in ligand.GetAtomWithIdx(idx).GetNeighbors():\n nbr_idx = neighbor.GetIdx()\n if nbr_idx not in indices:\n continue\n bond = ligand.GetBondBetweenAtoms(idx, nbr_idx).GetIdx()\n bond_colors[bond] = [get_color_from_palette_for_feature(\"aromatic ring\")]\n \n if update_freq:\n frequency = int(up.frequency * 100)\n ligand.GetAtomWithIdx(idx).SetProp(\"atomNote\", f\"f={frequency}\")\n\n drawing = rdMolDraw2D.MolDraw2DCairo(img_size[0], img_size[1])\n drawing.DrawMoleculeWithHighlights(ligand, legend, dict(atom_highlights), bond_colors, highlight_radius, {})\n drawing.FinishDrawing()\n drawing.WriteDrawingText(file_name)\n\n def first_and_last_pharmacophore(self) -> None:\n \"\"\" Derive a pharmacophore model for the first and last frames of a trajectory.\n\n References\n ----------\n [1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, Stefan Boresch, and Thierry Langer. \n \"Comparing pharmacophore models derived from crystal structures and from molecular \n dynamics simulations.\" Monatshefte für Chemie-Chemical Monthly 147, no. 
3 (2016): \n 553-563.\n \"\"\"\n if self._trajectory_type == \"mdt\":\n get_pharmacophore = self._pharmacophore_from_mdtraj\n elif self._trajectory_type == \"mda\":\n get_pharmacophore = self._pharmacohore_from_mdanalysis\n\n initial_pharmacophore = get_pharmacophore(0, True, True)\n end_pharmacophore = get_pharmacophore(-1, True, True)\n last_frame_index = self._trajectory.n_frames\n self.pharmacophores = [\n initial_pharmacophore,\n end_pharmacophore\n ]\n self.pharmacophore_indices = [0, last_frame_index]\n self.n_pharmacophores = 2\n\n def pharmacophore_by_frequency(self, threshold: float) -> Pharmacophore:\n \"\"\" Derive a unique pharmacophore model with the pharmacophoric points\n that have a frequency >= to threshold.\n\n Parameters\n ---------\n threshold : float\n The value of frequency from which points are considered part of\n the pharmacophore model. Must be a value between 0 and 1-\n\n Returns\n -------\n openpharmcophore.Pharmacophore\n Pharmacophore model with the unique pharmacophoric points.\n\n References\n ----------\n [1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, and Thierry Langer. \"Pharmacophore models \n derived from molecular dynamics simulations of protein-ligand complexes: A case study.\" \n Natural product communications 11, no. 10 (2016): 1934578X1601101019.\n \"\"\"\n if threshold < 0 or threshold > 1:\n raise ValueError(\"Threshold must be a number between 0 and 1\")\n \n if len(self.unique_pharmacophoric_points) == 0:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n \n points = [p for p in self.unique_pharmacophoric_points if p.frequency >= threshold]\n return Pharmacophore(points)\n\n def pharmacophore_from_unique_points(self, unique_points: List[str]) -> Pharmacophore:\n \"\"\" Get a pharmacophore which consists of the passed unique pharmacophoric\n points.\n\n Parameters\n ----------\n unique_points: list of str\n List with the name of the unique pharmacophoric points.\n\n Returns\n -------\n openpharmcophore.Pharmacophore\n Pharmacophore model with the specified points.\n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n points = [point for point in self.unique_pharmacophoric_points if point.feature_name in unique_points]\n return Pharmacophore(pharmacophoric_points=points)\n\n def pharmacophores_from_frames(self, frames: List[int], load_ligand: bool = True) -> None:\n \"\"\" Get pharmacophores for the specified frames in a trajectory\n\n Parameters\n ----------\n frames : list of int\n Indices of the frames for which pharmacophores will be derived.\n\n \"\"\"\n if self._trajectory_type == \"mdt\":\n get_pharmacophore = self._pharmacophore_from_mdtraj\n elif self._trajectory_type == \"mda\":\n get_pharmacophore = self._pharmacohore_from_mdanalysis\n \n self.pharmacophores.clear()\n self.pharmacophore_indices.clear()\n for ii in tqdm(frames):\n self.pharmacophores.append(get_pharmacophore(ii, load_ligand=load_ligand))\n self.pharmacophore_indices.append(ii)\n self.n_pharmacophores = len(self.pharmacophores)\n \n def pharmacophoric_point_frequency(self) -> pd.DataFrame:\n \"\"\" Get a dataframe with all unique pharmacophoric points and its frequency.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe with the following columns: feature name, frequency and atom\n indices.\n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n \n names = 
[]\n frequencies = []\n indices = []\n for point in self.unique_pharmacophoric_points:\n names.append(point.feature_name)\n frequencies.append(point.frequency)\n indices.append(point.atom_indices)\n\n frequency = pd.DataFrame().from_dict({\n \"Feature Name\": names,\n \"Frequency\": frequencies,\n \"Atoms Indices\": indices\n })\n frequency.sort_values(by=[\"Frequency\"], ascending=False, inplace=True)\n frequency.reset_index(inplace=True)\n frequency.drop(columns=[\"index\"], inplace=True)\n return frequency\n\n def point_frequency_plot(self, threshold: float = 0.0, n_bins: int = 10, \n ax: Optional[plt.Axes] = None):\n \"\"\" Plot of pharmacophoric points frequency vs time. \n \n Each pharmacophoric point will appear as a different line in the plot.\n\n Parameters\n ----------\n threshold : double, default=0.0\n The value of overall frequency from which points will form part of the \n plot. If there are a lot of points with really low frequency, setting\n the threshold value can help with visualization.\n\n n_bins : int, default=10\n Number of bins to discretize the timesteps. \n\n ax : matplotlib.axes._subplots.AxesSubplot, optional.\n An axes object where the plot will be drawn.\n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=True)\n\n if threshold < 0 or threshold > 1:\n raise ValueError(\"Threshold must be a number between 0 and 1\")\n\n if ax is None:\n fig, ax = plt.subplots(figsize=(10, 7))\n n_timesteps = self._n_frames\n bins = np.arange(0, n_timesteps + 1, n_timesteps/n_bins)\n\n for point in self.unique_pharmacophoric_points:\n if point.frequency < threshold:\n continue\n point_timesteps = np.array(point.timesteps)\n discretized_timesteps = np.digitize(point_timesteps, bins)\n\n counts = np.zeros_like(bins)\n\n for i in range(bins.shape[0]):\n c = np.count_nonzero(discretized_timesteps == i)\n counts[i] = c\n \n ax.plot(bins, counts, label=point.feature_name)\n\n ax.legend()\n ax.set_xlabel(\"Timesteps\")\n ax.set_ylabel(\"Count\")\n plt.show()\n\n return ax\n \n def representative_pharmacophore_models(self) -> List[StructuredBasedPharmacophore]:\n \"\"\" Get all representative pharmacophore models (RPM) in a trajectory. \n \n RPMs are pharmacophore models that have the same pharmacophoric points, \n\n Returns\n -------\n rpms : list of openpharmacophore.StructuredBasedPharmacophore\n The representative pharmacophore models\n\n Note\n -----\n Pharmacophoric points are considered equal based only on feature type and the atoms to \n which this points belong to. Coordinates are not taken into account.\n\n The coordinates of the pharmacophoric points are those that belong to the median energy of\n the ligand.\n\n References\n ----------\n [1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico, \n and Thierry Langer. \"Common hits approach: combining pharmacophore modeling and molecular dynamics \n simulations.\" Journal of chemical information and modeling 57, no. 
2 (2017): 365-385 \n \n \"\"\"\n if len(self.unique_pharmacophoric_points) == 0 or self._averaged_coords:\n self._get_unique_pharmacophoric_points(avg_coordinates=False)\n self._averaged_coords = False\n \n rpms_indices = self._get_rpms_indices()\n \n return self._pharmacophores_from_ligand_median_energy(rpms_indices)\n\n def _get_rpms_indices(self) -> List[List[int]]:\n \"\"\" Get the indices of the representative pharmacophore models.\n \n If an empty list is returned it means that all pharmacophore models in the trajectory are different.\n \n Returns\n --------\n rpms_indices : list of list of int\n A list where each sublist contains the indices of each representative pharmacophore\n model. This indices correspond to the attribute pharmacophores of the Dynophore\n class.\n \"\"\"\n # Compute a matrix where each row represents a feature vector of a pharmacophore\n n_pharmacophores = self.n_pharmacophores\n n_features = len(self.unique_pharmacophoric_points) \n feature_matrix = np.zeros((n_pharmacophores, n_features), dtype=np.int32)\n for ii, pharmacophore in enumerate(self.pharmacophores):\n for point in pharmacophore:\n for jj, unique_point in enumerate(self.unique_pharmacophoric_points):\n if point.is_equal(unique_point):\n feature_matrix[ii, jj] = 1\n break\n \n # Find similar pharmacophores in the matrix\n rpms_indices = []\n skip = []\n for ii in range(n_pharmacophores):\n rpm = [ii]\n for jj in range(ii + 1, n_pharmacophores):\n if jj in skip:\n continue\n if np.all(feature_matrix[ii, :] == feature_matrix[jj, :]):\n rpm.append(jj)\n skip.append(jj)\n # Keep only models that have a frequency higher than 2\n if len(rpm) > 2:\n rpms_indices.append(rpm)\n \n \n return rpms_indices\n \n def _pharmacophores_from_ligand_median_energy(self, rpms_indices)-> List[List[int]]:\n \"\"\" Get the representative pharmacophore models that correspond to the pharmacophore\n with ligand median energy.\n\n Parameters\n ----------\n rpms_indices : list of list of int\n A list where each sublist contains the indices of each representative pharmacophore\n model. This indices correspond to the attribute pharmacophores of the Dynophore\n class.\n \n Returns\n -------\n rpms : list of openpharmacophore.StructuredBasedPharmacophore\n The representative pharmacophore models\n \"\"\"\n rpms = []\n for indices in rpms_indices:\n energies = []\n for index in indices:\n energy = (conformer_energy(self.pharmacophores[index].ligand), index)\n bisect.insort(energies, energy)\n # Take the pharmacophore with median energy\n median_energy_index = energies[int(len(energies) / 2)][1]\n rpms.append(self.pharmacophores[median_energy_index])\n \n return rpms\n\n \n def _load_trajectory_file(self, file_name: str) -> mdt.Trajectory:\n \"\"\" Load a trajectory file from a MD simulation\n\n Parameters\n ----------\n file_name : str\n Name of the file containing the trajectory.\n\n Returns\n -------\n traj : \n The trajectory object. \n \"\"\"\n if file_name.endswith(\"h5\"):\n traj = mdt.load(file_name)\n self._trajectory_type = \"mdt\"\n else:\n raise NotImplementedError\n\n return traj\n \n def _get_unique_pharmacophoric_points(self, avg_coordinates: bool = True) -> None:\n \"\"\" Get all unique pharmacophoric points across all the pharmacophore models \n derived from the trajectory. 
\n\n Parameters\n ----------\n avg_coordinates : bool\n Whether to average the coordinates of the pharmacophoric points.\n \n Notes\n -----\n Two points are considered equal if they have the same feature type and\n are associated with the same atom in the ligand.\n \"\"\"\n if avg_coordinates:\n self._averaged_coords = True\n\n if self.n_pharmacophores == 0:\n self.pharmacophores_from_frames(list(range(0, self._n_frames)))\n \n all_points = []\n for ii, pharmacophore in enumerate(self.pharmacophores):\n for pharmacophoric_point in pharmacophore:\n pharmacophoric_point.pharmacophore_index = ii\n all_points.append(pharmacophoric_point)\n \n self.unique_pharmacophoric_points.clear()\n # Get all unique parmacophoric points while also updating the count, \n # timesteps where they appear and calculating the average centroid.\n for point in all_points:\n is_unique = True\n for unique_p in self.unique_pharmacophoric_points:\n if point.is_equal(unique_p):\n timestep = point.pharmacophore_index\n if not timestep in unique_p.timesteps:\n unique_p.timesteps.append(timestep)\n unique_p.count += 1\n if avg_coordinates:\n unique_p.center += point.center\n is_unique = False\n break\n if is_unique:\n self.unique_pharmacophoric_points.append(UniquePharmacophoricPoint(point, point.pharmacophore_index))\n \n names = []\n for point in self.unique_pharmacophoric_points:\n if avg_coordinates:\n # Normalize centroid\n point.center /= point.count \n point.frequency = point.count / self.n_pharmacophores\n # Get a unique name for each point\n feat_num = 1\n full_name = point.feature_name + \" \" + str(feat_num)\n if full_name not in names:\n names.append(full_name)\n point.feature_name = full_name\n else:\n while True:\n feat_num += 1\n full_name = point.feature_name + \" \" + str(feat_num)\n if full_name not in names:\n names.append(full_name)\n point.feature_name = full_name\n break\n\n def _pharmacophore_from_mdtraj(self, frame_num: int, load_mol_system: bool=False, \n load_ligand: bool=False) -> StructuredBasedPharmacophore:\n \"\"\" Derive a pharmacophore for a single frame of an mdtraj Trajectory object.\n\n Parameters\n ----------\n frame_num : int\n The index number of the frame from which the pharmacophore will be derived.\n \n load_mol_system : bool, default=False\n If true the receptor will be stored in the pharmacophore object.\n \n load_ligand : bool, default=False\n If true the ligand will be stored in the pharmacophore object.\n \"\"\"\n # mdtraj trajectories cannot be passed to SringIO objects nor saved as string. 
So with this\n # method, temporary pdb files will be created that can be read by the StructuredBasedPharmacophore \n # class.\n if not isinstance(frame_num, int):\n raise OpenPharmacophoreTypeError(\"Frame number must be an integer\")\n frame = self._trajectory[frame_num]\n \n with tempfile.NamedTemporaryFile() as original_file:\n frame.save_pdb(original_file.name)\n original_file.seek(0) \n lines_original = original_file.readlines()\n \n # The pdb mdtraj generates needs to be edited so that pybel can read it.\n # The third line that contains \"MODEL\" needs to be removed for the structured \n # based pharmacophore to work.\n with tempfile.NamedTemporaryFile() as modified_file:\n for line in lines_original:\n if not line.startswith(b'MODEL'):\n modified_file.write(line)\n modified_file.truncate()\n modified_file.seek(0)\n pharmacophore = StructuredBasedPharmacophore.from_pdb(modified_file, \n radius=1.0, ligand_id=None, hydrophobics=\"plip\", \n load_mol_system=load_mol_system, load_ligand=load_ligand)\n \n return pharmacophore\n \n def _pharmacohore_from_mdanalysis(self, frame_num: int, load_mol_system: bool = False, \n load_ligand: bool = False) -> StructuredBasedPharmacophore:\n \"\"\" Derive a pharmacophore for a single frame of an MdAnalysis Universe object.\n\n Parameters\n ----------\n frame_num : int\n The index number of the frame from which the pharmacophore will be derived.\n \n load_mol_system: bool, default=False\n If true the receptor will be stored in the pharmacophore object.\n \n load_ligand: bool, default=False\n If true the ligand will be stored in the pharmacophore object.\n \"\"\"\n if not isinstance(frame_num, int):\n raise OpenPharmacophoreTypeError(\"Frame number must be an integer\")\n stream = StringIO()\n pdb_stream = NamedStream(stream, \"output.pdb\")\n atoms = self._trajectory.select_atoms(\"all\")\n atoms.write(pdb_stream, frames=self._trajectory.trajectory[[frame_num]])\n pharmacophore = StructuredBasedPharmacophore.from_pdb(pdb_stream, \n radius=1.0, ligand_id=None, hydrophobics=\"plip\", \n load_mol_system=load_mol_system, load_ligand=load_ligand)\n \n return pharmacophore\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(n_pharmacophores={self.n_pharmacophores}; n_frames={self._n_frames})\"\n\n \n \n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "pandas.DataFrame", "matplotlib.pyplot.subplots", "numpy.count_nonzero", "numpy.arange", "matplotlib.pyplot.show", "numpy.all", "numpy.array", "numpy.digitize" ] ]
scott-mao/EOD
[ "f10e64de86c0f356ebf5c7e923f4042eec4207b1" ]
[ "eod/utils/general/saver_helper.py" ]
[ "# Standard Library\nimport json\nimport os\nimport shutil\n\n# Import from third library\nimport torch\n\n# Import from local\nfrom .log_helper import default_logger as logger\nfrom .registry_factory import SAVER_REGISTRY\n\n\n__all__ = ['Saver']\n\n\n@SAVER_REGISTRY.register('base')\nclass Saver(object):\n def __init__(self, save_cfg, yml_path=None, work_dir='./'):\n # checkpoint dir\n self.save_cfg = self.prepend_work_dir(save_cfg, work_dir)\n self.work_dir = work_dir\n self.save_dir = save_cfg['save_dir']\n os.makedirs(self.save_dir, exist_ok=True)\n if yml_path is not None and 's3://' not in yml_path: # TODO, save cpeh data\n yml_name = os.path.basename(yml_path)\n dst_path = os.path.join(self.save_dir, yml_name)\n shutil.copy(yml_path, dst_path)\n\n self.auto_resume = self.save_cfg.get('auto_resume', False)\n self.running_config_file = os.path.join(self.save_dir, 'running_config.json')\n\n def prepend_work_dir(self, save_cfg, work_dir):\n\n def osp(path):\n return os.path.join(work_dir, path)\n\n save_cfg['save_dir'] = osp(save_cfg['save_dir'])\n save_cfg['results_dir'] = osp(save_cfg['results_dir'])\n\n return save_cfg\n\n @staticmethod\n def get_model_from_ckpt(ckpt_path):\n return Saver.load_checkpoint(ckpt_path)['model']\n\n def load_pretrain_or_resume(self):\n if self.auto_resume:\n last_checkpoint_path = self.find_last_checkpoint()\n if last_checkpoint_path is not None:\n logger.warning('Load checkpoint from {}'.format(last_checkpoint_path))\n return self.load_checkpoint(last_checkpoint_path)\n else:\n logger.warning('Not found any valid checkpoint yet')\n\n if 'resume_model' in self.save_cfg:\n logger.warning('Load checkpoint from {}'.format(self.save_cfg['resume_model']))\n state = self.load_checkpoint(self.save_cfg['resume_model'])\n return state\n elif 'pretrain_model' in self.save_cfg:\n state = self.load_checkpoint(self.save_cfg['pretrain_model'])\n logger.warning('Load checkpoint from {}'.format(self.save_cfg['pretrain_model']))\n output = {}\n if 'ema' in state:\n if \"ema_state_dict\" in state['ema']:\n logger.info(\"Load ema pretrain model\")\n st = state['ema']['ema_state_dict']\n else:\n st = state['model']\n else:\n st = state['model']\n output['model'] = st\n return output\n else:\n logger.warning('Load nothing! 
No weights provided {}')\n return {'model': {}}\n\n @staticmethod\n def load_checkpoint(ckpt_path):\n \"\"\"Load state_dict from checkpoint\"\"\"\n\n def remove_prefix(state_dict, prefix):\n \"\"\"Old style model is stored with all names of parameters share common prefix 'module.'\"\"\"\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x\n return {f(key): value for key, value in state_dict.items()}\n\n # assert os.path.exists(ckpt_path), f'No such file: {ckpt_path}'\n device = torch.cuda.current_device()\n ckpt_dict = torch.load(ckpt_path, map_location=lambda storage, loc: storage.cuda(device))\n\n if 'model' in ckpt_dict:\n state_dict = ckpt_dict['model']\n elif 'state_dict' in ckpt_dict:\n state_dict = ckpt_dict['state_dict']\n else:\n state_dict = ckpt_dict\n\n state_dict = remove_prefix(state_dict, 'module.')\n ckpt_dict['model'] = state_dict\n\n return ckpt_dict\n\n def lns_latest_ckpt(self, ckpt_path, new_path):\n try:\n pwd = os.getcwd()\n absolute_ckpt_path = os.path.join(pwd, ckpt_path)\n absolute_new_path = os.path.join(pwd, new_path)\n if os.path.exists(absolute_new_path):\n os.system(f'rm {absolute_new_path}')\n os.system(f\"ln -s {absolute_ckpt_path} {absolute_new_path}\")\n except Exception as e:\n logger.warning(f'Failed to ln -s {ckpt_path} {new_path}')\n logger.warning(e)\n\n def save(self, epoch, iter, **kwargs):\n \"\"\"Save model checkpoint for one epoch\"\"\"\n os.makedirs(self.save_dir, exist_ok=True)\n # Assume we warmup for a epochs and training a+b epochs in total,\n # then our checkpoints are named of ckpt_e{-a+1}.pth ~ ckpt_e{b}.pth\n # if best in kwargs, we save the best ckpt as ckpt_best.path.auto\n if 'suffix' in kwargs:\n suffix = kwargs['suffix']\n ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}-{}.pth'.format(epoch, suffix))\n elif 'auto_save' in kwargs:\n ckpt_path = os.path.join(self.save_dir, 'ckpt_{}.pth'.format(kwargs['auto_save']))\n else:\n ckpt_path = os.path.join(self.save_dir, 'ckpt_e{}.pth'.format(epoch))\n # since epoch not in kwargs\n kwargs['epoch'] = epoch\n kwargs['iter'] = iter\n kwargs['metric_val'] = kwargs.get('metric_val', -1)\n lns_latest_ckpt = kwargs.pop('lns', True)\n torch.save(kwargs, ckpt_path)\n if lns_latest_ckpt:\n latest_path = os.path.join(self.save_dir, 'ckpt_latest.pth')\n self.lns_latest_ckpt(ckpt_path, latest_path)\n return ckpt_path\n\n def save_model_arch(self, model):\n \"\"\"Save model structure\"\"\"\n os.makedirs(self.save_dir, exist_ok=True)\n meta_path = os.path.join(self.save_dir, 'model_arch.txt')\n with open(meta_path, 'w') as fid:\n fid.write(str(model))\n\n def save_running_config(self, config):\n with open(self.running_config_file, 'w') as rcf:\n json.dump(config, rcf, indent=2)\n\n def find_last_checkpoint(self):\n last_ckpt_path = os.path.join(self.save_dir, \"ckpt_latest.pth\")\n if os.path.exists(last_ckpt_path):\n return last_ckpt_path\n else:\n return None\n" ]
[ [ "torch.save", "torch.cuda.current_device" ] ]
dynamicguy/imgaug
[ "f58c06323eb04416c76de1f18952ca5875caf883" ]
[ "imgaug/augmenters/weather.py" ]
[ "\"\"\"\nAugmenters that create wheather effects.\n\nDo not import directly from this file, as the categorization is not final.\nUse instead::\n\n from imgaug import augmenters as iaa\n\nand then e.g.::\n\n seq = iaa.Sequential([iaa.Snowflakes()])\n\nList of augmenters:\n\n * FastSnowyLandscape\n * Clouds\n * Fog\n * CloudLayer\n * Snowflakes\n * SnowflakesLayer\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport numpy as np\nimport cv2\n\nfrom . import meta, arithmetic, blur, contrast\nfrom .. import imgaug as ia\nfrom .. import parameters as iap\n\n\nclass FastSnowyLandscape(meta.Augmenter):\n \"\"\"\n Augmenter to convert non-snowy landscapes to snowy ones.\n\n This expects to get an image that roughly shows a landscape.\n\n This is based on the method proposed by\n https://medium.freecodecamp.org/image-augmentation-make-it-rain-make-it-snow-how-to-modify-a-photo-with-machine-learning-163c0cb3843f?gi=bca4a13e634c\n\n Parameters\n ----------\n lightness_threshold : number or tuple of number or list of number\\\n or imgaug.parameters.StochasticParameter, optional\n All pixels with lightness in HLS colorspace below this value will have their lightness increased by\n `lightness_multiplier`.\n\n * If an int, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the discrete range ``[a .. b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n lightness_multiplier : number or tuple of number or list of number\\\n or imgaug.parameters.StochasticParameter, optional\n Multiplier for pixel's lightness value in HLS colorspace. Affects all pixels selected via `lightness_threshold`.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.FastSnowyLandscape(lightness_threshold=140, lightness_multiplier=2.5)\n\n Search for all pixels in the image with a lightness value in HLS colorspace of less than 140 and increase their\n lightness by a factor of 2.5. This is the configuration proposed in the original article (see link above).\n\n >>> aug = iaa.FastSnowyLandscape(lightness_threshold=[128, 200], lightness_multiplier=(1.5, 3.5))\n\n Search for all pixels in the image with a lightness value in HLS colorspace of less than 128 or less than 200\n (one of these values is picked per image) and multiply their lightness by a factor of ``x`` with ``x`` being\n sampled from ``uniform(1.5, 3.5)`` (once per image).\n\n >>> aug = iaa.FastSnowyLandscape(lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0))\n\n Similar to above, but the lightness threshold is sampled from ``uniform(100, 255)`` (per image) and the multiplier\n from ``uniform(1.0, 4.0)`` (per image). 
This seems to produce good and varied results.\n\n \"\"\"\n\n def __init__(self, lightness_threshold=(100, 255), lightness_multiplier=(1.0, 4.0), name=None, deterministic=False,\n random_state=None):\n super(FastSnowyLandscape, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n\n self.lightness_threshold = iap.handle_continuous_param(lightness_threshold, \"lightness_threshold\",\n value_range=(0, 255),\n tuple_to_uniform=True,\n list_to_choice=True)\n self.lightness_multiplier = iap.handle_continuous_param(lightness_multiplier, \"lightness_multiplier\",\n value_range=(0, None), tuple_to_uniform=True,\n list_to_choice=True)\n\n def _draw_samples(self, augmentables, random_state):\n nb_augmentables = len(augmentables)\n rss = ia.derive_random_states(random_state, 2)\n thresh_samples = self.lightness_threshold.draw_samples((nb_augmentables,), rss[1])\n lmul_samples = self.lightness_multiplier.draw_samples((nb_augmentables,), rss[0])\n return thresh_samples, lmul_samples\n\n def _augment_images(self, images, random_state, parents, hooks):\n input_dtypes = meta.copy_dtypes_for_restore(images, force_list=True)\n thresh_samples, lmul_samples = self._draw_samples(images, random_state)\n result = images\n\n for i, (image, input_dtype, thresh, lmul) in enumerate(zip(images, input_dtypes, thresh_samples, lmul_samples)):\n image_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS).astype(np.float64)\n lightness = image_hls[..., 1]\n\n lightness[lightness < thresh] *= lmul\n\n image_hls = meta.clip_augmented_image_(image_hls, 0, 255) # TODO make value range more flexible\n image_hls = meta.restore_augmented_image_dtype_(image_hls, input_dtype)\n image_rgb = cv2.cvtColor(image_hls, cv2.COLOR_HLS2RGB)\n\n result[i] = image_rgb\n\n return result\n\n def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n return heatmaps\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def get_parameters(self):\n return [self.lightness_threshold, self.lightness_multiplier]\n\n\n# TODO add vertical gradient alpha to have clouds only at skylevel/groundlevel\n# TODO add configurable parameters\ndef Clouds(name=None, deterministic=False, random_state=None):\n \"\"\"\n Augmenter to draw clouds in images.\n\n This is a wrapper around ``CloudLayer``. It executes 1 to 2 layers per image, leading to varying densities\n and frequency patterns of clouds.\n\n This augmenter seems to be fairly robust w.r.t. the image size. 
Tested with ``96x128``, ``192x256``\n and ``960x1280``.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Clouds()\n\n Creates an augmenter that adds clouds to images.\n\n \"\"\"\n if name is None:\n name = \"Unnamed%s\" % (ia.caller_name(),)\n\n return meta.SomeOf((1, 2), children=[\n CloudLayer(\n intensity_mean=(196, 255), intensity_freq_exponent=(-2.5, -2.0), intensity_coarse_scale=10,\n alpha_min=0, alpha_multiplier=(0.25, 0.75), alpha_size_px_max=(2, 8), alpha_freq_exponent=(-2.5, -2.0),\n sparsity=(0.8, 1.0), density_multiplier=(0.5, 1.0)\n ),\n CloudLayer(\n intensity_mean=(196, 255), intensity_freq_exponent=(-2.0, -1.0), intensity_coarse_scale=10,\n alpha_min=0, alpha_multiplier=(0.5, 1.0), alpha_size_px_max=(64, 128), alpha_freq_exponent=(-2.0, -1.0),\n sparsity=(1.0, 1.4), density_multiplier=(0.8, 1.5)\n )\n ], random_order=False, name=name, deterministic=deterministic, random_state=random_state)\n\n\n# TODO add vertical gradient alpha to have fog only at skylevel/groundlevel\n# TODO add configurable parameters\ndef Fog(name=None, deterministic=False, random_state=None):\n \"\"\"\n Augmenter to draw fog in images.\n\n This is a wrapper around ``CloudLayer``. It executes a single layer per image with a configuration leading\n to fairly dense clouds with low-frequency patterns.\n\n This augmenter seems to be fairly robust w.r.t. the image size. Tested with ``96x128``, ``192x256``\n and ``960x1280``.\n\n Parameters\n ----------\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Fog()\n\n Creates an augmenter that adds fog to images.\n\n \"\"\"\n if name is None:\n name = \"Unnamed%s\" % (ia.caller_name(),)\n\n return CloudLayer(\n intensity_mean=(220, 255), intensity_freq_exponent=(-2.0, -1.5), intensity_coarse_scale=2,\n alpha_min=(0.7, 0.9), alpha_multiplier=0.3, alpha_size_px_max=(2, 8), alpha_freq_exponent=(-4.0, -2.0),\n sparsity=0.9, density_multiplier=(0.4, 0.9),\n name=name, deterministic=deterministic, random_state=random_state\n )\n\n\n# TODO add perspective transform to each cloud layer to make them look more distant?\n# TODO alpha_mean and density overlap - remove one of them\nclass CloudLayer(meta.Augmenter):\n \"\"\"\n Augmenter to add a single layer of clouds to an image.\n\n Parameters\n ----------\n intensity_mean : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Mean intensity of the clouds (i.e. mean color). 
Recommended to be around ``(190, 255)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n intensity_freq_exponent : number or tuple of number or list of number\\\n or imgaug.parameters.StochasticParameter\n Exponent of the frequency noise used to add fine intensity to the mean intensity.\n Recommended to be somewhere around ``(-2.5, -1.5)``.\n See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.\n\n intensity_coarse_scale : number or tuple of number or list of number\\\n or imgaug.parameters.StochasticParameter\n Standard deviation of the gaussian distribution used to add more localized intensity to the mean intensity.\n Sampled in low resolution space, i.e. affects final intensity on a coarse level. Recommended to be\n around ``(0, 10)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n alpha_min : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Minimum alpha when blending cloud noise with the image. High values will lead to clouds being \"everywhere\".\n Recommended to usually be at around ``0.0`` for clouds and ``>0`` for fog.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n alpha_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Multiplier for the sampled alpha values. High values will lead to denser clouds wherever they are visible.\n Recommended to be at around ``(0.3, 1.0)``. Note that this parameter currently overlaps with\n `density_multiplier`, which is applied a bit later to the alpha mask.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n alpha_size_px_max : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the image size at which the alpha mask is sampled. Lower values will lead to coarser alpha masks\n and hence larger clouds (and empty areas).\n See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.\n\n alpha_freq_exponent : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Exponent of the frequency noise used to sample the alpha mask. Similarly to `alpha_size_max_px`, lower values\n will lead to coarser alpha patterns. Recommended to be somewhere around ``(-4.0, -1.5)``.\n See :func:`imgaug.parameters.FrequencyNoise.__init__` for details.\n\n sparsity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Exponent applied late to the alpha mask. 
Lower values will lead to coarser cloud patterns, higher values\n to finer patterns. Recommended to be somewhere around ``1.0``. Do not deviate far from that values, otherwise\n the alpha mask might get weird patterns with sudden fall-offs to zero that look very unnatural.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n density_multiplier : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Late multiplier for the alpha mask, similar to `alpha_multiplier`. Set this higher to get \"denser\" clouds\n wherever they are visible. Recommended to be around ``(0.5, 1.5)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n \"\"\"\n def __init__(self, intensity_mean, intensity_freq_exponent, intensity_coarse_scale,\n alpha_min, alpha_multiplier, alpha_size_px_max, alpha_freq_exponent,\n sparsity, density_multiplier,\n name=None, deterministic=False, random_state=None):\n super(CloudLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n self.intensity_mean = iap.handle_continuous_param(intensity_mean, \"intensity_mean\")\n self.intensity_freq_exponent = intensity_freq_exponent\n self.intensity_coarse_scale = intensity_coarse_scale\n self.alpha_min = iap.handle_continuous_param(alpha_min, \"alpha_min\")\n self.alpha_multiplier = iap.handle_continuous_param(alpha_multiplier, \"alpha_multiplier\")\n self.alpha_size_px_max = alpha_size_px_max\n self.alpha_freq_exponent = alpha_freq_exponent\n self.sparsity = iap.handle_continuous_param(sparsity, \"sparsity\")\n self.density_multiplier = iap.handle_continuous_param(density_multiplier, \"density_multiplier\")\n\n def _augment_images(self, images, random_state, parents, hooks):\n rss = ia.derive_random_states(random_state, len(images))\n result = images\n for i, (image, rs) in enumerate(zip(images, rss)):\n result[i] = self.draw_on_image(image, rs)\n return result\n\n def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n return heatmaps\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def get_parameters(self):\n return [self.intensity_mean, self.alpha_min, self.alpha_multiplier, self.alpha_size_px_max,\n self.alpha_freq_exponent, self.intensity_freq_exponent, self.sparsity, self.density_min,\n self.density_multiplier,\n self.intensity_coarse_scale]\n\n def draw_on_image(self, image, random_state):\n alpha, intensity = self.generate_maps(image, random_state)\n alpha = alpha[..., np.newaxis]\n intensity = intensity[..., np.newaxis]\n return np.clip(\n (1 - alpha) * image.astype(np.float64) + alpha * intensity.astype(np.float64),\n 0,\n 255\n 
).astype(np.uint8)\n\n def generate_maps(self, image, random_state):\n intensity_mean_sample = self.intensity_mean.draw_sample(random_state)\n alpha_min_sample = self.alpha_min.draw_sample(random_state)\n alpha_multiplier_sample = self.alpha_multiplier.draw_sample(random_state)\n alpha_size_px_max = self.alpha_size_px_max\n intensity_freq_exponent = self.intensity_freq_exponent\n alpha_freq_exponent = self.alpha_freq_exponent\n sparsity_sample = self.sparsity.draw_sample(random_state)\n density_multiplier_sample = self.density_multiplier.draw_sample(random_state)\n\n height, width = image.shape[0:2]\n rss_alpha, rss_intensity = ia.derive_random_states(random_state, 2)\n\n intensity_coarse = self._generate_intensity_map_coarse(\n height, width, intensity_mean_sample,\n iap.Normal(0, scale=self.intensity_coarse_scale),\n rss_intensity\n )\n intensity_fine = self._generate_intensity_map_fine(height, width, intensity_mean_sample,\n intensity_freq_exponent, rss_intensity)\n intensity = np.clip(intensity_coarse + intensity_fine, 0, 255)\n\n alpha = self._generate_alpha_mask(height, width, alpha_min_sample, alpha_multiplier_sample,\n alpha_freq_exponent, alpha_size_px_max,\n sparsity_sample, density_multiplier_sample, rss_alpha)\n\n return alpha, intensity\n\n @classmethod\n def _generate_intensity_map_coarse(cls, height, width, intensity_mean, intensity_local_offset, random_state):\n height_intensity, width_intensity = (8, 8) # TODO this might be too simplistic for some image sizes\n intensity = intensity_mean\\\n + intensity_local_offset.draw_samples((height_intensity, width_intensity), random_state)\n intensity = ia.imresize_single_image(np.clip(intensity, 0, 255).astype(np.uint8), (height, width),\n interpolation=\"cubic\")\n\n return intensity\n\n @classmethod\n def _generate_intensity_map_fine(cls, height, width, intensity_mean, exponent, random_state):\n intensity_details_generator = iap.FrequencyNoise(\n exponent=exponent,\n size_px_max=max(height, width),\n upscale_method=\"cubic\"\n )\n intensity_details = intensity_details_generator.draw_samples((height, width), random_state)\n return intensity_mean * ((2*intensity_details - 1.0)/5.0)\n\n @classmethod\n def _generate_alpha_mask(cls, height, width, alpha_min, alpha_multiplier, exponent, alpha_size_px_max, sparsity,\n density_multiplier, random_state):\n alpha_generator = iap.FrequencyNoise(\n exponent=exponent,\n size_px_max=alpha_size_px_max,\n upscale_method=\"cubic\"\n )\n alpha_local = alpha_generator.draw_samples((height, width), random_state)\n alpha = alpha_min + (alpha_multiplier * alpha_local)\n alpha = (alpha ** sparsity) * density_multiplier\n alpha = np.clip(alpha, 0.0, 1.0)\n\n return alpha\n\n\ndef Snowflakes(density=(0.005, 0.075), density_uniformity=(0.3, 0.9), flake_size=(0.2, 0.7),\n flake_size_uniformity=(0.4, 0.8), angle=(-30, 30), speed=(0.007, 0.03),\n name=None, deterministic=False, random_state=None):\n \"\"\"\n Augmenter to add falling snowflakes to images.\n\n This is a wrapper around ``SnowflakesLayer``. It executes 1 to 3 layers per image.\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.\n Valid value range is ``(0.0, 1.0)``. 
Recommended to be around ``(0.01, 0.075)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Uniformity of the snowflake density. Higher values lead to the snowflakes being distributed more evenly across the image.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size of the snowflakes. This parameter controls the resolution at which snowflakes are sampled.\n Higher values mean that the resolution is closer to the input image's resolution and hence each sampled\n snowflake will be smaller (because of the smaller pixel size).\n\n Valid value range is ``[0.0, 1.0)``. Recommended values:\n\n * On ``96x128`` a value of ``(0.1, 0.4)`` worked well.\n * On ``192x256`` a value of ``(0.2, 0.7)`` worked well.\n * On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are more similarly\n sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur that points straight\n upwards. Recommended to be around ``(-30, 30)``.\n See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Perceived falling speed of the snowflakes. This parameter controls the motion blur's kernel size.\n It follows roughly the form ``kernel_size = image_size * speed``. 
Hence,\n values around ``1.0`` denote that the motion blur should \"stretch\" each snowflake over the whole image.\n\n Valid value range is ``(0.0, 1.0)``. Recommended values:\n\n * On ``96x128`` a value of ``(0.01, 0.05)`` worked well.\n * On ``192x256`` a value of ``(0.007, 0.03)`` worked well.\n * On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n Examples\n --------\n >>> aug = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))\n\n Adds snowflakes to small images (around ``96x128``).\n\n >>> aug = iaa.Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03))\n\n Adds snowflakes to medium-sized images (around ``192x256``).\n\n >>> aug = iaa.Snowflakes(flake_size=(0.7, 0.95), speed=(0.001, 0.03))\n\n Adds snowflakes to large images (around ``960x1280``).\n\n \"\"\"\n if name is None:\n name = \"Unnamed%s\" % (ia.caller_name(),)\n\n layer = SnowflakesLayer(\n density=density, density_uniformity=density_uniformity,\n flake_size=flake_size, flake_size_uniformity=flake_size_uniformity,\n angle=angle, speed=speed,\n blur_sigma_fraction=(0.0001, 0.001)\n )\n\n return meta.SomeOf(\n (1, 3), children=[layer.deepcopy() for _ in range(3)],\n random_order=False, name=name, deterministic=deterministic, random_state=random_state\n )\n\n\n# TODO snowflakes are all almost 100% white, add some grayish tones and maybe color to them\nclass SnowflakesLayer(meta.Augmenter):\n \"\"\"\n Augmenter to add a single layer of falling snowflakes to images.\n\n Parameters\n ----------\n density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.01, 0.075)``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Uniformity of the snowflake density. Higher values lead to the snowflakes being distributed more evenly across the image.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Size of the snowflakes. 
This parameter controls the resolution at which snowflakes are sampled.\n Higher values mean that the resolution is closer to the input image's resolution and hence each sampled\n snowflake will be smaller (because of the smaller pixel size).\n\n Valid value range is ``[0.0, 1.0)``. Recommended values:\n\n * On ``96x128`` a value of ``(0.1, 0.4)`` worked well.\n * On ``192x256`` a value of ``(0.2, 0.7)`` worked well.\n * On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are more similarly\n sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur that points straight\n upwards. Recommended to be around ``(-30, 30)``.\n See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Perceived falling speed of the snowflakes. This parameter controls the motion blur's kernel size.\n It follows roughly the form ``kernel_size = image_size * speed``. Hence,\n values around ``1.0`` denote that the motion blur should \"stretch\" each snowflake over the whole image.\n\n Valid value range is ``(0.0, 1.0)``. Recommended values:\n\n * On ``96x128`` a value of ``(0.01, 0.05)`` worked well.\n * On ``192x256`` a value of ``(0.007, 0.03)`` worked well.\n * On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.\n\n Allowed datatypes:\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n blur_sigma_fraction : number or tuple of number or list of number or imgaug.parameters.StochasticParameter\n Standard deviation (as a fraction of the image size) of gaussian blur applied to the snowflakes.\n Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.0001, 0.001)``. 
May still require tinkering\n based on image size.\n\n * If a number, then that value will be used for all images.\n * If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.\n * If a list, then a random value will be sampled from that list per image.\n * If a StochasticParameter, then a value will be sampled per image from that parameter.\n\n blur_sigma_limits : tuple of float, optional\n Controls the allowed min and max values of `blur_sigma_fraction` after(!) multiplication with the image size.\n First value is the minimum, second value is the maximum. Values outside of that range will be clipped to be\n within that range. This prevents extreme values for very small or large images.\n\n name : None or str, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n deterministic : bool, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n random_state : None or int or numpy.random.RandomState, optional\n See :func:`imgaug.augmenters.meta.Augmenter.__init__`.\n\n \"\"\"\n def __init__(self, density, density_uniformity, flake_size, flake_size_uniformity, angle, speed, blur_sigma_fraction,\n blur_sigma_limits=(0.5, 3.75), name=None, deterministic=False,\n random_state=None):\n super(SnowflakesLayer, self).__init__(name=name, deterministic=deterministic, random_state=random_state)\n self.density = density\n self.density_uniformity = iap.handle_continuous_param(density_uniformity, \"density_uniformity\",\n value_range=(0.0, 1.0))\n self.flake_size = iap.handle_continuous_param(flake_size, \"flake_size\", value_range=(0.0+1e-4, 1.0))\n self.flake_size_uniformity = iap.handle_continuous_param(flake_size_uniformity, \"flake_size_uniformity\",\n value_range=(0.0, 1.0))\n self.angle = iap.handle_continuous_param(angle, \"angle\")\n self.speed = iap.handle_continuous_param(speed, \"speed\", value_range=(0.0, 1.0))\n self.blur_sigma_fraction = iap.handle_continuous_param(blur_sigma_fraction, \"blur_sigma_fraction\",\n value_range=(0.0, 1.0))\n self.blur_sigma_limits = blur_sigma_limits # (min, max), same for all images\n self.gate_noise_size = (8, 8) # (height, width), same for all images\n\n def _augment_images(self, images, random_state, parents, hooks):\n rss = ia.derive_random_states(random_state, len(images))\n result = images\n for i, (image, rs) in enumerate(zip(images, rss)):\n result[i] = self.draw_on_image(image, rs)\n return result\n\n def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):\n return heatmaps\n\n def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks):\n return keypoints_on_images\n\n def get_parameters(self):\n return [self.density, self.density_uniformity, self.flake_size, self.flake_size_uniformity, self.angle,\n self.speed, self.blur_sigma_fraction, self.blur_sigma_limits, self.gate_noise_size]\n\n def draw_on_image(self, image, random_state):\n flake_size_sample = self.flake_size.draw_sample(random_state)\n flake_size_uniformity_sample = self.flake_size_uniformity.draw_sample(random_state)\n angle_sample = self.angle.draw_sample(random_state)\n speed_sample = self.speed.draw_sample(random_state)\n blur_sigma_fraction_sample = self.blur_sigma_fraction.draw_sample(random_state)\n\n height, width = image.shape[0:2]\n downscale_factor = np.clip(1.0 - flake_size_sample, 0.001, 1.0)\n height_down, width_down = int(height*downscale_factor), int(width*downscale_factor),\n noise = self._generate_noise(\n height_down,\n width_down,\n self.density,\n ia.derive_random_state(random_state)\n 
)\n\n # gate the sampled noise via noise in the range [0.0, 1.0]\n # this leads to fewer flakes in some areas of the image and more in other areas\n gate_noise = iap.Beta(1.0, 1.0 - self.density_uniformity)\n noise = self._gate(noise, gate_noise, self.gate_noise_size, ia.derive_random_state(random_state))\n noise = ia.imresize_single_image(noise, (height, width), interpolation=\"cubic\")\n\n # apply a bit of gaussian blur and then motion blur according to angle and speed\n sigma = max(height, width) * blur_sigma_fraction_sample\n sigma = np.clip(sigma, self.blur_sigma_limits[0], self.blur_sigma_limits[1])\n noise_small_blur = self._blur(noise, sigma, random_state)\n noise_small_blur = self._motion_blur(noise_small_blur, angle=angle_sample, speed=speed_sample,\n random_state=random_state)\n\n # use contrast adjustment of noise to make the flake size a bit less uniform\n # then readjust the noise values to make them more visible again\n gain = 1.0 + 2*(1 - flake_size_uniformity_sample)\n gain_adj = 1.0 + 5*(1 - flake_size_uniformity_sample)\n noise_small_blur = contrast.GammaContrast(gain).augment_image(noise_small_blur)\n noise_small_blur = noise_small_blur.astype(np.float32) * gain_adj\n noise_small_blur_rgb = np.tile(noise_small_blur[..., np.newaxis], (1, 1, 3))\n\n # blend:\n # sum for a bit of glowy, hardly visible flakes\n # max for the main flakes\n image_f32 = image.astype(np.float32)\n image_f32 = self._blend_by_sum(image_f32, (0.1 + 20*speed_sample) * noise_small_blur_rgb)\n image_f32 = self._blend_by_max(image_f32, (1.0 + 20*speed_sample) * noise_small_blur_rgb)\n return image_f32\n\n @classmethod\n def _generate_noise(cls, height, width, density, random_state):\n noise = arithmetic.Salt(p=density, random_state=random_state)\n return noise.augment_image(np.zeros((height, width), dtype=np.uint8))\n\n @classmethod\n def _gate(cls, noise, gate_noise, gate_size, random_state):\n # the beta distribution here has most of its weight around 1.0 and will only rarely sample values around 0.0\n # the average of the sampled values seems to be at around 0.6-0.75\n gate_noise = gate_noise.draw_samples(gate_size, random_state)\n gate_noise_up = ia.imresize_single_image(gate_noise, noise.shape[0:2], interpolation=\"cubic\")\n gate_noise_up = np.clip(gate_noise_up, 0.0, 1.0)\n return np.clip(noise.astype(np.float32) * gate_noise_up, 0, 255).astype(np.uint8)\n\n @classmethod\n def _blur(cls, noise, sigma, random_state):\n blurer = blur.GaussianBlur(sigma, random_state=random_state)\n return blurer.augment_image(noise)\n\n @classmethod\n def _motion_blur(cls, noise, angle, speed, random_state):\n size = max(noise.shape[0:2])\n k = int(speed * size)\n if k <= 1:\n return noise\n\n # we use max(k, 3) here because MotionBlur errors for anything less than 3\n blurer = blur.MotionBlur(k=max(k, 3), angle=angle, direction=1.0, random_state=random_state)\n return blurer.augment_image(noise)\n\n @classmethod\n def _blend_by_sum(cls, image_f32, noise_small_blur_rgb):\n image_f32 = image_f32 + noise_small_blur_rgb\n return np.clip(image_f32, 0, 255).astype(np.uint8)\n\n @classmethod\n def _blend_by_max(cls, image_f32, noise_small_blur_rgb):\n image_f32 = np.maximum(image_f32, noise_small_blur_rgb)\n return np.clip(image_f32, 0, 255).astype(np.uint8)\n" ]
[ [ "numpy.maximum", "numpy.tile", "numpy.clip", "numpy.zeros" ] ]
computational-imaging/DeepOpticsHDR
[ "1180749b028dd21f6b7140c0538fe332bd29bb46" ]
[ "src/optics_numpy.py" ]
[ "#Julie Chang and Chris Metzler 2020\nimport abc\n\n# import tensorflow as tf\nimport numpy as np\n# import matplotlib as mpl\n# mpl.use('TKAgg')\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\nfrom numpy.fft import ifftshift\nimport fractions\n# import layers.optics_no_transpose as optics\n#import optics_no_transpose as optics\nfrom skimage.transform import resize\nfrom skimage.measure import block_reduce\nfrom scipy.ndimage import gaussian_filter\n# from scipy.interpolate import RectBivariateSpline\nimport scipy.interpolate as interp\nfrom skimage.io import imsave\n\ndef phaseshifts_from_height_map(height_map, wave_lengths, refractive_idcs, dtype=np.complex64):\n '''Calculates the phase shifts created by a height map with certain\n refractive index for light with specific wave length.\n '''\n # refractive index difference\n delta_N = refractive_idcs.reshape([1,-1,1,1]) - 1.\n # wave number\n wave_nos = 2. * np.pi / wave_lengths\n wave_nos = wave_nos.reshape([1,-1,1,1])\n # phase delay indiced by height field\n phi = wave_nos * delta_N * height_map\n phase_shifts = np.exp(1j*phi)\n return phase_shifts\n\ndef get_vanilla_zernike_height_map(zernike_volume, zernike_coeffs, output_resolution=None):\n heightmap_zernike = np.sum(zernike_coeffs * zernike_volume, axis=0)\n if output_resolution is not None:\n heightmap_zernike = resize(heightmap_zernike, output_resolution)\n return heightmap_zernike\n\nclass PhasePlate():\n def __init__(self,\n wave_lengths,\n height_map,\n refractive_idcs,\n height_tolerance=None,\n lateral_tolerance=None,\n dtype=np.complex64):\n\n self.wave_lengths = wave_lengths\n self.height_map = height_map\n self.resolution = np.array(np.shape(height_map))\n self.refractive_idcs=refractive_idcs\n self.height_tolerance=height_tolerance\n self.lateral_tolerance=lateral_tolerance\n self.dtype = dtype\n\n def __call__(self, input_field):\n # Add manufacturing tolerances in the form of height map noise\n if self.height_tolerance is not None:\n self.height_map += np.random.uniform(low=-self.height_tolerance,\n high=self.height_tolerance,\n size=self.height_map.shape)\n print(\"Phase plate with manufacturing tolerance %0.2e\"%self.height_tolerance)\n\n self.phase_shifts = phaseshifts_from_height_map(self.height_map,\n self.wave_lengths,\n self.refractive_idcs,\n dtype=self.dtype)\n\n input_field = input_field.astype(self.dtype)\n return input_field * self.phase_shifts\n\ndef psf2otf(input_filter, output_size):\n \"\"\"Convert 4D tensorflow filter into its FFT.\n Input shape: [in_channels, out_channels, height, width]\n \"\"\"\n # pad out to output_size with zeros\n # circularly shift so center pixel is at 0,0\n _, _, fh, fw = np.shape(input_filter)\n \n if output_size[0] != fh:\n pad = (output_size[0] - fh)/2\n\n if (output_size[0] - fh) % 2 != 0:\n pad_top = pad_left = int(np.ceil(pad))\n pad_bottom = pad_right = int(np.floor(pad))\n else:\n pad_top = pad_left = int(pad) + 1\n pad_bottom = pad_right = int(pad) - 1\n\n padded = np.pad(input_filter, ((0,0), (0,0), (pad_top, pad_bottom),\n (pad_left, pad_right)), mode='constant')\n else:\n padded = input_filter\n\n padded = np.fft.ifftshift(padded, axes=(2,3))\n tmp = np.fft.fft2(padded)\n\n return tmp\n\ndef propagate_exact(input_field, kernels):\n\n _, _, M_orig, N_orig = np.shape(input_field)\n\n # zero padding.\n Mpad = M_orig//2\n Npad = N_orig//2\n\n M = M_orig + 2*Mpad\n N = N_orig + 2*Npad\n\n padded_input_field = np.pad(input_field,\n ((0,0), (0,0), (Mpad,Mpad), (Npad,Npad)),\n mode='constant')\n\n objFT = 
np.fft.fft2(padded_input_field)\n out_field = np.fft.ifft2( objFT * kernels)\n\n out_field = out_field[:,:,Npad:-Npad,Npad:-Npad]\n\n return out_field\n\ndef plano_convex_initializer(focal_length,\n wave_lengths,\n wave_resolution,\n discretization_step,\n refractive_idx):\n convex_radius = (refractive_idx - 1.) * focal_length\n N,M = wave_resolution\n [x, y] = np.mgrid[-N//2:N//2,\n -M//2:M//2].astype(np.float64)\n\n x = x * discretization_step\n y = y * discretization_step\n x = x.reshape([N,M])\n y = y.reshape([N,M])\n\n # This approximates the spherical surface with qaudratic-phase surfaces.\n height_map = -(x ** 2 + y ** 2) / 2. * (1. / convex_radius)\n # height_map = np.mod(height_map, get_one_phase_shift_thickness(wave_lengths[0], refractive_idcs[0]))\n # return tf.constant(np.sqrt(height_map), dtype=dtype)\n \n return height_map\n\ndef circular_aperture(input_field, r_cutoff=None):\n try:\n input_shape = np.shape(input_field)\n except:\n input_shape = input_field.shape\n\n [x, y] = np.mgrid[-input_shape[2] // 2: input_shape[2] // 2,\n -input_shape[3] // 2: input_shape[3] // 2].astype(np.float64)\n\n if r_cutoff is None:\n r_cutoff = np.amax(x)\n\n r = np.sqrt(x ** 2 + y ** 2)[None,None,:,:]\n aperture = (r<r_cutoff).astype(np.float32)\n return aperture * input_field\n\ndef get_psfs(optical_element,\n depth_values,\n wave_lengths,\n optical_feature_size,\n sensor_distance,\n propagation_kernel,\n psf_resolution=None,\n sampling_factor=None,\n use_circular_aperture=True,\n r_cutoff=None,\n amplitude_mask=None,\n use_planar_incidence=False,\n dtype=np.complex64,\n sigma=None,\n get_otfs=True,\n otf_resolution=None):\n\n wave_resolution = optical_element.resolution\n physical_size = wave_resolution[0] * optical_feature_size\n # what about magnification\n \n N, M = wave_resolution\n [x, y] = np.mgrid[-N//2:N//2,\n -M//2:M//2].astype(np.float64)\n\n x = x/N * physical_size\n y = y/M * physical_size\n\n squared_sum = x**2 + y**2\n squared_sum = squared_sum[None,None,:,:]\n\n wave_nos = 2. 
* np.pi / wave_lengths\n wave_nos = wave_nos.reshape([1,-1,1,1])\n\n input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])\n input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)\n input_fields = np.exp(1.j * wave_nos * input_fields)\n\n if use_circular_aperture:\n input_fields = circular_aperture(input_fields, r_cutoff)\n if amplitude_mask is not None:\n input_fields = input_fields * amplitude_mask\n\n psfs = []\n otfs = []\n # calculate PSF for each depth\n for depth_idx in range(len(depth_values)):\n # propagate through optical element\n input_field = input_fields[depth_idx:depth_idx+1,:,:,:]\n field = optical_element(input_field)\n\n # propagate field to sensor\n sensor_incident_field = propagate_exact(field, propagation_kernel)\n psf = np.square(np.abs(sensor_incident_field))\n psf_edit = []\n for wavelength in range(np.shape(psf)[1]):\n psf_image = np.squeeze(psf[0,wavelength,:,:])\n if psf_resolution is not None:\n psf_image = np.array(Image.fromarray(psf_image).resize((psf_resolution[0], psf_resolution[1]),\n resample=Image.BILINEAR))\n if sampling_factor is not None:\n psf_image = block_reduce(psf_image, block_size=(sampling_factor,sampling_factor), func=np.mean)\n if sigma is not None:\n psf_image = gaussian_filter(psf_image, sigma)\n psf_image /= np.sum(psf_image)\n psf_edit.append(np.expand_dims(np.expand_dims(psf_image, axis=0), axis=0))\n \n psf = np.concatenate(psf_edit, axis=1)\n psfs.append(psf)\n \n # calculate OTF as well\n if get_otfs:\n if otf_resolution is None:\n otf_resolution = np.shape(psf)[2:3]\n otf = psf2otf(psf, otf_resolution)\n otfs.append(otf)\n\n return psfs, otfs\n\n\ndef get_psfs_coherent(optical_element,\n depth_values,\n wave_lengths,\n optical_feature_size,\n sensor_distance,\n propagation_kernel,\n psf_resolution=None,\n use_circular_aperture=True,\n r_cutoff=None,\n use_planar_incidence=False,\n dtype=np.complex64,\n get_otfs=True,\n otf_resolution=None):\n\n wave_resolution = optical_element.resolution\n physical_size = wave_resolution[0] * optical_feature_size\n # what about magnification\n \n N, M = wave_resolution\n [x, y] = np.mgrid[-N//2:N//2,\n -M//2:M//2].astype(np.float64)\n\n x = x/N * physical_size\n y = y/M * physical_size\n\n squared_sum = x**2 + y**2\n squared_sum = squared_sum[None,None,:,:]\n\n wave_nos = 2. 
* np.pi / wave_lengths\n wave_nos = wave_nos.reshape([1,-1,1,1])\n\n input_fields = np.tile(squared_sum, [len(depth_values), len(wave_lengths), 1, 1])\n input_fields = np.sqrt(input_fields + np.array(depth_values).reshape([-1, 1, 1, 1])**2)\n input_fields = np.exp(1.j * wave_nos * input_fields)\n\n if use_circular_aperture:\n input_fields = circular_aperture(input_fields, r_cutoff)\n\n psfs = []\n otfs = []\n # calculate PSF for each depth\n for depth_idx in range(len(depth_values)):\n # propagate through optical element\n input_field = input_fields[depth_idx:depth_idx+1,:,:,:]\n field = optical_element(input_field)\n\n # propagate field to sensor\n sensor_incident_field = propagate_exact(field, propagation_kernel)\n psf = sensor_incident_field\n # psf_edit = []\n # for wavelength in range(np.shape(psf)[1]):\n # psf_image = np.squeeze(psf[0,wavelength,:,:])\n # if psf_resolution is not None:\n # psf_image = np.array(Image.fromarray(psf_image).resize((psf_resolution[0], psf_resolution[1])))\n # psf_image /= np.sum(np.abs(psf_image))\n # psf_edit.append(np.expand_dims(np.expand_dims(psf_image, axis=0), axis=0))\n \n # psf = np.concatenate(psf_edit, axis=1)\n psfs.append(psf)\n \n # calculate OTF as well\n if get_otfs:\n otf = np.fft.fft2(psf)\n otfs.append(otf)\n\n return psfs, otfs\n\ndef PhaseShiftThinLens_rgb(focal_length,wave_lengths,wave_resolution,optical_feature_size,refractive_idcs):\n #Output is 1 x wave_resolution x wave_resolution x 3\n height_map_thinlens_0 = plano_convex_initializer(focal_length,\n wave_lengths[0],\n wave_resolution,\n optical_feature_size,\n refractive_idcs[0])\n PhaseThinLens_0 = phaseshifts_from_height_map(height_map_thinlens_0, wave_lengths[0],\n refractive_idcs[0])\n height_map_thinlens_1 = plano_convex_initializer(focal_length,\n wave_lengths[1],\n wave_resolution,\n optical_feature_size,\n refractive_idcs[1])\n PhaseThinLens_1 = phaseshifts_from_height_map(height_map_thinlens_1, wave_lengths[1],\n refractive_idcs[1])\n height_map_thinlens_2 = plano_convex_initializer(focal_length,\n wave_lengths[2],\n wave_resolution,\n optical_feature_size,\n refractive_idcs[2])\n PhaseThinLens_2 = phaseshifts_from_height_map(height_map_thinlens_2, wave_lengths[2],\n refractive_idcs[2])\n PhaseThinLens = np.concatenate((PhaseThinLens_0, PhaseThinLens_1, PhaseThinLens_2), axis=1)\n PhaseThinLens = np.transpose(PhaseThinLens, [0, 2, 3, 1])\n return PhaseThinLens\n\ndef SaveHeightasTiff(height_map,filename,input_feature_size=4.29e-6,output_feature_size=1e-6,mask_size=5.6e-3,quantization_res=21.16e-9,Interp_Method='Nearest'):\n #height_map is given in meters and should be saved as a 32-bit integer where 0=0 nm and 1=21.16 nm (quantization_res)\n #Interpolate the height_map to a higher resolution, then resample at the output_feature_size\n #Nearest neighbor interpolation works by far the best\n assert (np.allclose(np.mod(mask_size, output_feature_size), 0.)), \"mask_size must be an integer multiple of the output_feature_size\"\n height_map = height_map/1e-6#Perform interpolation in um\n x_input = np.arange(height_map.shape[0]) * input_feature_size\n y_input = np.arange(height_map.shape[1]) * input_feature_size\n if Interp_Method=='Nearest':\n f = interp.RegularGridInterpolator((x_input,y_input), height_map,method='nearest',bounds_error=False,fill_value=0.)\n elif Interp_Method=='Linear':\n f = interp.RegularGridInterpolator((x_input, y_input), height_map, method='linear', bounds_error=False, fill_value=0.)\n else:\n f = interp.RectBivariateSpline(x_input, y_input, height_map, 
bbox=[None, None, None, None], kx=3, ky=3, s=0)\n n_pixel_out = int(mask_size / output_feature_size)\n if Interp_Method=='Nearest' or Interp_Method=='Linear':\n grid_x_out, grid_y_out = np.mgrid[0:n_pixel_out, 0:n_pixel_out]*output_feature_size\n grid_x_out=grid_x_out.flatten()\n grid_y_out=grid_y_out.flatten()\n points_out = np.array((grid_x_out,grid_y_out)).T\n resampled_height_map = f(points_out)\n resampled_height_map=np.reshape(resampled_height_map,(n_pixel_out,n_pixel_out))\n else:\n x_output = np.arange(n_pixel_out) * output_feature_size\n y_output = np.arange(n_pixel_out) * output_feature_size\n resampled_height_map = f(x_output,y_output)\n resampled_height_map = np.clip(resampled_height_map,height_map.min(),height_map.max())\n\n # Quantize the height map down to multiples of quantization_res. Save as an fp value in um and as an integer value, where 0 = 0 and 1 = quantization_res\n quantized_resampled_height_map_fp = (np.floor((resampled_height_map)/(quantization_res/1e-6))*(quantization_res/1e-6)).astype(np.float32)\n quantized_resampled_height_map_int = (np.floor((resampled_height_map) / (quantization_res / 1e-6))).astype(np.int32) # In um, quantized down to 21.16nm steps\n\n # import matplotlib.pyplot as plt\n # plt.subplot(121)\n # imgplot = plt.imshow((height_map))\n # plt.colorbar(imgplot)\n # plt.title('Height Map After Interpolation')\n # plt.subplot(122)\n # imgplot = plt.imshow((resampled_height_map))\n # plt.colorbar(imgplot)\n # plt.title('Height Map After Interpolation')\n # plt.show()\n #\n # import matplotlib.pyplot as plt\n # plt.subplot(121)\n # height_map_slice = height_map[1000,:]\n # imgplot = plt.hist(height_map_slice)\n # plt.title('Height Map Slice After Interpolation')\n # plt.subplot(122)\n # resampled_height_map_slice = resampled_height_map[2500,:]\n # imgplot = plt.hist(resampled_height_map_slice)\n # plt.title('Height Map Slice After Interpolation')\n # plt.show()\n\n filename_fp=filename + \"_fp32_wrt_um.tiff\"\n imsave(filename_fp, quantized_resampled_height_map_fp)\n filename_int=filename + \"_integer.tiff\"\n imsave(filename_int, quantized_resampled_height_map_int)\n return [resampled_height_map,quantized_resampled_height_map_fp,quantized_resampled_height_map_int]\n" ]
[ [ "numpy.sum", "numpy.amax", "numpy.transpose", "numpy.reshape", "numpy.abs", "numpy.expand_dims", "numpy.random.uniform", "numpy.ceil", "numpy.fft.fft2", "numpy.mod", "numpy.arange", "scipy.interpolate.RegularGridInterpolator", "numpy.pad", "numpy.array", "numpy.squeeze", "numpy.fft.ifft2", "numpy.floor", "scipy.interpolate.RectBivariateSpline", "numpy.exp", "numpy.fft.ifftshift", "scipy.ndimage.gaussian_filter", "numpy.shape", "numpy.sqrt", "numpy.concatenate" ] ]
qrsforever/workspace
[ "53c7ce7ca7da62c9fbb3d991ae9e4e34d07ece5f" ]
[ "ML/learn/intro_convolution.py" ]
[ "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# @file intro_convolution.py\n# @brief\n# @author QRS\n# @blog qrsforever.github.io\n# @version 1.0\n# @date 2019-06-03 20:52:26\n\n################################ jupyter-vim #######################################\n# https://github.com/qrsforever/vim/blob/master/bundle/.configs/jupyter-vim_conf.vim\n# %pylab --no-import-all # noqa\n#####################################################################################\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n#####################################################################################\n# <codecell>\n#####################################################################################\n\na = np.array([200, 200])\nb = np.array([a, a])\n\nkernel_horizonal = np.array([np.array([2, 2]), np.array([-2, 2])])\n\nnp.multiply(b, kernel_horizonal)\n" ]
[ [ "numpy.array", "numpy.multiply" ] ]
wmkai/quantization
[ "351d184527e9867e0394878cf91b64ffd5c6b109" ]
[ "micronet/compression/quantization/wbwtab/bn_fuse/bn_fuse.py" ]
[ "import copy\nimport sys\nsys.path.append(\"..\")\nsys.path.append(\"../../../..\")\nimport os\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom models import nin_gc, nin\n\nimport quantize\n\n\n# ******************** 是否保存模型完整参数 ********************\n#torch.set_printoptions(precision=8, edgeitems=sys.maxsize, linewidth=200, sci_mode=False)\n\ndef bn_fuse(conv, bn):\n # 可以进行“针对特征(A)二值的BN融合”的BN层位置\n global bn_counter, bin_bn_fuse_num\n bn_counter = bn_counter + 1\n # ******************** bn参数 *********************\n mean = bn.running_mean\n std = torch.sqrt(bn.running_var + bn.eps)\n gamma = bn.weight\n beta = bn.bias\n # ******************* conv参数 ********************\n w = conv.weight\n w_fused = w.clone()\n if conv.bias is not None:\n b = conv.bias\n else:\n b = mean.new_zeros(mean.shape)\n b_fused = b.clone()\n # ******************* 针对特征(A)二值的bn融合 *******************\n if(bn_counter >= 1 and bn_counter <= bin_bn_fuse_num):\n mask_positive = gamma.data.gt(0)\n mask_negetive = gamma.data.lt(0)\n\n w_fused[mask_positive] = w[mask_positive]\n b_fused[mask_positive] = b[mask_positive] - mean[mask_positive] + \\\n beta[mask_positive] * (std[mask_positive] / gamma[mask_positive])\n\n w_fused[mask_negetive] = w[mask_negetive] * -1\n b_fused[mask_negetive] = mean[mask_negetive] - b[mask_negetive] - \\\n beta[mask_negetive] * (std[mask_negetive] / gamma[mask_negetive])\n # ******************* 普通bn融合 *******************\n else:\n w_fused = w * (gamma / std).reshape([conv.out_channels, 1, 1, 1])\n b_fused = beta + (b - mean) * (gamma / std)\n if(bn_counter >= 2 and bn_counter <= bin_bn_fuse_num):\n bn_fused_conv = quantize.QuantConv2d(conv.in_channels,\n conv.out_channels,\n conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True,\n padding_mode=conv.padding_mode,\n W=args.W,\n quant_inference=True)\n else:\n bn_fused_conv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True,\n padding_mode=conv.padding_mode)\n bn_fused_conv.weight.data = w_fused\n bn_fused_conv.bias.data = b_fused\n return bn_fused_conv\n\n\ndef bn_fuse_module(module):\n for name, child in module.named_children():\n if isinstance(child, nn.Conv2d):\n conv_name_temp = name\n conv_child_temp = child\n elif isinstance(child, nn.BatchNorm2d):\n bn_fused_conv = bn_fuse(conv_child_temp, child)\n module._modules[conv_name_temp] = bn_fused_conv\n module._modules[name] = nn.Identity()\n else:\n bn_fuse_module(child)\n\n\ndef model_bn_fuse(model, inplace=False):\n if not inplace:\n model = copy.deepcopy(model)\n bn_fuse_module(model)\n return model\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu_id', action='store', default='',\n help='gpu_id')\n parser.add_argument('--prune_quant', action='store_true',\n help='this is prune_quant model')\n parser.add_argument('--model_type', type=int, default=1,\n help='model type:0-nin,1-nin_gc')\n parser.add_argument('--W', type=int, default=2,\n help='Wb:2, Wt:3, Wfp:32')\n parser.add_argument('--A', type=int, default=2,\n help='Ab:2, Afp:32')\n\n args = parser.parse_args()\n print('==> Options:', args)\n\n if args.gpu_id:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\n\n if args.prune_quant:\n print('******Prune Quant model******')\n if args.model_type == 0:\n checkpoint = torch.load('../models_save/nin.pth')\n 
quant_model_train = nin.Net(cfg=checkpoint['cfg'])\n else:\n checkpoint = torch.load('../models_save/nin_gc.pth')\n quant_model_train = nin_gc.Net(cfg=checkpoint['cfg'])\n else:\n if args.model_type == 0:\n checkpoint = torch.load('../models_save/nin.pth')\n quant_model_train = nin.Net()\n else:\n checkpoint = torch.load('../models_save/nin_gc.pth')\n quant_model_train = nin_gc.Net()\n quant_bn_fused_model_inference = copy.deepcopy(quant_model_train)\n quantize.prepare(quant_model_train, inplace=True, A=args.A, W=args.W)\n quantize.prepare(quant_bn_fused_model_inference, inplace=True,\n A=args.A, W=args.W, quant_inference=True)\n quant_model_train.load_state_dict(checkpoint['state_dict'])\n quant_bn_fused_model_inference.load_state_dict(checkpoint['state_dict'])\n\n # ********************** quant_model_train ************************\n torch.save(quant_model_train, 'models_save/quant_model_train.pth')\n torch.save(quant_model_train.state_dict(), 'models_save/quant_model_train_para.pth')\n model_array = np.array(quant_model_train)\n model_para_array = np.array(quant_model_train.state_dict())\n np.savetxt('models_save/quant_model_train.txt', [model_array], fmt='%s', delimiter=',')\n np.savetxt('models_save/quant_model_train_para.txt', [model_para_array], fmt='%s', delimiter=',')\n\n # ********************* quant_bn_fused_model_inference **********************\n bn_counter = 0\n bin_bn_fuse_num = 0\n # 统计可以进行“针对特征(A)二值的BN融合”的BN层位置\n for m in quant_bn_fused_model_inference.modules():\n if isinstance(m, quantize.ActivationQuantizer):\n bin_bn_fuse_num += 1\n model_bn_fuse(quant_bn_fused_model_inference, inplace=True) # bn融合\n print('***quant_model_train***\\n', quant_model_train)\n print('\\n***quant_bn_fused_model_inference***\\n', quant_bn_fused_model_inference)\n torch.save(quant_bn_fused_model_inference, 'models_save/quant_bn_fused_model_inference.pth')\n torch.save(quant_bn_fused_model_inference.state_dict(), 'models_save/quant_bn_fused_model_inference_para.pth')\n model_array = np.array(quant_bn_fused_model_inference)\n model_para_array = np.array(quant_bn_fused_model_inference.state_dict())\n np.savetxt('models_save/quant_bn_fused_model_inference.txt', [model_array], fmt='%s', delimiter=',')\n np.savetxt('models_save/quant_bn_fused_model_inference_para.txt', [model_para_array], fmt='%s', delimiter=',')\n print(\"************* bn_fuse 完成 **************\")\n print(\"************* bn_fused_model 已保存 **************\")\n" ]
[ [ "torch.load", "numpy.savetxt", "torch.save", "torch.sqrt", "torch.nn.Conv2d", "torch.nn.Identity", "numpy.array" ] ]
havi121/poliastro-AU
[ "98889b36892622b43cb284f64e6ecf72e3f01c6f" ]
[ "src/poliastro/plotting/tisserand.py" ]
[ "\"\"\" Generates Tisserand plots \"\"\"\nfrom enum import Enum\n\nimport numpy as np\nfrom astropy import units as u\nfrom matplotlib import pyplot as plt\n\nfrom poliastro.plotting._base import BODY_COLORS\nfrom poliastro.twobody.mean_elements import get_mean_elements\nfrom poliastro.util import norm\n\n\nclass TisserandKind(Enum):\n \"\"\"All possible Tisserand kinds\"\"\"\n\n APSIS = \"apsis\"\n ENERGY = \"energy\"\n PERIOD = \"period\"\n\n\nclass TisserandPlotter:\n \"\"\"Generates Tisserand figures\"\"\"\n\n def __init__(self, kind=TisserandKind.APSIS, axes=None):\n \"\"\"Object initializer\n\n Parameters\n ----------\n kind: TisserandKind\n Nature for the Tisserand\n axes: ~matplotlib.pyplot.axes\n Axes for the figure\n\n \"\"\"\n\n # Asign Tisserand kind\n self.kind = kind\n\n # Check if axis available\n if not axes:\n _, self.ax = plt.subplots(1, 1)\n else:\n self.ax = axes\n\n # Force axes scale regarding Tisserand kind\n self.ax.set_xscale(\"log\")\n if self.kind == TisserandKind.APSIS:\n self.ax.set_yscale(\"log\")\n\n def _solve_tisserand(\n self, body, vinf_span, num_contours, alpha_lim=(0, np.pi), N=100\n ):\n \"\"\"Solves all possible Tisserand lines with a meshgrid workflow\n\n Parameters\n ----------\n body: ~poliastro.bodies.Body\n Body to be plotted Tisserand\n vinf_array: ~astropy.units.Quantity\n Desired Vinf for the flyby\n num_contours: int\n Number of contour lines for flyby speed\n N: int\n Number of points for flyby angle\n\n Note\n ----\n The algorithm for generating Tisserand plots is the one depicted in\n \"Preliminary Trajectory Design of a Mission to Enceladus\" by David\n Falcato Fialho Palma, section 3.6\n\n \"\"\"\n\n # Generate mean orbital elements Earth\n body_rv = get_mean_elements(body).to_vectors()\n R_body, V_body = norm(body_rv.r), norm(body_rv.v)\n\n # Generate non-dimensional velocity and alpha span\n vinf_array = np.linspace(vinf_span[0], vinf_span[-1], num_contours)\n alpha_array = np.linspace(alpha_lim[0], alpha_lim[-1], N)\n vinf_array /= V_body\n\n # Construct the mesh for any configuration\n V_INF, ALPHA = np.meshgrid(vinf_array, alpha_array)\n\n # Solving for non-dimensional a_sc and ecc_sc\n A_SC = 1 / np.abs(1 - V_INF ** 2 - 2 * V_INF * np.cos(ALPHA))\n ECC_SC = np.sqrt(1 - 1 / A_SC * ((3 - 1 / A_SC - V_INF ** 2) / (2)) ** 2)\n\n # Compute main Tisserand variables\n RR_P = A_SC * R_body * (1 - ECC_SC)\n RR_A = A_SC * R_body * (1 + ECC_SC)\n TT = 2 * np.pi * np.sqrt((A_SC * R_body) ** 3 / body.parent.k)\n EE = -body.parent.k / (2 * A_SC * R_body)\n\n # Build color lines to internal canvas\n return RR_P, RR_A, EE, TT\n\n def _build_lines(self, RR_P, RR_A, EE, TT, color):\n \"\"\"Collect lines and append them to internal data\n\n Parameters\n ----------\n data: list\n Array containing [RR_P, RR_A, EE, TT, color]\n\n Returns\n -------\n lines: list\n Plotting lines for the Tisserand\n \"\"\"\n\n # Plot desired kind lines\n if self.kind == TisserandKind.APSIS:\n # Generate apsis lines\n lines = self.ax.plot(RR_A.to(u.AU), RR_P.to(u.AU), color=color)\n elif self.kind == TisserandKind.ENERGY:\n # Generate energy lines\n lines = self.ax.plot(\n RR_P.to(u.AU), EE.to(u.au ** 2 / u.s ** 2), color=color\n )\n elif self.kind == TisserandKind.PERIOD:\n # Generate period lines\n lines = self.ax.plot(RR_P.to(u.AU), TT.to(u.year), color=color)\n\n return lines\n\n def plot_line(self, body, vinf, alpha_lim=(0, np.pi), color=None):\n \"\"\"Plots body Tisserand line within flyby angle\n\n Parameters\n ----------\n body: ~poliastro.bodies.Body\n Body to 
be plotted Tisserand\n vinf: ~astropy.units.Quantity\n Vinf velocity line\n alpha_lim: tuple\n Minimum and maximum flyby angles\n color: str\n String representing for the color lines\n\n Returns\n -------\n self.ax: ~matplotlib.axes.Axes\n Apsis tisserand is the default plotting option\n\n \"\"\"\n\n # HACK: to reuse Tisserand solver, we transform input Vinf into a tuple\n vinf_span = (vinf, vinf)\n\n # Solve Tisserand parameters\n RR_P, RR_A, EE, TT = self._solve_tisserand(\n body, vinf_span, num_contours=2, alpha_lim=alpha_lim\n )\n\n # Check if color defined\n if not color:\n color = BODY_COLORS[body.name]\n\n # Build canvas lines from Tisserand parameters\n self._build_lines(RR_P, RR_A, EE, TT, color)\n\n return self.ax\n\n def plot(self, body, vinf_span, num_contours=10, color=None):\n \"\"\"Plots body Tisserand for given amount of solutions within Vinf span\n\n Parameters\n ----------\n body: ~poliastro.bodies.Body\n Body to be plotted Tisserand\n vinf_span: tuple\n Minimum and maximum Vinf velocities\n num_contours: int\n Number of points to iterate over previously defined velocities\n color: str\n String representing for the color lines\n\n Returns\n -------\n self.ax: ~matplotlib.axes.Axes\n Apsis tisserand is the default plotting option\n\n \"\"\"\n\n # Solve Tisserand parameters\n RR_P, RR_A, EE, TT = self._solve_tisserand(body, vinf_span, num_contours)\n\n # Check if color defined\n if not color:\n color = BODY_COLORS[body.name]\n\n # Build canvas lines from Tisserand parameters\n self._build_lines(RR_P, RR_A, EE, TT, color)\n\n return self.ax\n" ]
[ [ "numpy.cos", "matplotlib.pyplot.subplots", "numpy.sqrt", "numpy.meshgrid", "numpy.linspace" ] ]
romanstrazanec/ChaosEquations
[ "cff505832b3ef8db2e3dc05e299a30f52b8e6473" ]
[ "python/readme_prog_change_sub.py" ]
[ "import matplotlib.pyplot as plt\n\nn = 5\nT = [-1, -.5, 0., .5, 1]\nx = [1] * len(T)\ny = [1] * len(T)\n\nplt.subplot(122)\nfor i in range(1, n+1):\n for j in range(len(T)):\n x[j], y[j] = (x[j] + y[j]*T[j], x[j] - y[j]*T[j])\n for j in range(len(T)-1):\n plt.arrow(x[j], y[j], x[j+1]-x[j], y[j+1]-y[j], head_width=.35, head_length=.35, alpha=.3, fc='k')\n plt.plot(x, y, alpha=.7, label=f\"{i} i\")\n\nplt.subplot(121)\nfor t in T:\n x, y = (1, 1)\n xs, ys = [x], [y]\n for i in range(1, n+1):\n x, y = (x + y*t, x - y*t)\n xs.append(x)\n ys.append(y)\n plt.plot(xs, ys, '.-', alpha=.5, label=f\"T = {t}\")\n\n\nplt.legend()\nplt.subplot(122)\nplt.legend()\nplt.savefig(\"../images/plot4sub.png\")\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.arrow" ] ]
amorehead/Equivariant-GNNs
[ "4e81136242a4c8905b0e5fc39be5f704a42cc5e1" ]
[ "project/utils/modules.py" ]
[ "from typing import Dict\n\nimport dgl\nimport dgl.function as fn # for graphs\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom dgl.nn.pytorch.glob import AvgPooling, MaxPooling\nfrom dgl.nn.pytorch.softmax import edge_softmax\nfrom einops import rearrange\nfrom packaging import version\nfrom torch import Tensor, einsum, broadcast_tensors, relu, sigmoid\nfrom torch.nn import GELU\nfrom torch.nn.functional import normalize\nfrom torch.nn.parameter import Parameter\n\nfrom project.utils.fibers import Fiber, fiber2head\nfrom project.utils.from_se3cnn.utils_steerable import _basis_transformation_Q_J, get_spherical_from_cartesian_torch, \\\n precompute_sh\nfrom project.utils.utils import fourier_encode_dist, batched_index_select\nfrom project.utils.utils_profiling import profile # load before other local modules\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code derived from SE(3)-Transformer (https://github.com/FabianFuchsML/se3-transformer-public/):\n# -------------------------------------------------------------------------------------------------------------------------------------\n\n@profile\ndef get_basis(Y, max_degree):\n \"\"\"Precompute the SE(3)-equivariant weight basis.\n This is called by get_basis_and_r().\n Args:\n Y: spherical harmonic dict, returned by utils_steerable.precompute_sh()\n max_degree: non-negative int for degree of highest feature type\n Returns:\n dict of equivariant bases, keys are in form '<d_in><d_out>'\n \"\"\"\n device = Y[0].device\n # No need to backprop through the basis construction\n with torch.no_grad():\n basis = {}\n for d_in in range(max_degree + 1):\n for d_out in range(max_degree + 1):\n K_Js = []\n for J in range(abs(d_in - d_out), d_in + d_out + 1):\n # Get spherical harmonic projection matrices\n Q_J = _basis_transformation_Q_J(J, d_in, d_out)\n Q_J = Q_J.float().T.to(device)\n\n # Create kernel from spherical harmonics\n K_J = torch.matmul(Y[J], Q_J)\n K_Js.append(K_J)\n\n # Reshape so can take linear combinations with a dot product\n size = (-1, 1, 2 * d_out + 1, 1, 2 * d_in + 1, 2 * min(d_in, d_out) + 1)\n basis[f'{d_in},{d_out}'] = torch.stack(K_Js, -1).view(*size)\n return basis\n\n\ndef get_basis_and_r(G, max_degree):\n \"\"\"Return equivariant weight basis (basis) and internodal distances (r).\n Call this function *once* at the start of each forward pass of the model.\n It computes the equivariant weight basis, W_J^lk(x), and internodal\n distances, needed to compute varphi_J^lk(x), of eqn 8 of\n https://arxiv.org/pdf/2006.10503.pdf. 
The return values of this function\n can be shared as input across all SE(3)-Transformer layers in a model.\n Args:\n G: DGL graph instance of type dgl.DGLGraph()\n max_degree: non-negative int for degree of highest feature-type\n Returns:\n dict of equivariant bases, keys are in form '<d_in><d_out>'\n vector of relative distances, ordered according to edge ordering of G\n \"\"\"\n # Relative positional encodings (vector)\n r_ij = get_spherical_from_cartesian_torch(G.edata['d'])\n # Spherical harmonic basis\n Y = precompute_sh(r_ij, 2 * max_degree)\n # Equivariant basis (dict['d_in><d_out>'])\n basis = get_basis(Y, max_degree)\n # Relative distances (scalar)\n r = torch.sqrt(torch.sum(G.edata['d'] ** 2, -1, keepdim=True))\n return basis, r\n\n\n### SE(3) equivariant operations on graphs in DGL\n\nclass GConvSE3(nn.Module):\n \"\"\"A tensor field network layer as a DGL module.\n\n GConvSE3 stands for a Graph Convolution SE(3)-equivariant layer. It is the\n equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph\n conv layer in a GCN.\n At each node, the activations are split into different \"feature types\",\n indexed by the SE(3) representation type: non-negative integers 0, 1, 2, ..\n \"\"\"\n\n def __init__(self, f_in, f_out, self_interaction: bool = False, edge_dim: int = 0):\n \"\"\"SE(3)-equivariant Graph Conv Layer\n Args:\n f_in: list of tuples [(multiplicities, type),...]\n f_out: list of tuples [(multiplicities, type),...]\n self_interaction: include self-interaction in convolution\n edge_dim: number of dimensions for edge embedding\n \"\"\"\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n self.edge_dim = edge_dim\n self.self_interaction = self_interaction\n\n # Neighbor -> center weights\n self.kernel_unary = nn.ModuleDict()\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)\n\n # Center -> center weights\n self.kernel_self = nn.ParameterDict()\n if self_interaction:\n for m_in, d_in in self.f_in.structure:\n if d_in in self.f_out.degrees:\n m_out = self.f_out.structure_dict[d_in]\n W = nn.Parameter(torch.randn(1, m_out, m_in) / np.sqrt(m_in))\n self.kernel_self[f'{d_in}'] = W\n\n def __repr__(self):\n return f'GConvSE3(structure={self.f_out}, self_interaction={self.self_interaction})'\n\n def udf_u_mul_e(self, d_out):\n \"\"\"Compute the convolution for a single output feature type.\n This function is set up as a User Defined Function in DGL.\n Args:\n d_out: output feature type\n Returns:\n edge -> node function handle\n \"\"\"\n\n def fnc(edges):\n # Neighbor -> center messages\n msg = 0\n for m_in, d_in in self.f_in.structure:\n src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)\n edge = edges.data[f'({d_in},{d_out})']\n msg = msg + torch.matmul(edge, src)\n msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)\n\n # Center -> center messages\n if self.self_interaction:\n if f'{d_out}' in self.kernel_self.keys():\n dst = edges.dst[f'{d_out}']\n W = self.kernel_self[f'{d_out}']\n msg = msg + torch.matmul(W, dst)\n\n return {'msg': msg.view(msg.shape[0], -1, 2 * d_out + 1)}\n\n return fnc\n\n @profile\n def forward(self, h, G=None, r=None, basis=None, **kwargs):\n \"\"\"Forward pass of the linear layer\n Args:\n G: minibatch of (homo)graphs\n h: dict of features\n r: inter-atomic distances\n basis: pre-computed Q * Y\n Returns:\n tensor with new features [B, n_points, n_features_out]\n \"\"\"\n with G.local_scope():\n # Add node 
features to local graph scope\n for k, v in h.items():\n G.ndata[k] = v\n\n # Add edge features\n if 'w' in G.edata.keys():\n w = G.edata['w']\n feat = torch.cat([w, r], -1)\n else:\n feat = torch.cat([r, ], -1)\n\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n etype = f'({di},{do})'\n G.edata[etype] = self.kernel_unary[etype](feat, basis)\n\n # Perform message-passing for each output feature type\n for d in self.f_out.degrees:\n G.update_all(self.udf_u_mul_e(d), fn.mean('msg', f'out{d}'))\n\n return {f'{d}': G.ndata[f'out{d}'] for d in self.f_out.degrees}\n\n\nclass RadialFunc(nn.Module):\n \"\"\"NN parameterized radial profile function.\"\"\"\n\n def __init__(self, num_freq, in_dim, out_dim, edge_dim: int = 0):\n \"\"\"NN parameterized radial profile function.\n Args:\n num_freq: number of output frequencies\n in_dim: multiplicity of input (num input channels)\n out_dim: multiplicity of output (num output channels)\n edge_dim: number of dimensions for edge embedding\n \"\"\"\n super().__init__()\n self.num_freq = num_freq\n self.in_dim = in_dim\n self.mid_dim = 32\n self.out_dim = out_dim\n self.edge_dim = edge_dim\n\n self.net = nn.Sequential(nn.Linear(self.edge_dim + 1, self.mid_dim),\n BN(self.mid_dim),\n nn.ReLU(),\n nn.Linear(self.mid_dim, self.mid_dim),\n BN(self.mid_dim),\n nn.ReLU(),\n nn.Linear(self.mid_dim, self.num_freq * in_dim * out_dim))\n\n nn.init.kaiming_uniform_(self.net[0].weight)\n nn.init.kaiming_uniform_(self.net[3].weight)\n nn.init.kaiming_uniform_(self.net[6].weight)\n\n def __repr__(self):\n return f\"RadialFunc(edge_dim={self.edge_dim}, in_dim={self.in_dim}, out_dim={self.out_dim})\"\n\n def forward(self, x):\n y = self.net(x)\n return y.view(-1, self.out_dim, 1, self.in_dim, 1, self.num_freq)\n\n\nclass PairwiseConv(nn.Module):\n \"\"\"SE(3)-equivariant convolution between two single-type features\"\"\"\n\n def __init__(self, degree_in: int, nc_in: int, degree_out: int,\n nc_out: int, edge_dim: int = 0):\n \"\"\"SE(3)-equivariant convolution between a pair of feature types.\n This layer performs a convolution from nc_in features of type degree_in\n to nc_out features of type degree_out.\n Args:\n degree_in: degree of input fiber\n nc_in: number of channels on input\n degree_out: degree of out order\n nc_out: number of channels on output\n edge_dim: number of dimensions for edge embedding\n \"\"\"\n super().__init__()\n # Log settings\n self.degree_in = degree_in\n self.degree_out = degree_out\n self.nc_in = nc_in\n self.nc_out = nc_out\n\n # Functions of the degree\n self.num_freq = 2 * min(degree_in, degree_out) + 1\n self.d_out = 2 * degree_out + 1\n self.edge_dim = edge_dim\n\n # Radial profile function\n self.rp = RadialFunc(self.num_freq, nc_in, nc_out, self.edge_dim)\n\n @profile\n def forward(self, feat, basis):\n # Get radial weights\n R = self.rp(feat)\n kernel = torch.sum(R * basis[f'{self.degree_in},{self.degree_out}'], -1)\n return kernel.view(kernel.shape[0], self.d_out * self.nc_out, -1)\n\n\nclass G1x1SE3(nn.Module):\n \"\"\"Graph Linear SE(3)-equivariant layer, equivalent to a 1x1 convolution.\n\n This is equivalent to a self-interaction layer in TensorField Networks.\n \"\"\"\n\n def __init__(self, f_in, f_out, learnable=True):\n \"\"\"SE(3)-equivariant 1x1 convolution.\n Args:\n f_in: input Fiber() of feature multiplicities and types\n f_out: output Fiber() of feature multiplicities and types\n \"\"\"\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n\n # Linear mappings: 1 per output feature 
type\n self.transform = nn.ParameterDict()\n for m_out, d_out in self.f_out.structure:\n m_in = self.f_in.structure_dict[d_out]\n self.transform[str(d_out)] = nn.Parameter(torch.randn(m_out, m_in) / np.sqrt(m_in), requires_grad=learnable)\n\n def __repr__(self):\n return f\"G1x1SE3(structure={self.f_out})\"\n\n def forward(self, features, **kwargs):\n output = {}\n for k, v in features.items():\n if str(k) in self.transform.keys():\n output[k] = torch.matmul(self.transform[str(k)], v)\n return output\n\n\nclass GNormSE3(nn.Module):\n \"\"\"Graph Norm-based SE(3)-equivariant nonlinearity.\n\n Nonlinearities are important in SE(3) equivariant GCNs. They are also quite\n expensive to compute, so it is convenient for them to share resources with\n other layers, such as normalization. The general workflow is as follows:\n > for feature type in features:\n > norm, phase <- feature\n > output = fnc(norm) * phase\n\n where fnc: {R+}^m -> R^m is a learnable map from m norms to m scalars.\n \"\"\"\n\n def __init__(self, fiber, nonlin=nn.ReLU(inplace=True), num_layers: int = 0):\n \"\"\"Initializer.\n Args:\n fiber: Fiber() of feature multiplicities and types\n nonlin: nonlinearity to use everywhere\n num_layers: non-negative number of linear layers in fnc\n \"\"\"\n super().__init__()\n self.fiber = fiber\n self.nonlin = nonlin\n self.num_layers = num_layers\n\n # Regularization for computing phase: gradients explode otherwise\n self.eps = 1e-12\n\n # Norm mappings: 1 per feature type\n self.transform = nn.ModuleDict()\n for m, d in self.fiber.structure:\n self.transform[str(d)] = self._build_net(int(m))\n\n def __repr__(self):\n return f\"GNormSE3(num_layers={self.num_layers}, nonlin={self.nonlin})\"\n\n def _build_net(self, m: int):\n net = []\n for i in range(self.num_layers):\n net.append(BN(int(m)))\n net.append(self.nonlin)\n # TODO: implement cleaner init\n net.append(nn.Linear(m, m, bias=(i == self.num_layers - 1)))\n nn.init.kaiming_uniform_(net[-1].weight)\n if self.num_layers == 0:\n net.append(BN(int(m)))\n net.append(self.nonlin)\n return nn.Sequential(*net)\n\n @profile\n def forward(self, features, **kwargs):\n output = {}\n for k, v in features.items():\n # Compute the norms and normalized features\n # v shape: [...,m , 2*k+1]\n norm = v.norm(2, -1, keepdim=True).clamp_min(self.eps).expand_as(v)\n phase = v / norm\n\n # Transform on norms\n transformed = self.transform[str(k)](norm[..., 0]).unsqueeze(-1)\n\n # Nonlinearity on norm\n output[k] = (transformed * phase).view(*v.shape)\n\n return output\n\n\nclass BN(nn.Module):\n \"\"\"SE(3)-equivariant batch/layer normalization\"\"\"\n\n def __init__(self, m):\n \"\"\"SE(3)-equivariant batch/layer normalization\n Args:\n m: int for number of output channels\n \"\"\"\n super().__init__()\n self.bn = nn.LayerNorm(m)\n\n def forward(self, x):\n return self.bn(x)\n\n\nclass GConvSE3Partial(nn.Module):\n \"\"\"Graph SE(3)-equivariant node -> edge layer\"\"\"\n\n def __init__(self, f_in, f_out, edge_dim: int = 0):\n \"\"\"SE(3)-equivariant partial convolution.\n A partial convolution computes the inner product between a kernel and\n each input channel, without summing over the result from each input\n channel. 
This unfolded structure makes it amenable to be used for\n computing the value-embeddings of the attention mechanism.\n Args:\n f_in: list of tuples [(multiplicities, type),...]\n f_out: list of tuples [(multiplicities, type),...]\n \"\"\"\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n self.edge_dim = edge_dim\n\n # Node -> edge weights\n self.kernel_unary = nn.ModuleDict()\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim=edge_dim)\n\n def __repr__(self):\n return f'GConvSE3Partial(structure={self.f_out})'\n\n def udf_u_mul_e(self, d_out):\n \"\"\"Compute the partial convolution for a single output feature type.\n This function is set up as a User Defined Function in DGL.\n Args:\n d_out: output feature type\n Returns:\n node -> edge function handle\n \"\"\"\n\n def fnc(edges):\n # Neighbor -> center messages\n msg = 0\n for m_in, d_in in self.f_in.structure:\n src = edges.src[f'{d_in}'].view(-1, m_in * (2 * d_in + 1), 1)\n edge = edges.data[f'({d_in},{d_out})']\n msg = msg + torch.matmul(edge, src)\n msg = msg.view(msg.shape[0], -1, 2 * d_out + 1)\n\n return {f'out{d_out}': msg.view(msg.shape[0], -1, 2 * d_out + 1)}\n\n return fnc\n\n @profile\n def forward(self, h, G=None, r=None, basis=None, **kwargs):\n \"\"\"Forward pass of the linear layer\n Args:\n h: dict of node-features\n G: minibatch of (homo)graphs\n r: inter-atomic distances\n basis: pre-computed Q * Y\n Returns:\n tensor with new features [B, n_points, n_features_out]\n \"\"\"\n with G.local_scope():\n # Add node features to local graph scope\n for k, v in h.items():\n G.ndata[k] = v\n\n # Add edge features\n if 'w' in G.edata.keys():\n w = G.edata['w'] # shape: [#edges_in_batch, #bond_types]\n feat = torch.cat([w, r], -1)\n else:\n feat = torch.cat([r, ], -1)\n for (mi, di) in self.f_in.structure:\n for (mo, do) in self.f_out.structure:\n etype = f'({di},{do})'\n G.edata[etype] = self.kernel_unary[etype](feat, basis)\n\n # Perform message-passing for each output feature type\n for d in self.f_out.degrees:\n G.apply_edges(self.udf_u_mul_e(d))\n\n return {f'{d}': G.edata[f'out{d}'] for d in self.f_out.degrees}\n\n\nclass GMABSE3(nn.Module):\n \"\"\"An SE(3)-equivariant multi-headed self-attention module for DGL graphs.\"\"\"\n\n def __init__(self, f_value: Fiber, f_key: Fiber, n_heads: int):\n \"\"\"SE(3)-equivariant MAB (multi-headed attention block) layer.\n Args:\n f_value: Fiber() object for value-embeddings\n f_key: Fiber() object for key-embeddings\n n_heads: number of heads\n \"\"\"\n super().__init__()\n self.f_value = f_value\n self.f_key = f_key\n self.n_heads = n_heads\n self.new_dgl = version.parse(dgl.__version__) > version.parse('0.4.4')\n\n def __repr__(self):\n return f'GMABSE3(n_heads={self.n_heads}, structure={self.f_value})'\n\n def udf_u_mul_e(self, d_out):\n \"\"\"Compute the weighted sum for a single output feature type.\n This function is set up as a User Defined Function in DGL.\n Args:\n d_out: output feature type\n Returns:\n edge -> node function handle\n \"\"\"\n\n def fnc(edges):\n # Neighbor -> center messages\n attn = edges.data['a']\n value = edges.data[f'v{d_out}']\n\n # Apply attention weights\n msg = attn.unsqueeze(-1).unsqueeze(-1) * value\n\n return {'m': msg}\n\n return fnc\n\n @profile\n def forward(self, v, k: Dict = None, q: Dict = None, G=None, **kwargs):\n \"\"\"Forward pass of the linear layer\n Args:\n G: minibatch of (homo)graphs\n v: dict of value 
edge-features\n k: dict of key edge-features\n q: dict of query node-features\n Returns:\n tensor with new features [B, n_points, n_features_out]\n \"\"\"\n with G.local_scope():\n # Add node features to local graph scope\n ## We use the stacked tensor representation for attention\n for m, d in self.f_value.structure:\n G.edata[f'v{d}'] = v[f'{d}'].view(-1, self.n_heads, m // self.n_heads, 2 * d + 1)\n G.edata['k'] = fiber2head(k, self.n_heads, self.f_key, squeeze=True)\n G.ndata['q'] = fiber2head(q, self.n_heads, self.f_key, squeeze=True)\n\n # Compute attention weights\n ## Inner product between (key) neighborhood and (query) center\n G.apply_edges(fn.e_dot_v('k', 'q', 'e'))\n\n ## Apply softmax\n e = G.edata.pop('e')\n if self.new_dgl:\n # in dgl 5.3, e has an extra dimension compared to dgl 4.3\n # in the following, we get rid of this by reshaping\n n_edges = G.edata['k'].shape[0]\n e = e.view([n_edges, self.n_heads])\n e = e / np.sqrt(self.f_key.n_features)\n G.edata['a'] = edge_softmax(G, e)\n\n # Perform attention-weighted message-passing\n for d in self.f_value.degrees:\n G.update_all(self.udf_u_mul_e(d), fn.sum('m', f'out{d}'))\n\n output = {}\n for m, d in self.f_value.structure:\n output[f'{d}'] = G.ndata[f'out{d}'].view(-1, m, 2 * d + 1)\n\n return output\n\n\nclass GSE3Res(nn.Module):\n \"\"\"Graph attention block with SE(3)-equivariance and skip connection\"\"\"\n\n def __init__(self, f_in: Fiber, f_out: Fiber, edge_dim: int = 0, div: float = 4,\n n_heads: int = 1, learnable_skip=True):\n super().__init__()\n self.f_in = f_in\n self.f_out = f_out\n self.div = div\n self.n_heads = n_heads\n\n # f_mid_out has same structure as 'f_out' but #channels divided by 'div'\n # this will be used for the values\n f_mid_out = {k: int(v // div) for k, v in self.f_out.structure_dict.items()}\n self.f_mid_out = Fiber(dictionary=f_mid_out)\n\n # f_mid_in has same structure as f_mid_out, but only degrees which are in f_in\n # this will be used for keys and queries\n # (queries are merely projected, hence degrees have to match input)\n f_mid_in = {d: m for d, m in f_mid_out.items() if d in self.f_in.degrees}\n self.f_mid_in = Fiber(dictionary=f_mid_in)\n\n self.edge_dim = edge_dim\n\n self.GMAB = nn.ModuleDict()\n\n # Projections\n self.GMAB['v'] = GConvSE3Partial(f_in, self.f_mid_out, edge_dim=edge_dim)\n self.GMAB['k'] = GConvSE3Partial(f_in, self.f_mid_in, edge_dim=edge_dim)\n self.GMAB['q'] = G1x1SE3(f_in, self.f_mid_in)\n\n # Attention\n self.GMAB['attn'] = GMABSE3(self.f_mid_out, self.f_mid_in, n_heads=n_heads)\n\n # Skip connections\n self.project = G1x1SE3(self.f_mid_out, f_out, learnable=learnable_skip)\n self.add = GSum(f_out, f_in)\n # the following checks whether the skip connection would change\n # the output fibre structure; the reason can be that the input has\n # more channels than the output (for at least one degree); this would\n # then cause a (hard to debug) error in the next layer\n assert self.add.f_out.structure_dict == f_out.structure_dict, \\\n 'skip connection would change output structure'\n\n @profile\n def forward(self, features, G, **kwargs):\n # Embeddings\n v = self.GMAB['v'](features, G=G, **kwargs)\n k = self.GMAB['k'](features, G=G, **kwargs)\n q = self.GMAB['q'](features, G=G)\n\n # Attention\n z = self.GMAB['attn'](v, k=k, q=q, G=G)\n\n # Skip + residual\n z = self.project(z)\n z = self.add(z, features)\n return z\n\n\n### Helper and wrapper functions\n\nclass GSum(nn.Module):\n \"\"\"SE(3)-equivariant graph residual sum function.\"\"\"\n\n def __init__(self, 
f_x: Fiber, f_y: Fiber):\n \"\"\"SE(3)-equivariant graph residual sum function.\n Args:\n f_x: Fiber() object for fiber of summands\n f_y: Fiber() object for fiber of summands\n \"\"\"\n super().__init__()\n self.f_x = f_x\n self.f_y = f_y\n self.f_out = Fiber.combine_max(f_x, f_y)\n\n def __repr__(self):\n return f\"GSum(structure={self.f_out})\"\n\n def forward(self, x, y):\n out = {}\n for k in self.f_out.degrees:\n k = str(k)\n if (k in x) and (k in y):\n if x[k].shape[1] > y[k].shape[1]:\n diff = x[k].shape[1] - y[k].shape[1]\n zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)\n y[k] = torch.cat([y[k], zeros], 1)\n elif x[k].shape[1] < y[k].shape[1]:\n diff = y[k].shape[1] - x[k].shape[1]\n zeros = torch.zeros(x[k].shape[0], diff, x[k].shape[2]).to(y[k].device)\n x[k] = torch.cat([x[k], zeros], 1)\n\n out[k] = x[k] + y[k]\n elif k in x:\n out[k] = x[k]\n elif k in y:\n out[k] = y[k]\n return out\n\n\nclass GAvgPooling(nn.Module):\n \"\"\"Graph Average Pooling module.\"\"\"\n\n def __init__(self, type='0'):\n super().__init__()\n self.pool = AvgPooling()\n self.type = type\n\n @profile\n def forward(self, features, G, **kwargs):\n if self.type == '0':\n h = features['0'][..., -1]\n pooled = self.pool(G, h)\n elif self.type == '1':\n pooled = []\n for i in range(3):\n h_i = features['1'][..., i]\n pooled.append(self.pool(G, h_i).unsqueeze(-1))\n pooled = torch.cat(pooled, axis=-1)\n pooled = {'1': pooled}\n else:\n print('GAvgPooling for type > 0 not implemented')\n exit()\n return pooled\n\n\nclass GMaxPooling(nn.Module):\n \"\"\"Graph Max Pooling module.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.pool = MaxPooling()\n\n @profile\n def forward(self, features, G, **kwargs):\n h = features['0'][..., -1]\n return self.pool(G, h)\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code derived from egnn-pytorch (https://github.com/lucidrains/egnn-pytorch/blob/main/egnn_pytorch/egnn_pytorch.py):\n# -------------------------------------------------------------------------------------------------------------------------------------\n\nclass EnInvGraphConv(nn.Module):\n \"\"\"A graph neural network layer as a DGL module.\n\n EnInvGraphConv stands for a Graph Convolution E(n)-invariant layer. 
It is the\n    equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph\n    conv layer in a GCN.\n    \"\"\"\n\n    def __init__(\n            self,\n            node_feat,\n            edge_feat=0,\n            coord_feat=16,\n            fourier_feat=0,\n            norm_rel_coords=False,\n            norm_coord_weights=False,\n            num_nearest_neighbors=0,\n            dropout=0.0,\n            init_eps=1e-3\n    ):\n        \"\"\"E(n)-invariant Graph Conv Layer\n\n        Parameters\n        ----------\n        node_feat : int\n            Node feature size.\n        edge_feat : int\n            Edge feature size.\n        coord_feat : int\n            Coordinates feature size.\n        fourier_feat : int\n            Fourier feature size.\n        norm_rel_coords : boolean\n            Whether to normalize relative coordinates.\n        norm_coord_weights : boolean\n            Whether to constrain coordinate weights with tanh.\n        num_nearest_neighbors : int\n            If > 0, restrict messages to this many nearest neighbors.\n        dropout : float\n            Dropout probability inside the edge/node MLPs.\n        init_eps : float\n            Std of the normal distribution used for weight initialization.\n        \"\"\"\n        super().__init__()\n        self.fourier_feat = fourier_feat\n        # Stored so __repr__ can report the layer structure\n        self.node_feat = node_feat\n        self.edge_feat = edge_feat\n        self.coord_feat = coord_feat\n\n        edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1\n        dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()\n\n        self.edge_mlp = nn.Sequential(\n            nn.Linear(edge_input_dim, edge_input_dim * 2),\n            dropout,\n            GELU(),\n            nn.Linear(edge_input_dim * 2, coord_feat),\n            GELU()\n        )\n\n        self.node_mlp = nn.Sequential(\n            nn.Linear(node_feat + coord_feat, node_feat * 2),\n            dropout,\n            GELU(),\n            nn.Linear(node_feat * 2, node_feat),\n        )\n\n        self.norm_coord_weights = norm_coord_weights\n        self.norm_rel_coords = norm_rel_coords\n\n        if norm_rel_coords:\n            self.rel_coords_scale = nn.Parameter(torch.ones(1))\n\n        self.coords_mlp = nn.Sequential(\n            nn.Linear(coord_feat, coord_feat * 4),\n            dropout,\n            GELU(),\n            nn.Linear(coord_feat * 4, 1)\n        )\n\n        self.num_nearest_neighbors = num_nearest_neighbors\n\n        self.init_eps = init_eps\n        self.apply(self.init_)\n\n    def init_(self, module):\n        if type(module) in {nn.Linear}:\n            # Seems to be needed to keep the network from exploding to NaN with greater depths\n            nn.init.normal_(module.weight, std=self.init_eps)\n\n    def forward(self, h, x, e=None, mask=None):\n        \"\"\"Forward pass of the linear layer\n\n        Parameters\n        ----------\n        h : Tensor\n            The input node embedding.\n        x : Tensor\n            The input coordinates embedding.\n        e : Tensor\n            The input edge embedding.\n        mask : Tensor\n            The coordinate mask to apply.\n        \"\"\"\n        b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors\n        use_nearest = num_nearest > 0\n        nbhd_indices = None\n\n        rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')\n        rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)\n\n        if use_nearest:\n            nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices\n            rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)\n            rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)\n\n        if fourier_features > 0:\n            rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)\n            rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')\n\n        if use_nearest:\n            feats_j = batched_index_select(h, nbhd_indices, dim=1)\n        else:\n            feats_j = rearrange(h, 'b j d -> b () j d')\n\n        feats_i = rearrange(h, 'b i d -> b i () d')\n        feats_i, feats_j = broadcast_tensors(feats_i, feats_j)\n\n        edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)\n\n        if e is not None:\n            edge_input = torch.cat((edge_input, e), dim=-1)\n\n        m_ij = self.edge_mlp(edge_input)\n\n        m_i = m_ij.sum(dim=-2)\n\n        node_mlp_input = torch.cat((h, m_i), dim=-1)\n        node_out = self.node_mlp(node_mlp_input) + h\n\n        # Free GPU memory\n        rel_coords.detach()\n        rel_dist.detach()\n        feats_i.detach()\n        
feats_j.detach()\n        edge_input.detach()\n        m_i.detach()\n        m_ij.detach()\n        node_mlp_input.detach()\n        if nbhd_indices is not None:\n            nbhd_indices.detach()\n        if mask is not None:\n            mask.detach()\n\n        return node_out\n\n    def __repr__(self):\n        return f'EnInvGraphConv(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'\n\n\nclass EnGraphConv(nn.Module):\n    \"\"\"A graph neural network layer.\n\n    EnGraphConv stands for a Graph Convolution E(n)-equivariant layer. It is the\n    equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph\n    conv layer in a GCN.\n    \"\"\"\n\n    def __init__(\n            self,\n            node_feat,\n            edge_feat=0,\n            coord_feat=16,\n            fourier_feat=0,\n            norm_rel_coords=False,\n            norm_coord_weights=False,\n            num_nearest_neighbors=0,\n            dropout=0.0,\n            init_eps=1e-3\n    ):\n        \"\"\"E(n)-equivariant Graph Conv Layer\n\n        Parameters\n        ----------\n        node_feat : int\n            Node feature size.\n        edge_feat : int\n            Edge feature size.\n        coord_feat : int\n            Coordinates feature size.\n        fourier_feat : int\n            Fourier feature size.\n        norm_rel_coords : boolean\n            Whether to normalize relative coordinates.\n        norm_coord_weights : boolean\n            Whether to constrain coordinate weights with tanh.\n        num_nearest_neighbors : int\n            If > 0, restrict messages to this many nearest neighbors.\n        dropout : float\n            Dropout probability inside the edge/node/coords MLPs.\n        init_eps : float\n            Std of the normal distribution used for weight initialization.\n        \"\"\"\n        super().__init__()\n        self.fourier_feat = fourier_feat\n        # Stored so __repr__ can report the layer structure\n        self.node_feat = node_feat\n        self.edge_feat = edge_feat\n        self.coord_feat = coord_feat\n\n        edge_input_dim = (fourier_feat * 2) + (node_feat * 2) + edge_feat + 1\n        dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()\n\n        self.edge_mlp = nn.Sequential(\n            nn.Linear(edge_input_dim, edge_input_dim * 2),\n            dropout,\n            GELU(),\n            nn.Linear(edge_input_dim * 2, coord_feat),\n            GELU()\n        )\n\n        self.node_mlp = nn.Sequential(\n            nn.Linear(node_feat + coord_feat, node_feat * 2),\n            dropout,\n            GELU(),\n            nn.Linear(node_feat * 2, node_feat),\n        )\n\n        self.norm_coord_weights = norm_coord_weights\n        self.norm_rel_coords = norm_rel_coords\n\n        if norm_rel_coords:\n            self.rel_coords_scale = nn.Parameter(torch.ones(1))\n\n        self.coords_mlp = nn.Sequential(\n            nn.Linear(coord_feat, coord_feat * 4),\n            dropout,\n            GELU(),\n            nn.Linear(coord_feat * 4, 1)\n        )\n\n        self.num_nearest_neighbors = num_nearest_neighbors\n\n        self.init_eps = init_eps\n        self.apply(self.init_)\n\n    def init_(self, module):\n        if type(module) in {nn.Linear}:\n            # Seems to be needed to keep the network from exploding to NaN with greater depths\n            nn.init.normal_(module.weight, std=self.init_eps)\n\n    def forward(self, h, x, e=None, mask=None):\n        \"\"\"Forward pass of the linear layer\n\n        Parameters\n        ----------\n        h : Tensor\n            The input node embedding.\n        x : Tensor\n            The input coordinates embedding.\n        e : Tensor\n            The input edge embedding.\n        mask : Tensor\n            The coordinate mask to apply.\n        \"\"\"\n        nbhd_indices = None\n        b, n, d, fourier_features, num_nearest = *h.shape, self.fourier_feat, self.num_nearest_neighbors\n        use_nearest = num_nearest > 0\n\n        rel_coords = rearrange(x, 'b i d -> b i () d') - rearrange(x, 'b j d -> b () j d')\n        rel_dist = (rel_coords ** 2).sum(dim=-1, keepdim=True)\n\n        if use_nearest:\n            nbhd_indices = rel_dist[..., 0].topk(num_nearest, dim=-1, largest=False).indices\n            rel_coords = batched_index_select(rel_coords, nbhd_indices, dim=2)\n            rel_dist = batched_index_select(rel_dist, nbhd_indices, dim=2)\n\n        if fourier_features > 0:\n            rel_dist = fourier_encode_dist(rel_dist, num_encodings=fourier_features)\n            rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')\n\n        if use_nearest:\n            feats_j = batched_index_select(h, nbhd_indices, dim=1)\n        else:\n            feats_j = rearrange(h, 'b j d -> b () j d')\n\n        feats_i = 
rearrange(h, 'b i d -> b i () d')\n feats_i, feats_j = broadcast_tensors(feats_i, feats_j)\n\n edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1)\n\n if e is not None:\n edge_input = torch.cat((edge_input, e), dim=-1)\n\n m_ij = self.edge_mlp(edge_input)\n\n coord_weights = self.coords_mlp(m_ij)\n coord_weights = rearrange(coord_weights, 'b i j () -> b i j')\n\n if self.norm_coord_weights:\n coord_weights = coord_weights.tanh()\n\n if self.norm_rel_coords:\n rel_coords = normalize(rel_coords, dim=-1) * self.rel_coords_scale\n\n if mask is not None:\n mask_i = rearrange(mask, 'b i -> b i ()')\n\n if use_nearest:\n mask_j = batched_index_select(mask, nbhd_indices, dim=1)\n else:\n mask_j = rearrange(mask, 'b j -> b () j')\n\n mask = mask_i * mask_j\n coord_weights.masked_fill_(~mask, 0.)\n\n # Free GPU memory\n mask_i.detach()\n mask_j.detach()\n\n coords_out = einsum('b i j, b i j c -> b i c', coord_weights, rel_coords) + x\n\n m_i = m_ij.sum(dim=-2)\n\n node_mlp_input = torch.cat((h, m_i), dim=-1)\n node_out = self.node_mlp(node_mlp_input) + h\n\n # Free GPU memory\n rel_coords.detach()\n rel_dist.detach()\n feats_i.detach()\n feats_j.detach()\n edge_input.detach()\n m_i.detach()\n m_ij.detach()\n coord_weights.detach()\n node_mlp_input.detach()\n if nbhd_indices is not None:\n nbhd_indices.detach()\n if mask is not None:\n mask.detach()\n\n return node_out, coords_out\n\n def __repr__(self):\n return f'GConvEn(structure=h{self.node_feat}-x{self.coord_feat}-e{self.edge_feat})'\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code derived from DMLC (https://github.com/dmlc/dgl/blob/master/examples/pytorch/dagnn/main.py):\n# -------------------------------------------------------------------------------------------------------------------------------------\n\nclass DAGNNConv(nn.Module):\n def __init__(self,\n in_dim,\n k):\n super(DAGNNConv, self).__init__()\n\n self.s = Parameter(torch.FloatTensor(in_dim, 1))\n self.k = k\n\n self.reset_parameters()\n\n def reset_parameters(self):\n gain = nn.init.calculate_gain('sigmoid')\n nn.init.xavier_uniform_(self.s, gain=gain)\n\n def forward(self, graph, feats):\n with graph.local_scope():\n results = [feats]\n\n degs = graph.in_degrees().float()\n norm = torch.pow(degs, -0.5)\n norm = norm.to(feats.device).unsqueeze(1)\n\n for _ in range(self.k):\n feats = feats * norm\n graph.ndata['h'] = feats\n graph.update_all(fn.copy_u('h', 'm'),\n fn.sum('m', 'h'))\n feats = graph.ndata['h']\n feats = feats * norm\n results.append(feats)\n\n H = torch.stack(results, dim=1)\n S = sigmoid(torch.matmul(H, self.s))\n S = S.permute(0, 2, 1)\n H = torch.matmul(S, H).squeeze()\n\n return H\n\n\nclass MLPLayer(nn.Module):\n def __init__(self,\n in_dim,\n out_dim,\n bias=True,\n activation=None,\n dropout=0):\n super(MLPLayer, self).__init__()\n\n self.linear = nn.Linear(in_dim, out_dim, bias=bias)\n self.activation = activation\n self.dropout = nn.Dropout(dropout)\n self.reset_parameters()\n\n def reset_parameters(self):\n gain = 1.\n if self.activation is relu:\n gain = nn.init.calculate_gain('relu')\n nn.init.xavier_uniform_(self.linear.weight, gain=gain)\n if self.linear.bias is not None:\n nn.init.zeros_(self.linear.bias)\n\n def forward(self, feats):\n feats = self.dropout(feats)\n feats = self.linear(feats)\n if self.activation:\n feats = self.activation(feats)\n\n return feats\n\n\nclass DAGNN(nn.Module):\n def __init__(self,\n k,\n in_dim,\n 
hid_dim,\n out_dim,\n bias=True,\n activation=relu,\n dropout=0, ):\n super(DAGNN, self).__init__()\n self.mlp = nn.ModuleList()\n self.mlp.append(MLPLayer(in_dim=in_dim, out_dim=hid_dim, bias=bias,\n activation=activation, dropout=dropout))\n self.mlp.append(MLPLayer(in_dim=hid_dim, out_dim=out_dim, bias=bias,\n activation=None, dropout=dropout))\n self.dagnn = DAGNNConv(in_dim=out_dim, k=k)\n\n def forward(self, graph, feats):\n for layer in self.mlp:\n feats = layer(feats)\n feats = self.dagnn(graph, feats)\n return feats\n\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n# Following code curated for DeepInteract (https://github.com/jianlin-cheng/DeepInteract):\n# -------------------------------------------------------------------------------------------------------------------------------------\nclass SAGEConv(nn.Module):\n \"\"\"GraphSAGE convolution module used by the GraphSAGE model.\n This variant of the SAGEConv layer is able to infer edges via a soft estimation on messages.\n\n Parameters\n ----------\n in_feat : int\n Input feature size.\n out_feat : int\n Output feature size.\n \"\"\"\n\n def __init__(self, in_feat, out_feat):\n super(SAGEConv, self).__init__()\n # A linear submodule for projecting the input and neighbor feature to the output.\n self.linear = nn.Linear(in_feat * 2, out_feat)\n\n def forward(self, g, h):\n \"\"\"Forward computation\n\n Parameters\n ----------\n g : Graph\n The input graph.\n h : Tensor\n The input node feature.\n \"\"\"\n with g.local_scope():\n g.ndata['h'] = h\n # update_all is a message passing API.\n g.update_all(message_func=fn.copy_u('h', 'm'), reduce_func=fn.mean('m', 'h_N'))\n h_N = g.ndata['h_N']\n h_total = torch.cat([h, h_N], dim=1)\n return self.linear(h_total)\n" ]
[ [ "torch.nn.init.xavier_uniform_", "torch.stack", "torch.no_grad", "torch.nn.ModuleList", "torch.nn.init.zeros_", "torch.nn.init.kaiming_uniform_", "torch.cat", "torch.nn.Dropout", "torch.broadcast_tensors", "torch.randn", "torch.nn.ParameterDict", "torch.nn.init.normal_", "torch.nn.LayerNorm", "torch.ones", "torch.nn.functional.normalize", "torch.zeros", "torch.einsum", "torch.pow", "torch.sum", "torch.nn.init.calculate_gain", "torch.FloatTensor", "torch.nn.Linear", "torch.nn.GELU", "torch.nn.Sequential", "torch.nn.Identity", "numpy.sqrt", "torch.nn.ModuleDict", "torch.nn.ReLU", "torch.matmul" ] ]
SauravMaheshkar/rtdl
[ "c3f8051210d1cd7fdffc5a63221e3c4e84415ed8" ]
[ "bin/node.py" ]
[ "# %%\nimport gc\nimport itertools\nimport math\nimport typing as ty\nfrom copy import deepcopy\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim.swa_utils as swa_utils\nimport zero\nfrom torch import Tensor\nimport wandb\n\nimport lib\nimport lib.node as node\n\n\n# %%\nclass NODE(nn.Module):\n def __init__(\n self,\n *,\n d_in: int,\n num_layers: int,\n layer_dim: int,\n depth: int,\n tree_dim: int,\n choice_function: str,\n bin_function: str,\n d_out: int,\n categories: ty.Optional[ty.List[int]],\n d_embedding: int,\n ) -> None:\n super().__init__()\n\n if categories is not None:\n d_in += len(categories) * d_embedding\n category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)\n self.register_buffer('category_offsets', category_offsets)\n self.category_embeddings = nn.Embedding(sum(categories), d_embedding)\n nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))\n print(f'{self.category_embeddings.weight.shape=}')\n\n self.d_out = d_out\n self.block = node.DenseBlock(\n input_dim=d_in,\n num_layers=num_layers,\n layer_dim=layer_dim,\n depth=depth,\n tree_dim=tree_dim,\n bin_function=getattr(node, bin_function),\n choice_function=getattr(node, choice_function),\n flatten_output=False,\n )\n\n def forward(self, x_num: Tensor, x_cat: Tensor) -> Tensor:\n if x_cat is not None:\n x_cat = self.category_embeddings(x_cat + self.category_offsets[None])\n x = torch.cat([x_num, x_cat.view(x_cat.size(0), -1)], dim=-1)\n else:\n x = x_num\n\n x = self.block(x)\n x = x[..., : self.d_out].mean(dim=-2)\n x = x.squeeze(-1)\n return x\n\n\n# %%\nargs, output = lib.load_config()\nassert 'weight_decay' not in args, 'NODE architecture performs badly with weight decay'\nif 'swa' in args:\n assert args['swa']['n_checkpoints'] > 1\n\n# %%\nzero.set_randomness(args['seed'])\ndataset_dir = lib.get_path(args['data']['path'])\nstats: ty.Dict[str, ty.Any] = {\n 'dataset': dataset_dir.name,\n 'algorithm': Path(__file__).stem,\n **lib.load_json(output / 'stats.json'),\n}\n\nD = lib.Dataset.from_dir(dataset_dir)\nX = D.build_X(\n normalization=args['data'].get('normalization'),\n num_nan_policy='mean',\n cat_nan_policy='new',\n cat_policy=args['data'].get('cat_policy', 'indices'),\n cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),\n seed=args['seed'],\n)\nif not isinstance(X, tuple):\n X = (X, None)\n\nzero.set_randomness(args['seed'])\nY, y_info = D.build_y(args['data'].get('y_policy'))\nlib.dump_pickle(y_info, output / 'y_info.pickle')\nX = tuple(None if x is None else lib.to_tensors(x) for x in X)\nY = lib.to_tensors(Y)\ndevice = lib.get_device()\nif device.type != 'cpu':\n X = tuple(None if x is None else {k: v.to(device) for k, v in x.items()} for x in X)\n Y_device = {k: v.to(device) for k, v in Y.items()}\nelse:\n Y_device = Y\nX_num, X_cat = X\nif not D.is_multiclass:\n Y_device = {k: v.float() for k, v in Y_device.items()}\n\ntrain_size = D.size(lib.TRAIN)\nbatch_size, epoch_size = (\n stats['batch_size'],\n stats['epoch_size'],\n) = lib.get_epoch_parameters(train_size, args['training'].get('batch_size', 'v3'))\neval_batch_size = args['training']['eval_batch_size']\nchunk_size = None\nstats['chunk_sizes'] = {}\nstats['eval_batch_sizes'] = {}\n\nloss_fn = (\n F.binary_cross_entropy_with_logits\n if D.is_binclass\n else F.cross_entropy\n if D.is_multiclass\n else F.mse_loss\n)\n\nargs['model'].setdefault('d_embedding', None)\nmodel = NODE(\n d_in=0 if X_num is None else 
X_num['train'].shape[1],\n d_out=D.info['n_classes'] if D.is_multiclass else 1,\n categories=lib.get_categories(X_cat),\n **args['model'],\n).to(device)\nif torch.cuda.device_count() > 1: # type: ignore[code]\n print('Using nn.DataParallel')\n model = nn.DataParallel(model)\nstats['n_parameters'] = lib.get_n_parameters(model)\noptimizer = lib.make_optimizer(\n args['training']['optimizer'],\n model.parameters(),\n args['training']['lr'],\n args['training']['weight_decay'],\n)\n\nstream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))\nprogress = zero.ProgressTracker(args['training']['patience'])\ntraining_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}\nstage = 0\nlr_n_decays = 0\ntimer = zero.Timer()\nswa_stage_first_epoch = None\n\n\ndef print_epoch_info():\n print(\n f'\\n>>> Epoch {stream.epoch} | Stage {stage} | {lib.format_seconds(timer())} | {output}'\n )\n details = {'lr': lib.get_lr(optimizer), 'chunk_size': chunk_size}\n details.update((x, stats[x]) for x in ['batch_size', 'epoch_size', 'n_parameters'])\n print(' | '.join(f'{k} = {v}' for k, v in details.items()))\n\n\ndef get_checkpoint_path(suffix):\n return output / f'checkpoint_{suffix}.pt'\n\n\ndef step(batch_idx):\n logits = model(\n X_num[lib.TRAIN][batch_idx],\n None if X_cat is None else X_cat[lib.TRAIN][batch_idx],\n )\n targets = Y_device[lib.TRAIN][batch_idx] # type: ignore[code]\n if not D.is_multiclass:\n targets = targets.to(logits.dtype)\n return logits, targets\n\n\ndef _predict(part):\n result = []\n for idx in lib.IndexLoader(\n D.size(part),\n args['training']['eval_batch_size'],\n False,\n device,\n ):\n result.append(\n model(\n None if X_num is None else X_num[part][idx],\n None if X_cat is None else X_cat[part][idx],\n )\n )\n return torch.cat(result).cpu()\n\n\[email protected]_grad()\ndef predict(m, part):\n global eval_batch_size\n m.eval()\n random_state = zero.get_random_state()\n while eval_batch_size:\n try:\n zero.set_random_state(random_state)\n return _predict(part)\n except RuntimeError as err:\n if not lib.is_oom_exception(err):\n raise\n zero.free_memory()\n gc.collect()\n eval_batch_size //= 2\n print('New eval batch size:', eval_batch_size)\n stats['eval_batch_sizes'][stream.epoch] = eval_batch_size\n raise RuntimeError('Not enough memory even for eval_batch_size=1')\n\n\[email protected]_grad()\ndef evaluate(m, parts):\n metrics = {}\n predictions = {}\n for part in parts:\n predictions[part] = predict(m, part).numpy()\n metrics[part] = lib.calculate_metrics(\n D.info['task_type'],\n Y[part].numpy(), # type: ignore[code]\n predictions[part], # type: ignore[code]\n 'logits',\n y_info,\n )\n\n for part, part_metrics in metrics.items():\n print(f'[{part:<5}]', lib.make_summary(part_metrics))\n\n return metrics, predictions\n\n\nSTATE_VARIABLES = [\n 'progress',\n 'stats',\n 'timer',\n 'training_log',\n 'stage',\n 'swa_stage_first_epoch',\n 'lr_n_decays',\n 'chunk_size',\n 'eval_batch_size',\n]\n\n\ndef save_checkpoint(suffix):\n model_artifact = wandb.Artifact('node-artifact', type='model')\n torch.save(\n {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'stream': stream.state_dict(),\n 'random_state': zero.get_random_state(),\n **{x: globals()[x] for x in STATE_VARIABLES},\n },\n get_checkpoint_path(suffix),\n )\n lib.dump_stats(stats, output, suffix == 'final')\n lib.backup_output(output)\n model_artifact.add_file(get_checkpoint_path(suffix))\n wandb.run.log_artifact(model_artifact)\n\nfor stage in list(range(args.get('swa', 
{}).get('n_checkpoints', 1)))[::-1]:\n if get_checkpoint_path(stage).exists():\n print(f'Loading checkpoint {get_checkpoint_path(stage).name}')\n c = torch.load(get_checkpoint_path(stage))\n model.load_state_dict(c['model'])\n optimizer.load_state_dict(c['optimizer'])\n stream.load_state_dict(c['stream'])\n globals().update({x: c[x] for x in STATE_VARIABLES})\n stats.setdefault('old_stats', []).append(deepcopy(stats))\n stats.setdefault('continuations', []).append(stream.epoch)\n zero.set_random_state(c['random_state'])\n break\n\n\n# %%\ntimer.run()\nwith torch.no_grad():\n # NODE-specific initialization\n if stream.epoch == 0:\n model.eval()\n size = 2048\n while True:\n try:\n zero.set_randomness(args['seed'])\n x = step(torch.randperm(train_size)[:size])\n del x\n except RuntimeError as err:\n if not lib.is_oom_exception(err):\n raise\n size //= 2\n else:\n break\n\nwandb.init(project=\"RTDL\", config=args)\nfor epoch in stream.epochs(args['training']['n_epochs']):\n print_epoch_info()\n\n epoch_losses = []\n for batch_idx in epoch:\n loss, new_chunk_size = lib.learn_with_auto_virtual_batch(\n model, optimizer, loss_fn, step, batch_idx, batch_size, chunk_size\n )\n wandb.log({\"Training Loss\": loss})\n epoch_losses.append(loss.detach())\n if new_chunk_size and new_chunk_size < (chunk_size or batch_size):\n chunk_size = new_chunk_size\n print('New chunk size:', chunk_size)\n stats['chunk_sizes'][stream.iteration] = chunk_size\n zero.free_memory()\n gc.collect()\n epoch_losses = torch.stack(epoch_losses).tolist()\n training_log[lib.TRAIN].extend(epoch_losses)\n print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')\n\n metrics, predictions = evaluate(model, [lib.VAL, lib.TEST])\n wandb.log({\"score\": metrics[lib.VAL]['score']})\n for k, v in metrics.items():\n training_log[k].append(v)\n wandb.log({k:v})\n\n progress.update(metrics[lib.VAL]['score'])\n if progress.success:\n print('New best epoch!')\n stats[f'best_epoch_{stage}'] = stream.epoch\n stats[f'metrics_{stage}'] = metrics\n save_checkpoint(stage)\n for k, v in predictions.items():\n np.save(output / f'p_{stage}_{k}.npy', v)\n wandb.log({f\"predictions_{k}\": v})\n\n elif progress.fail:\n\n if stage == 0 and lr_n_decays < args['training']['lr_n_decays']:\n print('Reducing lr...')\n stats[f'lr_decay_{lr_n_decays}'] = stream.epoch\n lib.set_lr(optimizer, lib.get_lr(optimizer) * args['training']['lr_decay'])\n lr_n_decays += 1\n progress.forget_bad_updates()\n\n else:\n print(f'Finishing stage {stage}...')\n stats[f'time_{stage}'] = lib.format_seconds(timer())\n if 'swa' not in args or stage + 1 == args['swa']['n_checkpoints']:\n break\n\n best_stage_checkpoint = torch.load(get_checkpoint_path(stage))\n model.load_state_dict(best_stage_checkpoint['model'])\n optimizer.load_state_dict(best_stage_checkpoint['optimizer'])\n\n progress = zero.ProgressTracker(args['swa']['patience'])\n lib.set_lr(optimizer, args['training']['lr'] * args['swa']['lr_factor'])\n swa_stage_first_epoch = stream.epoch + 1\n stage += 1\n\n if stream.epoch == swa_stage_first_epoch:\n lib.set_lr(optimizer, args['training']['lr'])\n\n\n# %%\ndef load_best_model(stage):\n model.load_state_dict(torch.load(get_checkpoint_path(stage))['model'])\n\n\nif 'swa' in args:\n print('\\nRunning SWA...')\n swa_model = swa_utils.AveragedModel(model)\n swa_progress = zero.ProgressTracker(None)\n best_swa_model = None\n\n for stage in range(args['swa']['n_checkpoints']):\n load_best_model(stage)\n swa_model.update_parameters(model)\n\n if stage > 0 
and args['swa']['update_bn_n_epochs']:\n zero.set_randomness(args['seed'])\n with torch.no_grad():\n swa_utils.update_bn(\n itertools.chain.from_iterable(\n zero.iter_batches(\n X[lib.TRAIN], chunk_size or batch_size, shuffle=True\n )\n for _ in range(args['swa']['update_bn_n_epochs'])\n ),\n swa_model,\n device,\n )\n swa_progress.update(\n evaluate(swa_model if stage > 0 else model, [lib.VAL])[0][lib.VAL]['score']\n )\n if swa_progress.success:\n print('New best SWA checkpoint!')\n stats['n_swa_checkpoints'] = stage + 1\n if stage > 0:\n best_swa_model = deepcopy(swa_model)\n if best_swa_model is None:\n load_best_model(0)\n else:\n lib.load_swa_state_dict(model, best_swa_model)\nelse:\n load_best_model(0)\n\nprint('\\nRunning the final evaluation...')\nstats['metrics'], predictions = evaluate(model, lib.PARTS)\nfor k, v in predictions.items():\n np.save(output / f'p_{k}.npy', v)\n wandb.run.summary[f\"final_prediction_{k}\"] = v\nstats['time_final'] = lib.format_seconds(timer())\nsave_checkpoint('final')\nprint(f'Done! Time elapsed: {stats[\"time_final\"]}')\nprint(\n '\\n!!! WARNING !!! The metrics for a single model are stored under the \"metrics_0\" key.\\n'\n)\n" ]
[ [ "numpy.save", "torch.stack", "torch.optim.swa_utils.AveragedModel", "torch.no_grad", "torch.tensor", "torch.cuda.device_count", "torch.randperm", "torch.nn.DataParallel", "torch.cat" ] ]
ColdFrenzy/Adaptive_Learning
[ "02cdd519a7e224fe5f2a49b0c21baa3dac5ce0e1" ]
[ "models/custom_models.py" ]
[ "import tensorflow as tf\n\n\ndef dense_model(in_shape, hidden_layer_shapes, num_outputs, name):\n x = None\n inputs = tf.keras.layers.Input(shape=(in_shape,), name=\"observations\")\n for i,layer_shape in enumerate(hidden_layer_shapes):\n x = tf.keras.layers.Dense(\n layer_shape, name=\"dense_\" + str(i), activation=tf.nn.relu\n )(x if x is not None else inputs)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(\n x\n )\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name)\n\n\ndef res_net_model(in_shape, hidden_layer_shapes, num_outputs, name):\n \"\"\"\n hidden_layer_shapes : list\n list with the shape of every hidden layer\n Simple neural network block with n_layers dense layers and a residual connection \n \"\"\"\n x = None\n inputs = tf.keras.layers.Input(shape=(in_shape,), name=\"observations\")\n for i,layer_shape in enumerate(hidden_layer_shapes):\n x = tf.keras.layers.Dense(\n layer_shape, name=\"dense_\"+str(i), activation=tf.nn.relu\n )(x if x is not None else inputs)\n x = tf.keras.layers.Dense(in_shape, name=\"dense_\" + str(i) +\".2\", activation=tf.nn.relu)(\n x\n )\n x = tf.keras.layers.Add()([inputs, x])\n x = tf.keras.layers.ReLU()(x)\n x = tf.keras.layers.BatchNormalization()(x)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(\n x\n )\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name)\n\n\ndef conv_dense_model(in_shape, num_outputs, name):\n\n if len(in_shape) == 2:\n in_shape = in_shape + (1,)\n inputs = tf.keras.Input(shape=in_shape , name=\"observations\")\n\n x = tf.keras.layers.Conv2D(64, 4, name=\"conv_1\")(inputs)\n x = tf.keras.layers.Conv2D(64, 2, name=\"conv_2\")(x)\n x = tf.keras.layers.Conv2D(64, 2, name=\"conv_3\")(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(64, name=\"dense_1\",activation=tf.nn.relu)(x)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(x)\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name) \n\n\ndef conv_dense_model_connect3(in_shape,num_outputs,name):\n if len(in_shape) == 2:\n in_shape = in_shape + (1,)\n inputs = tf.keras.Input(shape=in_shape , name=\"observations\")\n\n x = tf.keras.layers.Conv2D(64, 3, name=\"conv_1\")(inputs)\n x = tf.keras.layers.Conv2D(64, 2, name=\"conv_2\")(x)\n x = tf.keras.layers.Flatten()(x)\n x = tf.keras.layers.Dense(64, name=\"dense_1\",activation=tf.nn.relu)(x)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(x)\n value_layer = tf.keras.layers.Dense(1, name=\"value\", activation=None)(x)\n return tf.keras.Model(inputs, [out_layer, value_layer], name=name) \n\ndef dense_q_model(in_shape, hidden_shape, num_outputs, name):\n inputs = tf.keras.layers.Input(shape=(in_shape,), name=\"observations\")\n hidden_layer = tf.keras.layers.Dense(\n hidden_shape, name=\"layer1\", activation=tf.nn.relu\n )(inputs)\n out_layer = tf.keras.layers.Dense(num_outputs, name=\"out\", activation=None)(\n hidden_layer\n )\n return tf.keras.Model(inputs, out_layer, name=name)\n\n\nif __name__ == \"__main__\":\n # model = res_net_model(42, [256,128,64], 7, \"res_model\")\n # model = dense_model(42, [256,128,64], 7, \"dense_block\")\n # model.summary()\n model = conv_dense_model((7,6,1),7,\"conv_dense_model\")\n 
tf.keras.utils.plot_model(model, \"conv_dense_model.png\", True)\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.utils.plot_model", "tensorflow.keras.layers.ReLU", "tensorflow.keras.Model", "tensorflow.keras.layers.Input", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.Input" ] ]
kcyu2014/nas-landmarkreg
[ "a00c3619bf4042e446e1919087f0b09fe9fa3a65" ]
[ "utils_nvidia.py" ]
[ "import argparse\nimport os\nimport shutil\nimport time\nimport math\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport numpy as np\n\ntry:\n from apex.parallel import DistributedDataParallel as DDP\n from apex.fp16_utils import *\n from apex import amp, optimizers\n from apex.multi_tensor_apply import multi_tensor_applier\nexcept ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to run this example.\")\n\n\ntry:\n from nvidia.dali.plugin.pytorch import DALIClassificationIterator\n from nvidia.dali.pipeline import Pipeline\n import nvidia.dali.ops as ops\n import nvidia.dali.types as types\nexcept ImportError:\n raise ImportError(\"Please install DALI from https://www.github.com/NVIDIA/DALI to run this example.\")\n\nfrom nasws.cnn.utils import AverageMeter\nfrom utils import accuracy\n\n\n# item() is a recent addition, so this helps with backward compatibility.\ndef to_python_float(t):\n if hasattr(t, 'item'):\n return t.item()\n else:\n return t[0]\n\n\nclass HybridTrainPipe(Pipeline):\n def __init__(self, batch_size, num_threads, device_id, data_dir, crop,\n shard_id, num_shards, dali_cpu=False, args=None,\n file_list=None\n ):\n \n super(HybridTrainPipe, self).__init__(batch_size,\n num_threads,\n device_id,\n seed=12 + device_id)\n \n self.input = ops.FileReader(file_root=data_dir,\n shard_id=args.apex_local_rank,\n num_shards=args.world_size,\n random_shuffle=True,\n pad_last_batch=True,\n file_list=file_list)\n #let user decide which pipeline works him bets for RN version he runs\n dali_device = 'cpu' if dali_cpu else 'gpu'\n decoder_device = 'cpu' if dali_cpu else 'mixed'\n # This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet\n # without additional reallocations\n device_memory_padding = 211025920 if decoder_device == 'mixed' else 0\n host_memory_padding = 140544512 if decoder_device == 'mixed' else 0\n self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB,\n device_memory_padding=device_memory_padding,\n host_memory_padding=host_memory_padding,\n random_aspect_ratio=[0.8, 1.25],\n random_area=[0.1, 1.0],\n num_attempts=100)\n self.res = ops.Resize(device=dali_device,\n resize_x=crop,\n resize_y=crop,\n interp_type=types.INTERP_TRIANGULAR)\n self.cmnp = ops.CropMirrorNormalize(device=\"gpu\",\n output_dtype=types.FLOAT,\n output_layout=types.NCHW,\n crop=(crop, crop),\n image_type=types.RGB,\n mean=[0.485 * 255,0.456 * 255,0.406 * 255],\n std=[0.229 * 255,0.224 * 255,0.225 * 255])\n self.coin = ops.CoinFlip(probability=0.5)\n logging.info('DALI \"{0}\" variant'.format(dali_device))\n\n def define_graph(self):\n rng = self.coin()\n self.jpegs, self.labels = self.input(name=\"Reader\")\n images = self.decode(self.jpegs)\n images = self.res(images)\n output = self.cmnp(images.gpu(), mirror=rng)\n return [output, self.labels]\n\n\nclass HybridValPipe(Pipeline):\n def __init__(self, batch_size, num_threads, device_id, data_dir, crop,\n size, shard_id, num_shards, args=None):\n super(HybridValPipe, self).__init__(batch_size,\n num_threads,\n device_id,\n seed=12 + device_id)\n self.input = ops.FileReader(file_root=data_dir,\n 
shard_id=args.apex_local_rank,\n num_shards=args.world_size,\n random_shuffle=False,\n pad_last_batch=True)\n self.decode = ops.ImageDecoder(device=\"mixed\", output_type=types.RGB)\n self.res = ops.Resize(device=\"gpu\",\n resize_shorter=size,\n interp_type=types.INTERP_TRIANGULAR)\n self.cmnp = ops.CropMirrorNormalize(device=\"gpu\",\n output_dtype=types.FLOAT,\n output_layout=types.NCHW,\n crop=(crop, crop),\n image_type=types.RGB,\n mean=[0.485 * 255,0.456 * 255,0.406 * 255],\n std=[0.229 * 255,0.224 * 255,0.225 * 255])\n\n def define_graph(self):\n self.jpegs, self.labels = self.input(name=\"Reader\")\n images = self.decode(self.jpegs)\n images = self.res(images)\n output = self.cmnp(images)\n return [output, self.labels]\n\n\ndef fast_collate(batch, memory_format):\n\n imgs = [img[0] for img in batch]\n targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)\n w = imgs[0].size()[1]\n h = imgs[0].size()[2]\n # print(imgs[0].size())\n tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8).contiguous(memory_format=memory_format)\n for i, img in enumerate(imgs):\n nump_array = np.asarray(img, dtype=np.uint8)\n if(nump_array.ndim < 3):\n nump_array = np.expand_dims(nump_array, axis=-1)\n # nump_array = np.rollaxis(nump_array, 2)\n # print(nump_array.shape)\n tensor[i] += torch.from_numpy(nump_array)\n return tensor, targets\n\n\nclass data_prefetcher():\n def __init__(self, loader):\n self.loader = iter(loader)\n self.stream = torch.cuda.Stream()\n self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)\n self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)\n # With Amp, it isn't necessary to manually convert data to half.\n # if args.fp16:\n # self.mean = self.mean.half()\n # self.std = self.std.half()\n self.preload()\n\n def preload(self):\n try:\n self.next_input, self.next_target = next(self.loader)\n except StopIteration:\n self.next_input = None\n self.next_target = None\n return\n # if record_stream() doesn't work, another option is to make sure device inputs are created\n # on the main stream.\n # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')\n # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')\n # Need to make sure the memory allocated for next_* is not still in use by the main stream\n # at the time we start copying to next_*:\n # self.stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(self.stream):\n self.next_input = self.next_input.cuda(non_blocking=True)\n self.next_target = self.next_target.cuda(non_blocking=True)\n # more code for the alternative if record_stream() doesn't work:\n # copy_ will record the use of the pinned source tensor in this side stream.\n # self.next_input_gpu.copy_(self.next_input, non_blocking=True)\n # self.next_target_gpu.copy_(self.next_target, non_blocking=True)\n # self.next_input = self.next_input_gpu\n # self.next_target = self.next_target_gpu\n\n # With Amp, it isn't necessary to manually convert data to half.\n # if args.fp16:\n # self.next_input = self.next_input.half()\n # else:\n self.next_input = self.next_input.float()\n self.next_input = self.next_input.sub_(self.mean).div_(self.std)\n\n def next(self):\n torch.cuda.current_stream().wait_stream(self.stream)\n input = self.next_input\n target = self.next_target\n if input is not None:\n input.record_stream(torch.cuda.current_stream())\n if target is not None:\n target.record_stream(torch.cuda.current_stream())\n 
self.preload()\n return input, target\n\n\ndef reduce_tensor(tensor, world_size):\n rt = tensor.clone()\n dist.all_reduce(rt, op=dist.reduce_op.SUM)\n rt /= world_size\n return rt\n\n\n\ndef adjust_learning_rate(optimizer, epoch, step, len_epoch, args):\n \"\"\"LR schedule that should yield 76% converged accuracy with batch size 256\"\"\"\n factor = epoch // 30\n\n if epoch >= 80:\n factor = factor + 1\n\n lr = args.learning_rate*(0.1**factor)\n\n \"\"\"Warmup\"\"\"\n if epoch < 5:\n lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)\n\n # if(args.apex_local_rank == 0):\n # print(\"epoch = {}, step = {}, lr = {}\".format(epoch, step, lr))\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n \n# def adjust_learning_rate(optimizer, epoch, args):\n# # Smaller slope for the last 5 epochs because lr * 1/250 is relatively large\n# if args.epochs - epoch > 5:\n# lr = args.learning_rate * (args.epochs - 5 - epoch) / (args.epochs - 5)\n# else:\n# lr = args.learning_rate * (args.epochs - epoch) / ((args.epochs - 5) * 5)\n# for param_group in optimizer.param_groups:\n# param_group['lr'] = lr\n# return lr \n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n end = time.time()\n prefetcher = data_prefetcher(train_loader)\n input, target = prefetcher.next()\n i = 0\n while input is not None:\n i += 1\n if args.apex_profiling >= 0 and i == args.apex_profiling:\n print(\"Profiling begun at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStart()\n\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"Body of iteration {}\".format(i))\n\n adjust_learning_rate(optimizer, epoch, i, len(train_loader), args)\n\n # compute output\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"forward\")\n logits, logtis_aux = model(input)\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n loss = criterion(logits, target)\n if args.auxiliary:\n loss_aux = criterion(logtis_aux, target)\n loss += args.auxiliary_weight * loss_aux\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"backward\")\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n # for param in model.parameters():\n # print(param.data.double().sum().item(), param.grad.data.double().sum().item())\n\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"optimizer.step()\")\n optimizer.step()\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if i%args.report_freq == 0:\n # Every report_freq iterations, check the loss, accuracy, and speed.\n # For best performance, it doesn't make sense to print these metrics every\n # iteration, since they incur an allreduce and some host<->device syncs.\n\n # Measure accuracy\n prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))\n\n # Average loss and accuracy across processes for logging\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n # to_python_float incurs a host<->device sync\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n 
torch.cuda.synchronize()\n batch_time.update((time.time() - end)/args.report_freq)\n end = time.time()\n\n if args.apex_local_rank == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {3:.3f} ({4:.3f})\\t'\n 'Loss {loss.val:.10f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, len(train_loader),\n args.world_size*args.batch_size/batch_time.val,\n args.world_size*args.batch_size/batch_time.avg,\n batch_time=batch_time,\n loss=losses, top1=top1, top5=top5))\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_push(\"prefetcher.next()\")\n input, target = prefetcher.next()\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n # Pop range \"Body of iteration {}\".format(i)\n if args.apex_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if args.apex_profiling >= 0 and i == args.apex_profiling + 10:\n print(\"Profiling ended at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStop()\n quit()\n return top1.avg, losses.avg\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n\n prefetcher = data_prefetcher(val_loader)\n input, target = prefetcher.next()\n i = 0\n while input is not None:\n i += 1\n\n # compute output\n with torch.no_grad():\n output, _ = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # TODO: Change timings to mirror train().\n if args.apex_local_rank == 0 and i % args.report_freq == 0:\n logging.info('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {2:.3f} ({3:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, len(val_loader),\n args.world_size * args.batch_size / batch_time.val,\n args.world_size * args.batch_size / batch_time.avg,\n batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n input, target = prefetcher.next()\n\n logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg, losses.avg\n\n\ndef dali_apex_train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to train mode\n model.train()\n end = time.time()\n\n for i, data in enumerate(train_loader):\n input = data[0][\"data\"]\n target = data[0][\"label\"].squeeze().cuda().long()\n train_loader_len = int(math.ceil(train_loader._size / args.batch_size))\n\n if args.dali_profiling >= 0 and i == args.dali_profiling:\n print(\"Profiling begun at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStart()\n\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"Body of iteration 
{}\".format(i))\n\n # adjust_learning_rate(optimizer, epoch, i, train_loader_len, args)\n if args.debug:\n if i > 10:\n logging.info('Break in debug mode after 10 batchs...')\n break\n\n # compute output\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"forward\")\n logits, logtis_aux = model(input)\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n loss = criterion(logits, target)\n if args.auxiliary:\n loss_aux = criterion(logtis_aux, target)\n loss += args.auxiliary_weight * loss_aux\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"backward\")\n if args.apex_opt_level is not None:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_push(\"optimizer.step()\")\n optimizer.step()\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if i%args.report_freq == 0:\n # Every print_freq iterations, check the loss, accuracy, and speed.\n # For best performance, it doesn't make sense to print these metrics every\n # iteration, since they incur an allreduce and some host<->device syncs.\n\n # Measure accuracy\n prec1, prec5 = accuracy(logits.data, target, topk=(1, 5))\n\n # Average loss and accuracy across processes for logging\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n # to_python_float incurs a host<->device sync\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n torch.cuda.synchronize()\n batch_time.update((time.time() - end)/args.report_freq)\n end = time.time()\n\n if args.apex_local_rank == 0:\n logging.info('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {3:.3f} ({4:.3f})\\t'\n 'Loss {loss.val:.10f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n epoch, i, train_loader_len,\n args.world_size*args.batch_size/batch_time.val,\n args.world_size*args.batch_size/batch_time.avg,\n batch_time=batch_time,\n loss=losses, top1=top1, top5=top5))\n\n # Pop range \"Body of iteration {}\".format(i)\n if args.dali_profiling >= 0: torch.cuda.nvtx.range_pop()\n\n if args.dali_profiling >= 0 and i == args.dali_profiling + 2:\n print(\"Profiling ended at iteration {}\".format(i))\n torch.cuda.cudart().cudaProfilerStop()\n quit()\n\n return top1.avg, losses.avg\n\n\n\ndef dali_validate(val_loader, model, criterion, args):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n\n for i, data in enumerate(val_loader):\n input = data[0][\"data\"]\n target = data[0][\"label\"].squeeze().cuda().long()\n val_loader_len = int(val_loader._size / args.batch_size)\n\n if args.debug:\n if i > 10:\n break\n\n # compute output\n with torch.no_grad():\n output, _ = model(input)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n prec1 = reduce_tensor(prec1, args.world_size)\n 
prec5 = reduce_tensor(prec5, args.world_size)\n else:\n reduced_loss = loss.data\n\n losses.update(to_python_float(reduced_loss), input.size(0))\n top1.update(to_python_float(prec1), input.size(0))\n top5.update(to_python_float(prec5), input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # TODO: Change timings to mirror train().\n if args.apex_local_rank == 0 and i % args.report_freq == 0:\n logging.info('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Speed {2:.3f} ({3:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(\n i, val_loader_len,\n args.world_size * args.batch_size / batch_time.val,\n args.world_size * args.batch_size / batch_time.avg,\n batch_time=batch_time, loss=losses,\n top1=top1, top5=top5))\n\n logging.info(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg, top5.avg, losses.avg\n" ]
[ [ "torch.cuda.cudart", "torch.no_grad", "torch.tensor", "numpy.asarray", "torch.cuda.synchronize", "torch.from_numpy", "torch.distributed.all_reduce", "numpy.expand_dims", "torch.cuda.nvtx.range_push", "torch.cuda.current_stream", "torch.cuda.stream", "torch.cuda.Stream", "torch.cuda.nvtx.range_pop" ] ]
pygongnlp/gramcorrector
[ "1b5b7f46f7185675b46341e40b2a866fd6d1d7ad" ]
[ "sec/test.py" ]
[ "import argparse\nimport torch\nimport os\n\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM\nfrom utils import load_data, write_to_file\nfrom metric import compute_metrics\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--model_name_or_path\", default=\"model/chinese_bert\", type=str)\n parser.add_argument(\"--save_path\", default=\"./\", type=str)\n parser.add_argument(\"--test_file\", default=\"data/sighan/test.json\", type=str)\n args = parser.parse_args()\n\n assert os.path.exists(args.save_path)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)\n model = AutoModelForMaskedLM.from_pretrained(args.model_name_or_path)\n\n checkpoint = torch.load(os.path.join(args.save_path, \"model.tar\"), map_location=device)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n model = model.to(device)\n\n src, trg = load_data(file_path=args.test_file, mode=\"test\")\n\n results = []\n for s, t in zip(src, trg):\n inputs = tokenizer(t, return_tensors=\"pt\")\n inputs = inputs.to(device)\n outputs = model(**inputs)\n\n logits = outputs.logits[0][1:-1] #filter [CLS] & [SEP]\n predict = tokenizer.convert_ids_to_tokens(logits.argmax(-1).tolist())\n\n s_tok = tokenizer.tokenize(s)\n t_tok = tokenizer.tokenize(t)\n assert len(s_tok) == len(t_tok) == len(predict)\n results.append([s_tok, t_tok, predict])\n\n metrics = compute_metrics(results)\n print(f\"{', '.join([f'{key}={value:.4f}' for key, value in metrics.items()])}\")\n\n write_to_file(file_path=os.path.join(args.save_path, \"result_test.json\"), results=results)\n print(f\"write to {os.path.join(args.save_path, 'result_test.json')}\")\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.cuda.is_available" ] ]
NKPmedia/rising
[ "2a580e9c74c8fb690e27e8bacf09ab97184ab1ee" ]
[ "rising/transforms/spatial.py" ]
[ "# from __future__ import annotations\nimport torch\n\nfrom itertools import combinations\nfrom typing import Union, Sequence, Callable, Optional\nfrom torch.multiprocessing import Value\n\nfrom rising.random import AbstractParameter, DiscreteParameter\nfrom rising.transforms.abstract import AbstractTransform, BaseTransform\nfrom rising.transforms.functional.spatial import *\n\n\n__all__ = [\"Mirror\", \"Rot90\", \"ResizeNative\",\n \"Zoom\", \"ProgressiveResize\", \"SizeStepScheduler\"]\n\nscheduler_type = Callable[[int], Union[int, Sequence[int]]]\n\n\nclass Mirror(BaseTransform):\n \"\"\"Random mirror transform\"\"\"\n\n def __init__(self,\n dims: Union[int, DiscreteParameter,\n Sequence[Union[int, DiscreteParameter]]],\n keys: Sequence[str] = ('data',), grad: bool = False, **kwargs):\n \"\"\"\n Args:\n dims: axes which should be mirrored\n keys: keys which should be mirrored\n prob: probability for mirror. If float value is provided,\n it is used for all dims\n grad: enable gradient computation inside transformation\n **kwargs: keyword arguments passed to superclass\n\n Examples:\n >>> # Use mirror transform for augmentations\n >>> from rising.random import DiscreteCombinationsParameter\n >>> # We sample from all possible mirror combination for\n >>> # volumetric data\n >>> trafo = Mirror(DiscreteCombinationsParameter((0, 1, 2)))\n \"\"\"\n super().__init__(augment_fn=mirror, dims=dims, keys=keys, grad=grad,\n property_names=('dims',), **kwargs)\n\n\nclass Rot90(AbstractTransform):\n \"\"\"Rotate 90 degree around dims\"\"\"\n\n def __init__(self, dims: Union[Sequence[int], DiscreteParameter],\n keys: Sequence[str] = ('data',),\n num_rots: Sequence[int] = (0, 1, 2, 3),\n prob: float = 0.5, grad: bool = False, **kwargs):\n \"\"\"\n Args:\n dims: dims/axis ro rotate. 
If more than two dims are\n                provided, 2 dimensions are randomly chosen at each call\n            keys: keys which should be rotated\n            num_rots: possible values for number of rotations\n            prob: probability for rotation\n            grad: enable gradient computation inside transformation\n            kwargs: keyword arguments passed to superclass\n\n        See Also:\n            :func:`torch.Tensor.rot90`\n        \"\"\"\n        super().__init__(grad=grad, **kwargs)\n        self.keys = keys\n        self.prob = prob\n        if not isinstance(dims, DiscreteParameter):\n            if len(dims) > 2:\n                dims = list(combinations(dims, 2))\n            else:\n                dims = (dims,)\n            dims = DiscreteParameter(dims)\n        self.register_sampler(\"dims\", dims)\n        self.register_sampler(\"num_rots\", DiscreteParameter(num_rots))\n\n    def forward(self, **data) -> dict:\n        \"\"\"\n        Apply transformation\n\n        Args:\n            data: dict with tensors\n\n        Returns:\n            dict: dict with augmented data\n        \"\"\"\n        if torch.rand(1) < self.prob:\n            num_rots = self.num_rots\n            rand_dims = self.dims\n\n            for key in self.keys:\n                data[key] = rot90(data[key], k=num_rots, dims=rand_dims)\n        return data\n\n\nclass ResizeNative(BaseTransform):\n    \"\"\"Resize data to given size\"\"\"\n\n    def __init__(self, size: Union[int, Sequence[int]], mode: str = 'nearest',\n                 align_corners: Optional[bool] = None, preserve_range: bool = False,\n                 keys: Sequence = ('data',), grad: bool = False, **kwargs):\n        \"\"\"\n        Args:\n            size: spatial output size (excluding batch size and\n                number of channels)\n            mode: one of ``nearest``, ``linear``, ``bilinear``, ``bicubic``,\n                ``trilinear``, ``area`` (for more information see\n                :func:`torch.nn.functional.interpolate`)\n            align_corners: input and output tensors are aligned by the center \\\n                points of their corner pixels, preserving the values at the\n                corner pixels.\n            preserve_range: output tensor has same range as input tensor\n            keys: keys which should be augmented\n            grad: enable gradient computation inside transformation\n            **kwargs: keyword arguments passed to augment_fn\n        \"\"\"\n        super().__init__(augment_fn=resize_native, size=size, mode=mode,\n                         align_corners=align_corners, preserve_range=preserve_range,\n                         keys=keys, grad=grad, **kwargs)\n\n\nclass Zoom(BaseTransform):\n    \"\"\"Apply augment_fn to keys. By default the scaling factor is sampled\n    from a uniform distribution with the range specified by\n    :attr:`random_args`\n    \"\"\"\n\n    def __init__(self, scale_factor: Union[Sequence, AbstractParameter] = (0.75, 1.25),\n                 mode: str = 'nearest', align_corners: bool = None,\n                 preserve_range: bool = False, keys: Sequence = ('data',),\n                 grad: bool = False, **kwargs):\n        \"\"\"\n        Args:\n            scale_factor: positional arguments passed to the random function.\n                If Sequence[Sequence] is provided, a random value for each item\n                in the outer Sequence is generated. 
This can be used to set\n                different ranges for different axes.\n            mode: one of `nearest`, `linear`, `bilinear`,\n                `bicubic`, `trilinear`, `area` (for more\n                information see :func:`torch.nn.functional.interpolate`)\n            align_corners: input and output tensors are aligned by the center\n                points of their corner pixels, preserving the values at the\n                corner pixels.\n            preserve_range: output tensor has same range as input tensor\n            keys: keys which should be augmented\n            grad: enable gradient computation inside transformation\n            **kwargs: keyword arguments passed to augment_fn\n\n        See Also:\n            :func:`random.uniform`, :func:`torch.nn.functional.interpolate`\n        \"\"\"\n        super().__init__(augment_fn=resize_native, scale_factor=scale_factor,\n                         mode=mode, align_corners=align_corners,\n                         preserve_range=preserve_range, keys=keys, grad=grad,\n                         property_names=('scale_factor',), **kwargs)\n\n\nclass ProgressiveResize(ResizeNative):\n    \"\"\"Resize data to sizes specified by scheduler\"\"\"\n\n    def __init__(self, scheduler: scheduler_type, mode: str = 'nearest',\n                 align_corners: bool = None, preserve_range: bool = False,\n                 keys: Sequence = ('data',), grad: bool = False, **kwargs):\n        \"\"\"\n        Args:\n            scheduler: scheduler which determines the current size.\n                The scheduler is called with the current iteration of the\n                transform\n            mode: one of ``nearest``, ``linear``, ``bilinear``, ``bicubic``,\n                ``trilinear``, ``area`` (for more information see\n                :func:`torch.nn.functional.interpolate`)\n            align_corners: input and output tensors are aligned by the center\n                points of their corner pixels, preserving the values at the\n                corner pixels.\n            preserve_range: output tensor has same range as input tensor\n            keys: keys which should be augmented\n            grad: enable gradient computation inside transformation\n            **kwargs: keyword arguments passed to augment_fn\n\n        Warnings:\n            When this transformation is used in combination with\n            multiprocessing, the step counter is not perfectly synchronized\n            between multiple processes.\n            As a result the step count may jump between values\n            in a range of the number of processes used.\n        \"\"\"\n        super().__init__(size=0, mode=mode, align_corners=align_corners,\n                         preserve_range=preserve_range,\n                         keys=keys, grad=grad, **kwargs)\n        self.scheduler = scheduler\n        self._step = Value('i', 0)\n\n    def reset_step(self) -> ResizeNative:\n        \"\"\"\n        Reset step to 0\n\n        Returns:\n            ResizeNative: returns self to allow chaining\n        \"\"\"\n        with self._step.get_lock():\n            self._step.value = 0\n        return self\n\n    def increment(self) -> ResizeNative:\n        \"\"\"\n        Increment step by 1\n\n        Returns:\n            ResizeNative: returns self to allow chaining\n        \"\"\"\n        with self._step.get_lock():\n            self._step.value += 1\n        return self\n\n    @property\n    def step(self) -> int:\n        \"\"\"\n        Current step\n\n        Returns:\n            int: number of steps\n        \"\"\"\n        return self._step.value\n\n    def forward(self, **data) -> dict:\n        \"\"\"\n        Resize data\n\n        Args:\n            **data: input batch\n\n        Returns:\n            dict: augmented batch\n        \"\"\"\n        self.kwargs[\"size\"] = self.scheduler(self.step)\n        self.increment()\n        return super().forward(**data)\n\n\nclass SizeStepScheduler:\n    \"\"\"Scheduler returns size when milestone is reached\"\"\"\n\n    def __init__(self, milestones: Sequence[int],\n                 sizes: Union[Sequence[int], Sequence[Sequence[int]]]):\n        \"\"\"\n        Args:\n            milestones: contains number of iterations where size should be changed\n            sizes: sizes corresponding to milestones\n        \"\"\"\n        if len(milestones) != len(sizes) - 1:\n            raise TypeError(\"Sizes must include initial size and thus \"\n                            \"has one 
element more than milestones.\")\n        self.targets = sorted(zip((0, *milestones), sizes), key=lambda x: x[0], reverse=True)\n\n    def __call__(self, step) -> Union[int, Sequence[int], Sequence[Sequence[int]]]:\n        \"\"\"\n        Return size with regard to milestones\n\n        Args:\n            step: current step\n\n        Returns:\n            Union[int, Sequence[int], Sequence[Sequence[int]]]: current size\n        \"\"\"\n        for t in self.targets:\n            if step >= t[0]:\n                return t[1]\n        return self.targets[-1][1]\n" ]
[ [ "torch.rand", "torch.multiprocessing.Value" ] ]
sai6kiran/TwitterBotFarms
[ "cf6bfddda9fac1e27477186fd4f4b086ac711781" ]
[ "kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/PythonScripts/NaiveBayesClassifier.py" ]
[ "from contractionsDict import contractionsDict\nimport pandas as pd\nimport time\nimport numpy as np\nimport re\nfrom pattern.en import pluralize, singularize\nimport sys\nimport csv\nfrom LemmitizationandStemConverter import ObtainStemAndLemmatizationWord\n\ndef priorProb(scv):\n\tpct = 0\t#positive count total\n\tnct = 0\t#negative count total\n\tNct = 0\t#neutral count total\n\tntt = 0\t#no. training tweets\n\tfor index, row in scv.items():\n\t\t#print(row)\n\t\tif(row.lower() == 'positive'):\n\t\t\tpct+=1\n\t\tif(row.lower() == 'negative'):\n\t\t\tnct+=1\n\t\tif(row.lower() == 'neutral'):\n\t\t\tNct+=1\n\t\tntt+=1\n\tpc1 = pct/ntt\t#Postive Class 1\n\tnc2 = nct/ntt\t#Negative Class 2\n\tnc3 = Nct/ntt\t#Neutral Class 3\n\treturn((pc1, nc2, nc3))\n\ndef removeEmojis(txt):\n\temoji_pattern = re.compile(u\"[^\\U00000000-\\U0000d7ff\\U0000e000-\\U0000ffff]\", flags=re.UNICODE)\n\treturn(emoji_pattern.sub(u' ', txt))\n\ndef expandContractions(s, contractionsDict=contractionsDict):\n\tcontractionsRe = re.compile('(%s)' % '|'.join(contractionsDict.keys()))\n\tdef replace(match):\n\t\treturn contractionsDict[match.group(0)]\n\treturn contractionsRe.sub(replace, s)\n\ndef CleanUp(text):\n\t#Removes links from tweet:\n\ttext = re.sub('http://\\S+|https://\\S+', ' ', text)\n\n\t#Remove #, _, -, and @ from tweet:\n\ttext = text.replace(\"#\", \" \").replace(\"_\", \" \").replace(\"@\", \" \").replace(\"-\", \" \")\n\n\t#Replace ? with questionmark and ! with exclaimationmark:\n\ttext = text.replace(\"?\", \" questionmark\").replace(\"!\", \" exclaimationmark\")\n\n\t#Remove all other non alphanumeric special characters from tweet:\n\ttext = re.sub('\\W+ ',' ', text)\n\n\t#Removes whitespaces from tweet:\n\ttext = text.replace(\"\\t\", \" \").replace(\"\\n\", \" \")\n\ttext = re.sub(r' {2,}' , ' ', text)\n\n\t#Removes emojis from tweet:\n\ttext = removeEmojis(text)\n\n\treturn text\n\n\ndef likelihoodFunctionInformation(txt, ldf):\n\ttsv = 0\t#Total Sentiment Value\n\tnpw = 0\t\t#No. of positive words\n\tnnw = 0\t\t#No. negative words\n\tnNw = 0\t\t#No. 
of neutral words\n\n\tpsv = 0\t\t#Previous Word sentiment value\n\tnac = False\t#Negative conjunctive Adverb check\n\twrd = \" \"\t#Word to parse\n\tt3 = time.time()\n\tfor ewt in txt.split():\n\n\t\t#Check for all versions of word in Sentiment Dictionary:\n\t\t#print(ewt)\n\t\t#t1 = time.time()\n\t\tsll = ObtainStemAndLemmatizationWord(ewt) #Obtaining the noun version and root version of word using the function.\n\t\t#print(sll)\n\t\tif(sll[0]!=ewt):\n\t\t\tif(bool(sll[0] and sll[0].strip())==True):\t#Checking if the noun part of the word is in the Sentiment Dictionary.\n\t\t\t\tsnw = singularize(sll[0]) #Noun part of word in singular tense.\n\t\t\t\tpnw = pluralize(sll[0]) #Noun part of word in plural tense.\n\t\t\t\tsrw = singularize(sll[1]) #Root part of word in singular tense.\n\t\t\t\tprw = pluralize(sll[1]) #Root part of word in plural tense.\n\t\t\t\t#Check if singular part of noun of word is in the Sentiment Dictionary:\n\t\t\t\tif((snw in ldf[0].word.values) or (snw in ldf[1].word.values) or (snw in ldf[2].word.values) or (snw in ldf[3].word.values)):\n\t\t\t\t\twrd = snw\n\t\t\t\t#Check if plural part of noun of word is in the Sentiment Dictionary:\n\t\t\t\telif((pnw in ldf[0].word.values) or (pnw in ldf[1].word.values) or (pnw in ldf[2].word.values) or (pnw in ldf[3].word.values)):\n\t\t\t\t\twrd = pnw\n\t\t\t\t#Check if singular part of root of word is in the Sentiment Dictionary:\n\t\t\t\telif((srw in ldf[0].word.values) or (srw in ldf[1].word.values) or (srw in ldf[2].word.values) or (srw in ldf[3].word.values)):\n\t\t\t\t\twrd = srw\n\t\t\t\t#Check if plural part of root of word is in the Sentiment Dictionary:\n\t\t\t\telif((prw in ldf[0].word.values) or (prw in ldf[1].word.values) or (prw in ldf[2].word.values) or (prw in ldf[3].word.values)):\n\t\t\t\t\twrd = prw\n\t\t\t\telse:\n\t\t\t\t\twrd = ewt\n\t\t\telif(sll[1]!=ewt):\t#Checking if the root version of the word is in the Sentiment Dictionary.\n\t\t\t\tsrw = singularize(sll[1]) #Root part of word in singular tense.\n\t\t\t\tprw = pluralize(sll[1]) #Root part of word in plural tense.\n\t\t\t\t#Check if singular part of root of word is in the Sentiment Dictionary:\n\t\t\t\tif((srw in ldf[0].word.values) or (srw in ldf[1].word.values) or (srw in ldf[2].word.values) or (srw in ldf[3].word.values)):\n\t\t\t\t\twrd = srw\n\t\t\t\t#Check if plural part of root of word is in the Sentiment Dictionary:\n\t\t\t\telif((prw in ldf[0].word.values) or (prw in ldf[1].word.values) or (prw in ldf[2].word.values) or (prw in ldf[3].word.values)):\n\t\t\t\t\twrd = prw\n\t\t\t\telse:\n\t\t\t\t\twrd = ewt\n\t\t\telse:\n\t\t\t\twrd = ewt\n\t\telse:\n\t\t\twrd = ewt\n\n\t\twrd = ewt\n\n\t\t#Run the Likelihood Function Information on the word.\n\t\twsv = 0\t#Word Sentiment Value\n\t\tsfw = singularize(wrd)\t#Singular Form of Word\n\t\tpfw = pluralize(wrd)\t#Plural Form of Word\n\t\t#print(wrd, tsv)\t#Very Important Print Statement for Debugging\n\n\t\t#Checking if word matches a negative conjunctive adverb that forms different phrases in the tweet:\n\t\tif wrd.lower()=='not' or wrd.lower()=='but' or wrd.lower()=='however' or wrd.lower()=='instead' or wrd.lower()=='otherwise' or wrd.lower()=='contrarily':\n\t\t\tif(nac==False):\n\t\t\t\tnac=True\n\t\t\telse:\n\t\t\t\tnac=False\n\t\tif(nac==False):\n\t\t\t#Checking if words match special words\n\t\t\tif sfw.lower()=='maga':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='makeamericagreatagain':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='make america great 
again':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif \"email\" in sfw.lower():\n\t\t\t\tnnw += 5\n\t\t\t\ttsv -= 5\n\t\t\telif wrd.lower()=='questionmark':\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 10\n\t\t\t\tpsv = 0\n\t\t\telif wrd.lower()=='exclaimationmark':\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 10\n\t\t\t\tpsv = 0\n\n\t\t\t#Checking if word exists in the Sentiment Dictionary. Assign sentiment value and/or category if word exists. Otherwise categorize word as neutral.\n\t\t\telif sfw.lower() in ldf[0].word.values:\t#Check if singular version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(ewt, sfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif pfw.lower() in ldf[0].word.values:\t#Check if plural version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(ewt, pfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif sfw.lower() in ldf[1].word.values:\t#Check if singular version of word is in dataframe2\n\t\t\t\t#print(ewt, sfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif pfw.lower() in ldf[1].word.values:\t#Check if plural version of word is in dataframe2\n\t\t\t\t#print(ewt, pfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnpw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnnw += 1\n\t\t\t\ttsv += wsv\n\t\t\t\tpsv = wsv\n\t\t\telif sfw.lower() in ldf[2].word.values:\t#Check if singular version of word is in dataframe3\n\t\t\t\t#print(ewt, sfw, 3, tsv)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\telif pfw.lower() in ldf[2].word.values:\t#Check if plural version of word is in dataframe3\n\t\t\t\t#print(ewt, pfw, 3, tsv)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\telif sfw.lower() in ldf[3].word.values:\t#Check if singular version of word is in dataframe4\n\t\t\t\t#print(ewt, sfw, 4)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\telif pfw.lower() in ldf[3].word.values:\t#Check if plural version of word is in dataframe4\n\t\t\t\t#print(ewt, pfw, 4)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\telse:\t\t\t\t\t#The word must be a \"neutral\" word\n\t\t\t\t#print(wrd, sfw, pfw)\n\t\t\t\tnNw += 1\n\t\telse:\n\t\t\t#Checking if words match special words\n\t\t\tif sfw.lower()=='maga':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='makeamericagreatagain':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif sfw.lower()=='make america great again':\n\t\t\t\tnpw += 100\n\t\t\t\ttsv += 100\n\t\t\telif \"email\" in sfw.lower():\n\t\t\t\tnnw += 5\n\t\t\t\ttsv -= 5\n\t\t\telif wrd.lower()=='questionmark':\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 10\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tpsv = 0\n\t\t\t\tnac = False\n\t\t\telif wrd.lower()=='exclaimationmark':\n\t\t\t\tif(psv<0):\n\t\t\t\t\tnpw += 10\n\t\t\t\t\ttsv += 
10\n\t\t\t\tif(psv>0):\n\t\t\t\t\tnnw += 10\n\t\t\t\t\ttsv -= 10\n\t\t\t\tpsv = 0\n\t\t\t\tnac = False\n\n\t\t\t#Checking if word exists in the Sentiment Dictionary. Assign sentiment value and/or category if word exists. Otherwise categorize word as neutral.\n\t\t\telif sfw.lower() in ldf[0].word.values: #Check if singular version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(sfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac=False\n\t\t\telif pfw.lower() in ldf[0].word.values: #Check if plural version of word is in dataframe1\n\t\t\t\twsv = int(ldf[0].iloc[ldf[0]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\t#print(pfw, 1, wsv, tsv)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac = False\n\t\t\telif sfw.lower() in ldf[1].word.values: #Check if singular version of word is in dataframe2\n\t\t\t\t#print(sfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==sfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac = False\n\t\t\telif pfw.lower() in ldf[1].word.values: #Check if plural version of word is in dataframe2\n\t\t\t\t#print(pfw, 2)\n\t\t\t\twsv = int(ldf[1].iloc[ldf[1]['word'].loc[lambda x: x==pfw.lower()].index.tolist()[0]].sentiment)\n\t\t\t\tif(wsv>0):\n\t\t\t\t\tnnw += 1\n\t\t\t\telif(wsv<0):\n\t\t\t\t\tnpw += 1\n\t\t\t\ttsv -= wsv\n\t\t\t\tpsv = -wsv\n\t\t\t\tnac = False\n\t\t\telif sfw.lower() in ldf[2].word.values: #Check if singular version of word is in dataframe3\n\t\t\t\t#print(sfw, 3, tsv)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\t\tnac = False\n\t\t\telif pfw.lower() in ldf[2].word.values: #Check if plural version of word is in dataframe3\n\t\t\t\t#print(pfw, 3, tsv)\n\t\t\t\tnnw += 1\n\t\t\t\tpsv = -3\n\t\t\t\tnac = False\n\t\t\telif sfw.lower() in ldf[3].word.values: #Check if singular version of word is in dataframe4\n\t\t\t\t#print(sfw, 4)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\t\tnac = False\n\t\t\telif pfw.lower() in ldf[3].word.values: #Check if plural version of word is in dataframe4\n\t\t\t\t#print(pfw, 4)\n\t\t\t\tnpw += 1\n\t\t\t\tpsv = 3\n\t\t\t\tnac = False\n\t\t\telse: #The word must be a \"neutral\" word\n\t\t\t\t#print(wrd, sfw, pfw)\n\t\t\t\tnNw += 1\n\t\t#t2 = time.time()\n\t\t#print(\"Amount of time taken to parse word: \" + str(t2-t1) + \"sec\")\n\n\tt4 = time.time()\n\tprint(\"Amount of time taken to parse tweet: \" + str(t4-t3) + \"sec\")\n\treturn(npw, nnw, nNw, tsv)\n\ndef NaiveBayes(txt, ppl, tov):\n\t#tov = likelihoodFunctionInformation(ctt, [df1, df2, df3, df4])\t#Obtain tuple of values required to calculate the Likelihood function and posterior probability\n\tpPp = ppl[0]\t#Positive class Prior Probability\n\tpnp = ppl[1]\t#Negative class Prior Probability\n\tpNp = ppl[2]\t#Neutral class Prior Probability\n\tnpw = tov[0]\t#No. 
of positive words\n\tnnw = tov[1]\t#No. of negative words\n\tnNw = tov[2]\t#No. of neutral words\n\ttsv = tov[3]\t#Total Sentiment Value\n\ttnw = npw + nnw + nNw\t#Total no. of words\n\tcls = \" \"\t#Defining the class which the text belongs to.\n\n\t#print(npw, nnw, nNw, tsv)\n\tif(npw==0 and nnw==0):\n\t\tcls = \"neutral\"\t#Class is set to Neutral\n\telse:\n\t\tif(tsv==0):\n\t\t\tden = (pPp*(1-np.exp(-1*((npw*5)/(tnw))))) + (pnp*(1-np.exp(-1*((nnw*5)/(tnw))))) + (pNp*(1-np.exp(-1*((nNw)/(tnw)))))\t#Calculate the denominator for the posterior probabilities\n\n\t\t\t#Posterior Probability of sentiment of text is positive given the text:\n\t\t\tppp = (pPp*(1-np.exp(-1*((npw*5)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(npw*10))))\n\t\t\t#print(ppp)\n\n\t\t\t#Posterior Probability of sentiment of text is negative given the text:\n\t\t\tnpp = (pnp*(1-np.exp(-1*((nnw*5)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nnw*10))))\n\t\t\t#print(npp)\n\n\t\t\t#Posterior Probability of sentiment of text is neutral given the text:\n\t\t\tNpp = (pNp*(1-np.exp(-1*((nNw)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nNw*10))))\n\t\t\t#print(Npp)\n\n\t\t\t#Determine the sentimentality of text:\n\t\t\tif(max([ppp,npp,Npp])==ppp):\n\t\t\t\tcls = \"positive\"\n\t\t\tif(max([ppp,npp,Npp])==npp):\n\t\t\t\tcls = \"negative\"\n\t\t\tif(max([ppp,npp,Npp])==Npp):\n\t\t\t\tcls = \"neutral\"\n\t\telif(tsv>0):\n\t\t\tden = (pPp*(1-np.exp(-1*((npw*5*tsv)/(tnw))))) + (pnp*(1-np.exp(-1*((nnw*5)/(tnw))))) + (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45))))) #Calculate the denominator for the posterior probabilities.\n\n\t\t\t#Posterior Probability of sentiment of text is positive given the text:\n\t\t\tppp = (pPp*(1-np.exp(-1*((npw*5*tsv)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(npw*10))))\n\t\t\t#print(ppp)\n\n\t\t\t#Posterior Probability of sentiment of text is negative given the text:\n\t\t\tnpp = (pnp*(1-np.exp(-1*((nnw*5)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nnw*10))))\n\t\t\t#print(npp)\n\n\t\t\t#Posterior Probability of sentiment of text is neutral given the text:\n\t\t\tNpp = (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nNw*10))))\n\t\t\t#print(Npp)\n\n\t\t\t#Determine the sentimentality of text:\n\t\t\tif(max([ppp,npp,Npp])==ppp):\n\t\t\t\tcls = \"positive\"\n\t\t\tif(max([ppp,npp,Npp])==npp):\n\t\t\t\tcls = \"negative\"\n\t\t\tif(max([ppp,npp,Npp])==Npp):\n\t\t\t\tcls = \"neutral\"\n\t\telse:\n\t\t\tden = (pPp*(1-np.exp(-1*((npw*5)/(tnw))))) + (pnp*(1-np.exp(-1*((nnw*5*abs(tsv))/(tnw))))) + (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45))))) #Calculate the denominator for the posterior probabilities.\n\n\t\t\t#Posterior Probability of sentiment of text is positive given the text (numerator matches the first term of the denominator):\n\t\t\tppp = (pPp*(1-np.exp(-1*((npw*5)/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(npw*10))))\n\t\t\t#print(ppp)\n\n\t\t\t#Posterior Probability of sentiment of text is negative given the text:\n\t\t\tnpp = (pnp*(1-np.exp(-1*((nnw*5*abs(tsv))/(tnw)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nnw*10))))\n\t\t\t#print(npp)\n\n\t\t\t#Posterior Probability of sentiment of text is neutral given the text:\n\t\t\tNpp = (pNp*(1-np.exp(-1*((nNw)/(tnw*1.45)))))/(den)\n\t\t\t#print((1-np.exp(-1*(nNw*10))))\n\t\t\t#print(Npp)\n\n\t\t\t#Determine the sentimentality of text:\n\t\t\tif(max([ppp,npp,Npp])==ppp):\n\t\t\t\tcls = \"positive\"\n\t\t\tif(max([ppp,npp,Npp])==npp):\n\t\t\t\tcls = \"negative\"\n\t\t\tif(max([ppp,npp,Npp])==Npp):\n\t\t\t\tcls = \"neutral\"\n\treturn cls\n\n#############Loading the 
Datasets:####################\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)\n\n#Training Dataset:\ndft = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/trainingdataset.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1], names=[\"tweet_text\",\"sentiment\"])\n\n#Testing Dataset:\ndfT = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/testingdataset.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1], names=[\"tweet_text\",\"sentiment\"])\n\n#Sample Dataset:\ndfs = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/sampleDataset.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1,2], names=[\"tweetid\", \"userid\", \"tweet_text\"])\n\n#Main Dataset:\ndfn = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/CoreBotTweetsCombinedEN.csv\", sep=\",\", skiprows=[0], header=None, usecols=[0,1,2], names=[\"tweetid\",\"userid\", \"tweet_text\"])\n\n#Sentiment Dataset 1:\ndf1 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/AFINN-111.txt\", sep=\"\\t\", header=None, usecols=[0,1], names=[\"word\",\"sentiment\"])\n\n#Sentiment Dataset 2:\ndf2 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/AFINN-96.txt\", sep=\"\\t\", header=None, usecols=[0,1], names=[\"word\",\"sentiment\"])\n\n#Sentiment Dataset 3 [Positive Words Only]:\ndf3 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/Positivewords.txt\", sep=\"\\n\", header=None, usecols=[0], names=[\"word\"])\n\n#Sentiment Dataset 4 [Negative Words Only]:\ndf4 = pd.read_csv(\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/datasets/SentimentDictionary/Negativewords.txt\", sep=\"\\n\", header=None, usecols=[0], names=[\"word\"])\n\n#Dataset required to classify each tweet and its sentimentality to its corresponding bot:\ndfc = pd.DataFrame(columns=[\"tweetid\", \"userid\", \"tweet_candidate_class\", \"tweet_sentiment_class\"])\n\n\n#############Running the Naive Bayesian Classifier:####################\n\n#Obtain the list of Prior Probabilities obtained from Training Dataset:\ntts = dft[\"sentiment\"].count()\t#Total no. of Training Sentiment values.\ntTs = dfT[\"sentiment\"].count()\t#Total no. of Testing sentiment values.\n#Append all the Testing sentiment values to the Training sentiment values to obtain a complete list of sentiments used as prior probabilities for classification of all political tweets in \"CoreBotTweetsCombinedEN.csv\".\nfor i in range(tts, tts+tTs):\n\tdft.loc[i, \"sentiment\"] = dfT[\"sentiment\"][i-tts]\nppl = priorProb(dft.sentiment)\n\nloc = []\t#List of classes for each text row in the dataframe.\n#Dictionary that stores lists used to calculate demographic statistics below:\npbd = {} #Political Bot Dictionary. I.e. Dictionary of all twitter bots that tweeted, replied to, or retweeted political comments that affected the 2016 elections. The key represents the bot's userid. The value is a list of class types it belongs to. i.e. 
Value = [\"Trump\", \"positive\", \"ProTrump\"].\n\nfor index, row in dfn.iterrows():\n\t#print(CleanUp(expandContractions(row[\"tweet_text\"].replace(\"’\", \"'\"))))\n\tctt = CleanUp(expandContractions(row[\"tweet_text\"].replace(\"’\", \"'\")))\t#Cleaned Tweet\n\tcot = NaiveBayes(ctt, ppl, likelihoodFunctionInformation(ctt, [df1, df2, df3, df4]))\n\t#print(cot)\n\tloc.append(cot)\n\ntnr = 0\t#Total No. of right words.\nmcp = 0\t#MisClassification percentage.\ntap = 0\t#Total Accuracy percentage.\n\nnpt = 0\t#No. of positive Trump tweets.\nnnt = 0\t#No. of negative Trump tweets.\nnNt = 0\t#No. of neutral Trump tweets.\nnpc = 0\t#No. of positive Clinton tweets.\nnnc = 0\t#No. of negative Clinton tweets.\nnNc = 0\t#No. of neutral Clinton tweets.\nngt = 0\t#No. of general tweets. [i.e. Not Trump or Hillary].\ntht = False\t#Is the tweet a Trump or Hillary tweet?\ntcc = \" \"\t#Setting the tweet candidate class [i.e. Trump, Hillary, Neutral] for the classification below.\ntsc = \" \"\t#Setting the tweet sentiment class [i.e. Positive, Negative, Neutral] for the classification below.\ntoc = \" \"\t#Setting the tweet overall class. [i.e. ProTrump, AntiClinton, etc;] for the classification below.\n\n#t=\"RT @Trumpocrats: @TallahForTrump @tariqnasheed I'm beside myself by his hate for America and how we have done so much to free an entire rac...\"\n#print(t)\n#print(\"Actual Sentiment: \" + \"negative\")\n#print(\"Calculated Sentiment: \" + str(cot))\n\n\nfor i in range(0,len(loc)):\n\t#Recording no. of correct tweets:\n\t#print(dfn.iloc[i].tweet_text)\n\t#print(\"Actual Sentiment: \" + dft.iloc[i].sentiment)\n\t#print(\"Calculated Sentiment: \" + loc[i])\n\t'''\n\tif(loc[i].lower()==dft.iloc[i].sentiment.lower()):\n\t\ttnr += 1\t#Use to calculate accuracy of classifier; Not for running entire algorithm\n\t'''\n\t#Classification of Tweets to Trump, Hillary or Neutral:\n\tif(\"trump\" in dfn.iloc[i].tweet_text.lower() or \"donald\" in dfn.iloc[i].tweet_text.lower()):\n\t\ttht = True\n\t\tif((\"email\" in dfn.iloc[i].tweet_text.lower()) or (\"makeamericagreatagain\" in dfn.iloc[i].tweet_text.lower()) or (\"make america great again\" in dfn.iloc[i].tweet_text.lower()) or (\"maga\" in dfn.iloc[i].tweet_text.lower()) or (\"russia\" in dfn.iloc[i].tweet_text.lower())):\n\t\t\tnpt += 1\n\t\t\ttcc = \"Trump\"\n\t\t\ttsc = \"Positive\"\n\t\t\ttoc = \"ProTrump\"\n\t\telse:\n\t\t\tif(loc[i]==\"positive\"):\n\t\t\t\tnpt += 1\n\t\t\t\ttcc = \"Trump\"\n\t\t\t\ttsc = \"Positive\"\n\t\t\t\ttoc = \"ProTrump\"\n\t\t\tif(loc[i]==\"negative\"):\n\t\t\t\tnnt += 1\n\t\t\t\ttcc = \"Trump\"\n\t\t\t\ttsc = \"Negative\"\n\t\t\t\ttoc = \"AntiTrump\"\n\t\t\tif(loc[i]==\"neutral\"):\n\t\t\t\tnNt += 1\n\t\t\t\ttcc = \"Trump\"\n\t\t\t\ttsc = \"Neutral\"\n\t\t\t\ttoc = \"Neutral\"\n\n\tif(\"clinton\" in dfn.iloc[i].tweet_text.lower() or \"hillary\" in dfn.iloc[i].tweet_text.lower()):\n\t\ttht = True\n\t\tif((\"email\" in dfn.iloc[i].tweet_text.lower()) or (\"makeamericagreatagain\" in dfn.iloc[i].tweet_text.lower()) or (\"make america great again\" in dfn.iloc[i].tweet_text.lower()) or (\"maga\" in dfn.iloc[i].tweet_text.lower()) or (\"russia\" in dfn.iloc[i].tweet_text.lower())):\n\t\t\tnnc += 1\n\t\t\ttcc = \"Clinton\"\n\t\t\ttsc = \"Negative\"\n\t\t\ttoc = \"AntiClinton\"\n\t\telse:\n\t\t\tif(loc[i]==\"positive\"):\n\t\t\t\tnpc += 1\n\t\t\t\ttcc = \"Clinton\"\n\t\t\t\ttsc = \"Positive\"\n\t\t\t\ttoc = \"ProClinton\"\n\t\t\tif(loc[i]==\"negative\"):\n\t\t\t\ttcc = \"Clinton\"\n\t\t\t\ttsc = \"Negative\"\n\t\t\t\ttoc = 
\"AntiClinton\"\n\t\t\t\tnnc += 1\n\t\t\tif(loc[i]==\"neutral\"):\n\t\t\t\ttcc = \"Clinton\"\n\t\t\t\ttsc = \"Neutral\"\n\t\t\t\ttoc = \"Neutral\"\n\t\t\t\tnNc += 1\n\tif(tht==False):\n\t\tngt += 1\n\t\ttcc = \"Neutral\"\n\t\ttsc = \"Neutral\"\n\t\ttoc = \"Neutral\"\n\ttht = False\n\n\n\t#############Information required to classify each tweet and its sentimentality to its corresponding bot:#########################\n\tfsn=\"/root/.encrypted/.pythonSai/kCoreBots/CoreBotEN/MachineLearning/NaiveBayes/CoreBotsSentiment/Bot-\"+dfn.iloc[i].userid+\"-EN.csv\"\n\n\t#Assign Values to our political Bot Dictionary defined above:\n\ttmp = [tcc, tsc, toc]\t#Temporary List\n\n\tif(dfn.iloc[i].userid in pbd.keys()):\n\t\tif(tmp not in pbd[dfn.iloc[i].userid]):\n\t\t\ttvl = dfn.iloc[i].userid\t#temporary value\n\t\t\tpbd[tvl]=pbd[tvl]+[tmp]\n\telse:\n\t\tpbd[dfn.iloc[i].userid] = [tmp]\n\t\n\t#Assign values to temporary dataset that will stream these values into the designated csv file.\n\tdfc.loc[i] = [dfn.iloc[i].tweetid, dfn.iloc[i].userid, tcc, tsc]\n\tdfc[[\"tweetid\", \"userid\",\"tweet_candidate_class\", \"tweet_sentiment_class\"]].to_csv(fsn, mode='a', sep=',', header=False, index=False)\n\n\t#Clear this temporary dataset for it to be useable in the next iteration.\n\tdfc = dfc.iloc[i:]\n\t\n\n#Printing our classification results:\nprint(\"******************Trump Sentimentality amongst bots:*******************\")\nprint(\"Total no. of positive Trump tweets = \" + str(npt))\nprint(\"Total no. of negative Trump tweets = \" + str(nnt))\nprint(\"Total no. of neutral Trump tweets = \" + str(nNt))\nprint(\"Total no. of Trump tweets = \"+ str(npt+nnt+nNt))\n\nprint(\"******************Clinton Sentimentality amongst bots:*****************\")\nprint(\"Total no. of positive Clinton tweets = \" + str(npc))\nprint(\"Total no. of negative Clinton tweets = \" + str(nnc))\nprint(\"Total no. of neutral Clinton tweets = \" + str(nNc))\nprint(\"Total no. of Clinton tweets = \"+ str(npc+nnc+nNc))\n\nprint(\"******************General Sentimentality amongst bots:*****************\")\nprint(\"Total no. of general [not candidate related] tweets = \" + str(ngt))\n\nprint(\"*****************General demographics of the bots:*********************\")\nnmc = 0\t#Total No. of bots that represent multiple classes. I.e. Have multiple sentiments or are targetting multiple candidates.\nnpn = 0\t#Total No. of bots that are both positive and negative in sentimentality.\nntc = 0\t#Total No. of bots that target both Trump and Clinton.\nnPtAc = 0\t#Total No. of bots that are Pro Trump and Anti Clinton.\nnPtAt = 0\t#Total No. of bots that are Pro Trump and Anti Trump.\nnAtPc = 0\t#Total No. of bots that are Anti Trump and Pro Clinton.\nnPcAc = 0\t#Total No. of bots that are Pro Clinton and Anti Clinton.\nnPtPc = 0\t#Total No. of bots that are Pro Trump and Pro Clinton.\nnAtAc = 0\t#Total No. 
of bots that are Anti Trump and Anti Clinton.\nfor key, val in pbd.items():\n\tif(len(val)>1):\n\t\tnmc += 1\n\tif(any(\"Positive\" in all for all in val) and any(\"Negative\" in all for all in val)):\n\t\tnpn += 1\n\tif(any(\"Trump\" in all for all in val) and any(\"Clinton\" in all for all in val)):\n\t\tntc += 1\n\tif(any(\"ProTrump\" in all for all in val) and any(\"AntiClinton\" in all for all in val)):\n\t\tnPtAc += 1\n\tif(any(\"ProTrump\" in all for all in val) and any(\"AntiTrump\" in all for all in val)):\n\t\tnPtAt += 1\n\tif(any(\"AntiTrump\" in all for all in val) and any(\"ProClinton\" in all for all in val)):\n\t\tnAtPc += 1\n\tif(any(\"ProClinton\" in all for all in val) and any(\"AntiClinton\" in all for all in val)):\n\t\tnPcAc += 1\n\tif(any(\"ProTrump\" in all for all in val) and any(\"ProClinton\" in all for all in val)):\n\t\tnPtPc += 1\n\tif(any(\"AntiTrump\" in all for all in val) and any(\"AntiClinton\" in all for all in val)):\n\t\tnAtAc += 1\n\n#print(pbd)\nprint(\"Total no. of bots that have multiple classes = \" +str(nmc))\nprint(\"Total no. of bots that are both positive and negative in sentimentality = \" +str(npn))\nprint(\"Total no. of bots that target both Trump and Clinton = \" +str(ntc))\nprint(\"Total no. of bots that are both ProTrump and AntiClinton = \" +str(nPtAc))\nprint(\"Total no. of bots that are both ProTrump and AntiTrump = \" +str(nPtAt))\nprint(\"Total no. of bots that are both AntiTrump and ProClinton = \" +str(nAtPc))\nprint(\"Total no. of bots that are both ProClinton and AntiClinton = \" +str(nPcAc))\nprint(\"Total no. of bots that are both ProTrump and ProClinton = \" +str(nPtPc))\nprint(\"Total no. of bots that are both AntiTrump and AntiClinton = \" +str(nAtAc))\n\n'''\n#Accuracy and Misclassification Rate of Classifier:\nprint(\"Accuracy Percentage of Classifier: \" + str((tnr/len(loc))*100) + \"%\")\nprint(\"Misclassification Percentage of Classifier: \" + str((1-(tnr/len(loc)))*100) + \"%\")\n'''\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "numpy.exp", "pandas.set_option" ] ]
JonathanLehner/cassini_2021_nature_discoverer
[ "41e1e7ec01400d16bd34baf0763adce0383f3841" ]
[ "sentinel2_processing/georasteR_converter.py" ]
[ "\"\"\"\nnow using\nhttps://towardsdatascience.com/reading-and-visualizing-geotiff-images-with-python-8dcca7a74510\nhttps://github.com/GeoUtils/georaster/blob/master/georaster/georaster.py\nhttps://rasterio.readthedocs.io/en/latest/topics/color.html\n\"\"\"\n\nimport os\nimport pprint as pp\nimport time\nfrom datetime import datetime\nfrom os import listdir\nfrom os.path import join, isfile\n\nimport georaster\nfrom osgeo import gdal\nimport matplotlib.pyplot as plt\nimport wordninja\nfrom cleantext import clean\nfrom natsort import natsorted\nfrom tqdm import tqdm\n\ntif_dir_path = str(input(\"Enter path to folder with geotiff files -->\"))\n# -----------------------------------------------------------------\noutput_folder_name = \"georasteR_conversion\"\noutput_path_full = os.path.join(tif_dir_path, output_folder_name)\nif not os.path.isdir(output_path_full):\n os.mkdir(output_path_full)\n # make a place to store outputs if one does not exist\nprint(\"outputs will be in: \\n\", output_path_full)\n\n\n# -----------------------------------------------------------------\n\n\ndef cleantxt_wrap(ugly_text):\n # a wrapper for clean text with options different than default\n\n # https://pypi.org/project/clean-text/\n cleaned_text = clean(ugly_text,\n fix_unicode=True, # fix various unicode errors\n to_ascii=True, # transliterate to closest ASCII representation\n lower=True, # lowercase text\n no_line_breaks=True, # fully strip line breaks as opposed to only normalizing them\n no_urls=True, # replace all URLs with a special token\n no_emails=True, # replace all email addresses with a special token\n no_phone_numbers=True, # replace all phone numbers with a special token\n no_numbers=False, # replace all numbers with a special token\n no_digits=False, # replace all digits with a special token\n no_currency_symbols=True, # replace all currency symbols with a special token\n no_punct=True, # remove punctuations\n replace_with_punct=\"\", # instead of removing punctuations you may replace them\n replace_with_url=\"<URL>\",\n replace_with_email=\"<EMAIL>\",\n replace_with_phone_number=\"<PHONE>\",\n replace_with_number=\"<NUM>\",\n replace_with_digit=\"0\",\n replace_with_currency_symbol=\"<CUR>\",\n lang=\"en\" # set to 'de' for German special handling\n )\n\n return cleaned_text\n\n\ndef beautify_filename(filename, num_words=20, start_reverse=False,\n word_separator=\"_\"):\n # takes a filename stored as text, removes extension, separates into X words ...\n # and returns a nice filename with the words separateed by\n # useful for when you are reading files, doing things to them, and making new files\n\n filename = str(filename)\n index_file_Ext = filename.rfind('.')\n current_name = str(filename)[:index_file_Ext] # get rid of extension\n clean_name = cleantxt_wrap(current_name) # wrapper with custom defs\n file_words = wordninja.split(clean_name)\n # splits concatenated text into a list of words based on common word freq\n if len(file_words) <= num_words:\n num_words = len(file_words)\n\n if start_reverse:\n t_file_words = file_words[-num_words:]\n else:\n t_file_words = file_words[:num_words]\n\n pretty_name = word_separator.join(t_file_words) # see function argument\n\n # NOTE IT DOES NOT RETURN THE EXTENSION\n return pretty_name[: (len(pretty_name) - 1)] # there is a space always at the end, so -1\n\n\n# ----------------------------------------------------------------------------\n\ndef convert_tiff_to_png_georasters(input_path, output_path, verbose=False):\n # Use SingleBandRaster() if image 
has only one band\n    img = georaster.MultiBandRaster(input_path)\n    # img.r gives the raster in [height, width, band] format\n    # band no. starts from 0\n    plt.imshow(img.r[:, :, 2], interpolation='spline36')\n    plt.title(os.path.basename(input_path))\n    plt.savefig(output_path, bbox_inches='tight', dpi=200)\n    plt.close()  # release the figure so repeated calls do not accumulate axes\n\n    if verbose:\n        # For no. of bands and resolution\n        gd_img = gdal.Open(input_path, gdal.GA_ReadOnly)\n        print(\"\\n data on rasters from gdal:\")\n        print(gd_img.RasterCount, gd_img.RasterXSize, gd_img.RasterYSize)\n        # per-band stats about the image (min, max, mean, stddev)\n        for band_no in range(1, gd_img.RasterCount + 1):\n            print(gd_img.GetRasterBand(band_no).GetStatistics(True, True))\n\n\n# ----------------------------------------------------------------------------\n\n\n# load files\nfiles_to_munch = natsorted([f for f in listdir(tif_dir_path) if isfile(os.path.join(tif_dir_path, f))])\ntotal_files_1 = len(files_to_munch)\nremoved_count_1 = 0\napproved_files = []\n# keep only tif_image files (do not mutate the list while iterating over it)\nfor prefile in files_to_munch:\n    if prefile.endswith(\".tif\"):\n        approved_files.append(prefile)\n    else:\n        removed_count_1 += 1\n\nprint(\"out of {0:3d} file(s) originally in the folder, \".format(total_files_1),\n      \"{0:3d} non-tif_image files were removed\".format(removed_count_1))\nprint('\\n {0:3d} tif_image file(s) in folder will be converted.'.format(len(approved_files)))\npp.pprint(approved_files)\n\n# ----------------------------------------------------------------------------\n\n\n# loop\nst = time.time()\nfor tif_file in tqdm(approved_files, total=len(approved_files),\n                     desc=\"Converting tif_images\"):\n    index_pos = approved_files.index(tif_file)\n    out_name = beautify_filename(tif_file) + \"converted_nr_{}_\".format(index_pos) + \".png\"\n    this_input_path = join(tif_dir_path, tif_file)\n    this_output_path = join(output_path_full, out_name)\n    convert_tiff_to_png_georasters(this_input_path, this_output_path)\n\nrt = round((time.time() - st) / 60, 2)\nprint(\"\\n\\nfinished converting all tif_images - \", datetime.now())\nprint(\"Converted {} tif_images in {} minutes\".format(len(approved_files), rt))\nprint(\"they are located in: \\n\", output_path_full)\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.imshow" ] ]
imcwx/models
[ "523ff5d0d50c3181329e62509270d4d778734000" ]
[ "research/object_detection/core/preprocessor.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Preprocess images and bounding boxes for detection.\n\nWe perform two sets of operations in preprocessing stage:\n(a) operations that are applied to both training and testing data,\n(b) operations that are applied only to training data for the purpose of\n data augmentation.\n\nA preprocessing function receives a set of inputs,\ne.g. an image and bounding boxes,\nperforms an operation on them, and returns them.\nSome examples are: randomly cropping the image, randomly mirroring the image,\n randomly changing the brightness, contrast, hue and\n randomly jittering the bounding boxes.\n\nThe preprocess function receives a tensor_dict which is a dictionary that maps\ndifferent field names to their tensors. For example,\ntensor_dict[fields.InputDataFields.image] holds the image tensor.\nThe image is a rank 4 tensor: [1, height, width, channels] with\ndtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where\nin each row there is a box with [ymin xmin ymax xmax].\nBoxes are in normalized coordinates meaning\ntheir coordinate values range in [0, 1]\n\nImportant Note: In tensor_dict, images is a rank 4 tensor, but preprocessing\nfunctions receive a rank 3 tensor for processing the image. Thus, inside the\npreprocess function we squeeze the image to become a rank 3 tensor and then\nwe pass it to the functions. 
At the end of the preprocess we expand the image\nback to rank 4.\n\"\"\"\n\nimport sys\nimport tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.python.ops import control_flow_ops\n\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import keypoint_ops\nfrom object_detection.core import standard_fields as fields\n\n\ndef _apply_with_random_selector(x, func, num_cases):\n \"\"\"Computes func(x, sel), with sel sampled from [0...num_cases-1].\n\n Args:\n x: input Tensor.\n func: Python function to apply.\n num_cases: Python int32, number of cases to sample sel from.\n\n Returns:\n The result of func(x, sel), where func receives the value of the\n selector as a python integer, but sel is sampled dynamically.\n \"\"\"\n rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n # Pass the real x only to one of the func calls.\n return control_flow_ops.merge([func(\n control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case)\n for case in range(num_cases)])[0]\n\n\ndef _apply_with_random_selector_tuples(x, func, num_cases):\n \"\"\"Computes func(x, sel), with sel sampled from [0...num_cases-1].\n\n Args:\n x: A tuple of input tensors.\n func: Python function to apply.\n num_cases: Python int32, number of cases to sample sel from.\n\n Returns:\n The result of func(x, sel), where func receives the value of the\n selector as a python integer, but sel is sampled dynamically.\n \"\"\"\n num_inputs = len(x)\n rand_sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)\n # Pass the real x only to one of the func calls.\n\n tuples = [list() for t in x]\n for case in range(num_cases):\n new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x]\n output = func(tuple(new_x), case)\n for j in range(num_inputs):\n tuples[j].append(output[j])\n\n for i in range(num_inputs):\n tuples[i] = control_flow_ops.merge(tuples[i])[0]\n return tuple(tuples)\n\n\ndef _random_integer(minval, maxval, seed):\n \"\"\"Returns a random 0-D tensor between minval and maxval.\n\n Args:\n minval: minimum value of the random tensor.\n maxval: maximum value of the random tensor.\n seed: random seed.\n\n Returns:\n A random 0-D tensor between minval and maxval.\n \"\"\"\n return tf.random_uniform(\n [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed)\n\n\ndef normalize_image(image, original_minval, original_maxval, target_minval,\n target_maxval):\n \"\"\"Normalizes pixel values in the image.\n\n Moves the pixel values from the current [original_minval, original_maxval]\n range to a the [target_minval, target_maxval] range.\n\n Args:\n image: rank 3 float32 tensor containing 1\n image -> [height, width, channels].\n original_minval: current image minimum value.\n original_maxval: current image maximum value.\n target_minval: target image minimum value.\n target_maxval: target image maximum value.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('NormalizeImage', values=[image]):\n original_minval = float(original_minval)\n original_maxval = float(original_maxval)\n target_minval = float(target_minval)\n target_maxval = float(target_maxval)\n image = tf.to_float(image)\n image = tf.subtract(image, original_minval)\n image = tf.multiply(image, (target_maxval - target_minval) /\n (original_maxval - original_minval))\n image = tf.add(image, target_minval)\n return image\n\n\ndef retain_boxes_above_threshold(boxes,\n labels,\n label_scores,\n masks=None,\n 
keypoints=None,\n threshold=0.0):\n \"\"\"Retains boxes whose label score is above a given threshold.\n\n If the label score for a box is missing (represented by NaN), the box is\n retained. The boxes that don't pass the threshold will not appear in the\n returned tensor.\n\n Args:\n boxes: float32 tensor of shape [num_instance, 4] representing boxes\n location in normalized coordinates.\n labels: rank 1 int32 tensor of shape [num_instance] containing the object\n classes.\n label_scores: float32 tensor of shape [num_instance] representing the\n score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks are of\n the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized\n coordinates.\n threshold: scalar python float.\n\n Returns:\n retained_boxes: [num_retained_instance, 4]\n retianed_labels: [num_retained_instance]\n retained_label_scores: [num_retained_instance]\n\n If masks, or keypoints are not None, the function also returns:\n\n retained_masks: [num_retained_instance, height, width]\n retained_keypoints: [num_retained_instance, num_keypoints, 2]\n \"\"\"\n with tf.name_scope('RetainBoxesAboveThreshold',\n values=[boxes, labels, label_scores]):\n indices = tf.where(\n tf.logical_or(label_scores > threshold, tf.is_nan(label_scores)))\n indices = tf.squeeze(indices, axis=1)\n retained_boxes = tf.gather(boxes, indices)\n retained_labels = tf.gather(labels, indices)\n retained_label_scores = tf.gather(label_scores, indices)\n result = [retained_boxes, retained_labels, retained_label_scores]\n\n if masks is not None:\n retained_masks = tf.gather(masks, indices)\n result.append(retained_masks)\n\n if keypoints is not None:\n retained_keypoints = tf.gather(keypoints, indices)\n result.append(retained_keypoints)\n\n return result\n\n\ndef _flip_boxes_left_right(boxes):\n \"\"\"Left-right flip the boxes.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Flipped boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_xmin = tf.subtract(1.0, xmax)\n flipped_xmax = tf.subtract(1.0, xmin)\n flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)\n return flipped_boxes\n\n\ndef _flip_boxes_up_down(boxes):\n \"\"\"Up-down flip the boxes.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Flipped boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n flipped_ymin = tf.subtract(1.0, ymax)\n flipped_ymax = tf.subtract(1.0, ymin)\n flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1)\n return flipped_boxes\n\n\ndef _rot90_boxes(boxes):\n \"\"\"Rotate boxes counter-clockwise by 90 degrees.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n\n Returns:\n Rotated boxes.\n \"\"\"\n ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)\n 
rotated_ymin = tf.subtract(1.0, xmax)\n rotated_ymax = tf.subtract(1.0, xmin)\n rotated_xmin = ymin\n rotated_xmax = ymax\n rotated_boxes = tf.concat(\n [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1)\n return rotated_boxes\n\n\ndef _flip_masks_left_right(masks):\n \"\"\"Left-right flip masks.\n\n Args:\n masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n\n Returns:\n flipped masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n return masks[:, :, ::-1]\n\n\ndef _flip_masks_up_down(masks):\n \"\"\"Up-down flip masks.\n\n Args:\n masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n\n Returns:\n flipped masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n return masks[:, ::-1, :]\n\n\ndef _rot90_masks(masks):\n \"\"\"Rotate masks counter-clockwise by 90 degrees.\n\n Args:\n masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n\n Returns:\n rotated masks: rank 3 float32 tensor with shape\n [num_instances, height, width] representing instance masks.\n \"\"\"\n masks = tf.transpose(masks, [0, 2, 1])\n return masks[:, ::-1, :]\n\n\ndef random_horizontal_flip(image,\n boxes=None,\n masks=None,\n keypoints=None,\n keypoint_flip_permutation=None,\n seed=None):\n \"\"\"Randomly flips the image and detections horizontally.\n\n The probability of flipping the image is 50%.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 tensor with shape [N, 4]\n containing the bounding boxes.\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, keypoints, and keypoint_flip_permutation are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: if keypoints are provided but keypoint_flip_permutation is not.\n \"\"\"\n\n def _flip_image(image):\n # flip image\n image_flipped = tf.image.flip_left_right(image)\n return image_flipped\n\n if keypoints is not None and keypoint_flip_permutation is None:\n raise ValueError(\n 'keypoints are provided but keypoints_flip_permutation is not provided')\n\n with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):\n result = []\n # random variable defining whether to do flip or not\n do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),\n lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),\n lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None and keypoint_flip_permutation is not None:\n permutation = keypoint_flip_permutation\n keypoints = tf.cond(\n do_a_flip_random,\n lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation),\n lambda: keypoints)\n result.append(keypoints)\n\n return tuple(result)\n\n\ndef random_vertical_flip(image,\n boxes=None,\n masks=None,\n keypoints=None,\n keypoint_flip_permutation=None,\n seed=None):\n \"\"\"Randomly flips the image and detections vertically.\n\n The probability of flipping the image is 50%.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 tensor with shape [N, 4]\n containing the bounding boxes.\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip\n permutation.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, keypoints, and keypoint_flip_permutation are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: if keypoints are provided but keypoint_flip_permutation is not.\n \"\"\"\n\n def _flip_image(image):\n # flip image\n image_flipped = tf.image.flip_up_down(image)\n return image_flipped\n\n if keypoints is not None and keypoint_flip_permutation is None:\n raise ValueError(\n 'keypoints are provided but keypoints_flip_permutation is not provided')\n\n with tf.name_scope('RandomVerticalFlip', values=[image, boxes]):\n result = []\n # random variable defining whether to do flip or not\n do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes),\n lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks),\n lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None and keypoint_flip_permutation is not None:\n permutation = keypoint_flip_permutation\n keypoints = tf.cond(\n do_a_flip_random,\n lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation),\n lambda: keypoints)\n result.append(keypoints)\n\n return tuple(result)\n\n\ndef random_rotation90(image,\n boxes=None,\n masks=None,\n keypoints=None,\n seed=None):\n \"\"\"Randomly rotates the image and detections 90 degrees counter-clockwise.\n\n The probability of rotating the image is 50%. This can be combined with\n random_horizontal_flip and random_vertical_flip to produce an output with a\n uniform distribution of the eight possible 90 degree rotation / reflection\n combinations.\n\n Args:\n image: rank 3 float32 tensor with shape [height, width, channels].\n boxes: (optional) rank 2 float32 tensor with shape [N, 4]\n containing the bounding boxes.\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n seed: random seed\n\n Returns:\n image: image which is the same shape as input image.\n\n If boxes, masks, and keypoints, are not None,\n the function also returns the following tensors.\n\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n\n def _rot90_image(image):\n # flip image\n image_rotated = tf.image.rot90(image)\n return image_rotated\n\n with tf.name_scope('RandomRotation90', values=[image, boxes]):\n result = []\n\n # random variable defining whether to rotate by 90 degrees or not\n do_a_rot90_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)\n\n # flip image\n image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image),\n lambda: image)\n result.append(image)\n\n # flip boxes\n if boxes is not None:\n boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes),\n lambda: boxes)\n result.append(boxes)\n\n # flip masks\n if masks is not None:\n masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks),\n lambda: masks)\n result.append(masks)\n\n # flip keypoints\n if keypoints is not None:\n keypoints = tf.cond(\n do_a_rot90_random,\n lambda: keypoint_ops.rot90(keypoints),\n lambda: keypoints)\n result.append(keypoints)\n\n return tuple(result)\n\n\ndef random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None):\n \"\"\"Scales each value in the pixels of the image.\n\n This function scales each pixel independent of the other ones.\n For each value in image tensor, draws a random number between\n minval and maxval and multiples the values with them.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n minval: lower ratio of scaling pixel values.\n maxval: upper ratio of scaling pixel values.\n seed: random seed.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomPixelValueScale', values=[image]):\n image = tf.convert_to_tensor(image, name='image')\n # Remember original dtype to so we can convert back if needed\n orig_dtype = image.dtype\n image = tf.image.convert_image_dtype(image, tf.float32)\n color_coef = tf.random_uniform(\n tf.shape(image),\n minval=minval,\n maxval=maxval,\n dtype=tf.float32,\n seed=seed)\n image = tf.multiply(image, color_coef)\n return tf.image.convert_image_dtype(image, orig_dtype, saturate=True)\n\n return image\n\n\ndef random_image_scale(image,\n masks=None,\n min_scale_ratio=0.5,\n max_scale_ratio=2.0,\n seed=None):\n \"\"\"Scales the image size.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels].\n masks: (optional) rank 3 float32 tensor containing masks with\n size [height, width, num_masks]. 
def random_image_scale(image,\n masks=None,\n min_scale_ratio=0.5,\n max_scale_ratio=2.0,\n seed=None):\n \"\"\"Scales the image size.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels].\n masks: (optional) rank 3 float32 tensor containing masks with\n size [height, width, num_masks]. The value is set to None if there are no\n masks.\n min_scale_ratio: minimum scaling ratio.\n max_scale_ratio: maximum scaling ratio.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n masks: If masks is not None, resized masks which are the same rank as input\n masks will be returned.\n \"\"\"\n with tf.name_scope('RandomImageScale', values=[image]):\n result = []\n image_shape = tf.shape(image)\n image_height = image_shape[0]\n image_width = image_shape[1]\n size_coef = tf.random_uniform([],\n minval=min_scale_ratio,\n maxval=max_scale_ratio,\n dtype=tf.float32, seed=seed)\n image_newysize = tf.to_int32(\n tf.multiply(tf.to_float(image_height), size_coef))\n image_newxsize = tf.to_int32(\n tf.multiply(tf.to_float(image_width), size_coef))\n image = tf.image.resize_images(\n image, [image_newysize, image_newxsize], align_corners=True)\n result.append(image)\n if masks is not None:\n masks = tf.image.resize_nearest_neighbor(\n masks, [image_newysize, image_newxsize], align_corners=True)\n result.append(masks)\n return tuple(result)\n\n\ndef random_rgb_to_gray(image, probability=0.1, seed=None):\n \"\"\"Changes the image from RGB to Grayscale with the given probability.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n probability: the probability of returning a grayscale image.\n The probability should be a number between [0, 1].\n seed: random seed.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n def _image_to_gray(image):\n image_gray1 = tf.image.rgb_to_grayscale(image)\n image_gray3 = tf.image.grayscale_to_rgb(image_gray1)\n return image_gray3\n\n with tf.name_scope('RandomRGBtoGray', values=[image]):\n # random variable defining whether to convert to grayscale or not\n do_gray_random = tf.random_uniform([], seed=seed)\n\n image = tf.cond(\n tf.greater(do_gray_random, probability), lambda: image,\n lambda: _image_to_gray(image))\n\n return image\n\n\ndef random_adjust_brightness(image, max_delta=0.2):\n \"\"\"Randomly adjusts brightness.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n max_delta: how much to change the brightness. A value between [0, 1).\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomAdjustBrightness', values=[image]):\n image = tf.image.random_brightness(image, max_delta)\n return image\n\n\ndef random_adjust_contrast(image, min_delta=0.8, max_delta=1.25):\n \"\"\"Randomly adjusts contrast.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n min_delta: see max_delta.\n max_delta: how much to change the contrast. Contrast will change with a\n value between min_delta and max_delta.
This value will be\n multiplied to the current contrast of the image.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomAdjustContrast', values=[image]):\n image = tf.image.random_contrast(image, min_delta, max_delta)\n return image\n\n\ndef random_adjust_hue(image, max_delta=0.02):\n \"\"\"Randomly adjusts hue.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n max_delta: change hue randomly with a value between 0 and max_delta.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomAdjustHue', values=[image]):\n image = tf.image.random_hue(image, max_delta)\n return image\n\n\ndef random_adjust_saturation(image, min_delta=0.8, max_delta=1.25):\n \"\"\"Randomly adjusts saturation.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n min_delta: see max_delta.\n max_delta: how much to change the saturation. Saturation will change with a\n value between min_delta and max_delta. This value will be\n multiplied to the current saturation of the image.\n\n Returns:\n image: image which is the same shape as input image.\n \"\"\"\n with tf.name_scope('RandomAdjustSaturation', values=[image]):\n image = tf.image.random_saturation(image, min_delta, max_delta)\n return image
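\n# Editor's note -- illustrative sketch, not original code: the four\n# random_adjust_* ops above compose freely; random_distort_color below simply\n# bundles them in one of two fixed orders. A hypothetical manual chain:\n#\n#   img = random_adjust_brightness(example_image, max_delta=0.2)\n#   img = random_adjust_contrast(img, min_delta=0.8, max_delta=1.25)\n#   img = random_adjust_hue(img, max_delta=0.02)\n#   img = random_adjust_saturation(img, min_delta=0.8, max_delta=1.25)\n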
def random_distort_color(image, color_ordering=0):\n \"\"\"Randomly distorts color.\n\n Randomly distorts color using a combination of brightness, hue, contrast\n and saturation changes.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n color_ordering: Python int, a type of distortion (valid values: 0, 1).\n\n Returns:\n image: image which is the same shape as input image.\n\n Raises:\n ValueError: if color_ordering is not in {0, 1}.\n \"\"\"\n with tf.name_scope('RandomDistortColor', values=[image]):\n if color_ordering == 0:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n elif color_ordering == 1:\n image = tf.image.random_brightness(image, max_delta=32. / 255.)\n image = tf.image.random_contrast(image, lower=0.5, upper=1.5)\n image = tf.image.random_saturation(image, lower=0.5, upper=1.5)\n image = tf.image.random_hue(image, max_delta=0.2)\n else:\n raise ValueError('color_ordering must be in {0, 1}')\n\n return image\n\n\ndef random_jitter_boxes(boxes, ratio=0.05, seed=None):\n \"\"\"Randomly jitter boxes in image.\n\n Args:\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n ratio: The ratio of the box width and height that the corners can jitter.\n For example if the width is 100 pixels and ratio is 0.05,\n the corners can jitter up to 5 pixels in the x direction.\n seed: random seed.\n\n Returns:\n boxes: boxes which is the same shape as input boxes.\n \"\"\"\n def random_jitter_box(box, ratio, seed):\n \"\"\"Randomly jitter box.\n\n Args:\n box: bounding box [1, 1, 4].\n ratio: max ratio between jittered box and original box,\n a number between [0, 0.5].\n seed: random seed.\n\n Returns:\n jittered_box: jittered box.\n \"\"\"\n rand_numbers = tf.random_uniform(\n [1, 1, 4], minval=-ratio, maxval=ratio, dtype=tf.float32, seed=seed)\n box_width = tf.subtract(box[0, 0, 3], box[0, 0, 1])\n box_height = tf.subtract(box[0, 0, 2], box[0, 0, 0])\n hw_coefs = tf.stack([box_height, box_width, box_height, box_width])\n hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers)\n jittered_box = tf.add(box, hw_rand_coefs)\n jittered_box = tf.clip_by_value(jittered_box, 0.0, 1.0)\n return jittered_box\n\n with tf.name_scope('RandomJitterBoxes', values=[boxes]):\n # boxes are [N, 4]. Let's first make them [N, 1, 1, 4]\n boxes_shape = tf.shape(boxes)\n boxes = tf.expand_dims(boxes, 1)\n boxes = tf.expand_dims(boxes, 2)\n\n distorted_boxes = tf.map_fn(\n lambda x: random_jitter_box(x, ratio, seed), boxes, dtype=tf.float32)\n\n distorted_boxes = tf.reshape(distorted_boxes, boxes_shape)\n\n return distorted_boxes\n\n\ndef _strict_random_crop_image(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=1.0,\n aspect_ratio_range=(0.75, 1.33),\n area_range=(0.1, 1.0),\n overlap_thresh=0.3):\n \"\"\"Performs random crop.\n\n Note: boxes will be clipped to the crop. Keypoint coordinates that are\n outside the crop will be set to NaN, which is consistent with the original\n keypoint encoding for non-existing keypoints. This function always crops\n the image and is supposed to be used by `random_crop_image` function which\n sometimes returns image unchanged.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes with shape\n [num_instances, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2].
The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: rank 1 float32 tensor with shape [num_instances].\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n with tf.name_scope('RandomCropImage', values=[image, boxes]):\n image_shape = tf.shape(image)\n\n # boxes are [N, 4]. Lets first make them [N, 1, 4].\n boxes_expanded = tf.expand_dims(\n tf.clip_by_value(\n boxes, clip_value_min=0.0, clip_value_max=1.0), 1)\n\n sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(\n image_shape,\n bounding_boxes=boxes_expanded,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n max_attempts=100,\n use_image_if_no_bounding_boxes=True)\n\n im_box_begin, im_box_size, im_box = sample_distorted_bounding_box\n\n new_image = tf.slice(image, im_box_begin, im_box_size)\n new_image.set_shape([None, None, image.get_shape()[2]])\n\n # [1, 4]\n im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0])\n # [4]\n im_box_rank1 = tf.squeeze(im_box)\n\n boxlist = box_list.BoxList(boxes)\n boxlist.add_field('labels', labels)\n\n if label_scores is not None:\n boxlist.add_field('label_scores', label_scores)\n\n im_boxlist = box_list.BoxList(im_box_rank2)\n\n # remove boxes that are outside cropped image\n boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window(\n boxlist, im_box_rank1)\n\n # remove boxes that are outside image\n # overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(\n # boxlist, im_boxlist, overlap_thresh)\n\n # remove boxes that are outside image AND GET BLACKBOXLIST\n overlapping_boxlist, keep_ids, black_boxlist = box_list_ops.prune_non_overlapping_boxes_custom(\n boxlist, im_boxlist, overlap_thresh)\n\n # change the coordinate of the remaining boxes\n new_labels = overlapping_boxlist.get_field('labels')\n new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,\n im_box_rank1)\n ####################################################################\n\n # Change coordinate of boxes to be blacked\n black_boxlist = box_list_ops.change_coordinate_frame(black_boxlist,\n \t\t im_box_rank1)\n blackbox = black_boxlist.get()\n new_image = tf.expand_dims(new_image, 0)\n blackbox = tf.expand_dims(blackbox, 0)\n new_image = tf.image.draw_bounding_boxes(new_image, blackbox, fill=True)\n new_image = tf.squeeze(new_image)\n blackbox = tf.squeeze(blackbox)\n\n\n #####################################################################\n new_boxes = new_boxlist.get()\n new_boxes = tf.clip_by_value(\n new_boxes, clip_value_min=0.0, clip_value_max=1.0)\n\n result = [new_image, new_boxes, new_labels]\n\n if label_scores is not None:\n new_label_scores = overlapping_boxlist.get_field('label_scores')\n result.append(new_label_scores)\n\n if masks 
is not None:\n masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids)\n masks_of_boxes_completely_inside_window = tf.gather(\n masks_of_boxes_inside_window, keep_ids)\n masks_box_begin = [0, im_box_begin[0], im_box_begin[1]]\n masks_box_size = [-1, im_box_size[0], im_box_size[1]]\n new_masks = tf.slice(\n masks_of_boxes_completely_inside_window,\n masks_box_begin, masks_box_size)\n result.append(new_masks)\n\n if keypoints is not None:\n keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids)\n keypoints_of_boxes_completely_inside_window = tf.gather(\n keypoints_of_boxes_inside_window, keep_ids)\n new_keypoints = keypoint_ops.change_coordinate_frame(\n keypoints_of_boxes_completely_inside_window, im_box_rank1)\n new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,\n [0.0, 0.0, 1.0, 1.0])\n result.append(new_keypoints)\n\n return tuple(result)\n\n\ndef random_crop_image(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=0.5,\n aspect_ratio_range=(0.60, 0.90),\n area_range=(0.3, 1.0),\n overlap_thresh=0.3,\n random_coef=0.0,\n seed=None):\n # min_object_covered=1.0,\n # aspect_ratio_range=(0.75, 1.33),\n # area_range=(0.1, 1.0),\n\n # for trng\n # min_object_covered=0.5,\n # aspect_ratio_range=(0.60, 0.90),\n # area_range=(0.5, 1.0)\n \"\"\"Randomly crops the image.\n\n Given the input image and its bounding boxes, this op randomly\n crops a subimage. Given a user-provided set of input constraints,\n the crop window is resampled until it satisfies these constraints.\n If within 100 trials it is unable to find a valid crop, the original\n image is returned. See the Args section for a description of the input\n constraints. Both input boxes and returned Boxes are in normalized\n form (e.g., lie in the unit square [0, 1]).\n This function will return the original image with probability random_coef.\n\n Note: boxes will be clipped to the crop. Keypoint coordinates that are\n outside the crop will be set to NaN, which is consistent with the original\n keypoint encoding for non-existing keypoints.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes with shape\n [num_instances, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances].\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. 
If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n seed: random seed.\n\n Returns:\n image: Image shape will be [new_height, new_width, channels].\n boxes: boxes which is the same rank as input boxes. Boxes are in normalized\n form.\n labels: new labels.\n\n If label_scores, masks, or keypoints are not None, the function also\n returns:\n label_scores: new scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n\n def strict_random_crop_image_fn():\n return _strict_random_crop_image(\n image,\n boxes,\n labels,\n label_scores=label_scores,\n masks=masks,\n keypoints=keypoints,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n overlap_thresh=overlap_thresh)\n\n # avoids tf.cond to make faster RCNN training on borg. See b/140057645.\n if random_coef < sys.float_info.min:\n result = strict_random_crop_image_fn()\n else:\n do_a_crop_random = tf.random_uniform([], seed=seed)\n do_a_crop_random = tf.greater(do_a_crop_random, random_coef)\n\n outputs = [image, boxes, labels]\n\n if label_scores is not None:\n outputs.append(label_scores)\n if masks is not None:\n outputs.append(masks)\n if keypoints is not None:\n outputs.append(keypoints)\n\n result = tf.cond(do_a_crop_random, strict_random_crop_image_fn,\n lambda: tuple(outputs))\n return result\n\n\ndef random_pad_image(image,\n boxes,\n min_image_size=None,\n max_image_size=None,\n pad_color=None,\n seed=None):\n \"\"\"Randomly pads the image.\n\n This function randomly pads the image with zeros. The final size of the\n padded image will be between min_image_size and max_image_size.\n if min_image_size is smaller than the input image size, min_image_size will\n be set to the input image size. The same for max_image_size. The input image\n will be located at a uniformly random location inside the padded image.\n The relative location of the boxes to the original image will remain the same.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n min_image_size: a tensor of size [min_height, min_width], type tf.int32.\n If passed as None, will be set to image size\n [height, width].\n max_image_size: a tensor of size [max_height, max_width], type tf.int32.\n If passed as None, will be set to twice the\n image [height * 2, width * 2].\n pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.\n if set as None, it will be set to average color of the input\n image.\n\n seed: random seed.\n\n Returns:\n image: Image shape will be [new_height, new_width, channels].\n boxes: boxes which is the same rank as input boxes. 
Boxes are in normalized\n form.\n \"\"\"\n # if pad_color is None:\n # pad_color = tf.reduce_mean(image, axis=[0, 1])\n\n image_shape = tf.shape(image)\n image_height = image_shape[0]\n image_width = image_shape[1]\n\n if max_image_size is None:\n max_image_size = tf.stack([image_height * 2, image_width * 2])\n max_image_size = tf.maximum(max_image_size,\n tf.stack([image_height, image_width]))\n\n if min_image_size is None:\n min_image_size = tf.stack([image_height, image_width])\n min_image_size = tf.maximum(min_image_size,\n tf.stack([image_height, image_width]))\n\n target_height = tf.cond(\n max_image_size[0] > min_image_size[0],\n lambda: _random_integer(min_image_size[0], max_image_size[0], seed),\n lambda: max_image_size[0])\n\n target_width = tf.cond(\n max_image_size[1] > min_image_size[1],\n lambda: _random_integer(min_image_size[1], max_image_size[1], seed),\n lambda: max_image_size[1])\n\n offset_height = tf.cond(\n target_height > image_height,\n lambda: _random_integer(0, target_height - image_height, seed),\n lambda: tf.constant(0, dtype=tf.int32))\n\n offset_width = tf.cond(\n target_width > image_width,\n lambda: _random_integer(0, target_width - image_width, seed),\n lambda: tf.constant(0, dtype=tf.int32))\n\n new_image = tf.image.pad_to_bounding_box(\n image,\n offset_height=offset_height,\n offset_width=offset_width,\n target_height=target_height,\n target_width=target_width)\n\n # Setting color of the padded pixels\n # image_ones = tf.ones_like(image)\n # image_ones_padded = tf.image.pad_to_bounding_box(\n # image_ones,\n # offset_height=offset_height,\n # offset_width=offset_width,\n # target_height=target_height,\n # target_width=target_width)\n # image_color_padded = (1.0 - image_ones_padded) * pad_color\n # new_image += image_color_padded\n\n # setting boxes\n new_window = tf.to_float(\n tf.stack([\n -offset_height, -offset_width, target_height - offset_height,\n target_width - offset_width\n ]))\n new_window /= tf.to_float(\n tf.stack([image_height, image_width, image_height, image_width]))\n boxlist = box_list.BoxList(boxes)\n new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window)\n new_boxes = new_boxlist.get()\n\n return new_image, new_boxes\n\n\ndef random_crop_pad_image(image,\n boxes,\n labels,\n label_scores=None,\n min_object_covered=0.5,\n aspect_ratio_range=(0.75/1.1, 0.75*1.1),\n area_range=(0.2, 1.0),\n overlap_thresh=0.7,\n random_coef=0.0,\n min_padded_size_ratio=(1.0, 1.0),\n max_padded_size_ratio=(1.75, 1.75),\n pad_color=None,\n seed=None):\n # orig\n # min_object_covered=1.0,\n # aspect_ratio_range=(0.75, 1.33),\n # area_range=(0.1, 1.0),\n # max_padded_size_ratio=(2.0, 2.0),\n # overlap_thresh=0.3,\n\n # pmi_ukraine\n # min_object_covered=0.4,\n # aspect_ratio_range=(0.60, 0.90),\n \"\"\"Randomly crops and pads the image.\n\n Given an input image and its bounding boxes, this op first randomly crops\n the image and then randomly pads the image with background values. Parameters\n min_padded_size_ratio and max_padded_size_ratio, determine the range of the\n final output image size. Specifically, the final image size will have a size\n in the range of min_padded_size_ratio * tf.shape(image) and\n max_padded_size_ratio * tf.shape(image). 
Note that these ratios are with\n respect to the size of the original image, so we can't capture the same\n effect easily by independently applying RandomCropImage\n followed by RandomPadImage.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: rank 1 float32 containing the label scores.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.\n if set as None, it will be set to average color of the randomly\n cropped image.\n seed: random seed.\n\n Returns:\n padded_image: padded image.\n padded_boxes: boxes which is the same rank as input boxes. Boxes are in\n normalized form.\n cropped_labels: cropped labels.\n if label_scores is not None also returns:\n cropped_label_scores: cropped label scores.\n \"\"\"\n # Seeds used in earlier experiments:\n # np.random.seed(123) # crop and pad\n # np.random.seed(1234) # crop\n # np.random.seed(12345) # none, orig\n # np.random.seed(1) # pad\n\n # NOTE: this is a Python-level draw, so the crop/pad decision is made once\n # at graph-construction time and stays fixed for every example that flows\n # through the resulting graph.\n rand = np.random.random_sample()\n\n crop, pad = False, False\n if rand < 0.70:\n crop = True\n elif rand < 0.80:\n pad = True\n else:\n # rand >= 0.80: apply both crop and pad. (A commented-out variant\n # reserved rand >= 0.90 for returning the original image.)\n crop = True\n pad = True\n\n image_size = tf.shape(image)\n image_height = image_size[0]\n image_width = image_size[1]\n\n if crop and pad:\n result = random_crop_image(\n image=image,\n boxes=boxes,\n labels=labels,\n label_scores=label_scores,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n overlap_thresh=overlap_thresh,\n random_coef=random_coef,\n seed=seed)\n cropped_image, cropped_boxes, cropped_labels = result[:3]\n\n min_image_size = tf.to_int32(\n tf.to_float(tf.stack([image_height, image_width])) *\n min_padded_size_ratio)\n max_image_size = tf.to_int32(\n tf.to_float(tf.stack([image_height, image_width])) *\n max_padded_size_ratio)\n padded_image, padded_boxes = random_pad_image(\n cropped_image,\n cropped_boxes,\n min_image_size=min_image_size,\n max_image_size=max_image_size,\n pad_color=pad_color,\n seed=seed)\n\n cropped_padded_output = (padded_image, padded_boxes, cropped_labels)\n\n if label_scores is not None:\n cropped_label_scores = result[3]\n cropped_padded_output += (cropped_label_scores,)\n\n elif crop:\n result = random_crop_image(\n image=image,\n boxes=boxes,\n labels=labels,\n label_scores=label_scores,\n min_object_covered=min_object_covered,\n aspect_ratio_range=aspect_ratio_range,\n area_range=area_range,\n overlap_thresh=overlap_thresh,\n random_coef=random_coef,\n seed=seed)\n cropped_image, cropped_boxes, cropped_labels = result[:3]\n\n cropped_padded_output = (cropped_image, cropped_boxes, cropped_labels)\n if label_scores is not None:\n cropped_label_scores = result[3]\n cropped_padded_output += (cropped_label_scores,)\n\n elif pad:\n min_image_size = tf.to_int32(\n tf.to_float(tf.stack([image_height, image_width])) *\n min_padded_size_ratio)\n max_image_size = tf.to_int32(\n tf.to_float(tf.stack([image_height, image_width])) *\n max_padded_size_ratio)\n padded_image, padded_boxes = random_pad_image(\n image,\n boxes,\n min_image_size=min_image_size,\n max_image_size=max_image_size,\n pad_color=pad_color,\n seed=seed)\n\n cropped_padded_output = (padded_image, padded_boxes, labels)\n if label_scores is not None:\n cropped_padded_output += (label_scores,)\n else:\n # (Commented-out debugging variant that drew the boxes onto the image.)\n # image = tf.expand_dims(image, 0)\n # boxes = tf.expand_dims(boxes, 0)\n # image = tf.image.draw_bounding_boxes(image, boxes, fill=True)\n # image = tf.squeeze(image)\n # boxes = tf.squeeze(boxes)\n cropped_padded_output = (image, boxes, labels)\n\n return cropped_padded_output
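\n# Editor's note -- illustrative sketch, not original code: with the\n# probabilities hard-coded above, random_crop_pad_image crops ~70% of the\n# time, pads ~10%, and both crops and pads the remaining ~20% (the original\n# image is only ever returned via random_coef inside random_crop_image).\n# Hypothetical call:\n#\n#   new_image, new_boxes, new_labels = random_crop_pad_image(\n#       example_image, example_boxes, example_labels, seed=7)\n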
def random_crop_to_aspect_ratio(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n aspect_ratio=1.0,\n overlap_thresh=0.3,\n seed=None):\n \"\"\"Randomly crops an image to the specified aspect ratio.\n\n Randomly crops a portion of the image such that the crop is of the\n specified aspect ratio, and the crop is as large as possible. If the specified\n aspect ratio is larger than the aspect ratio of the image, this op will\n randomly remove rows from the top and bottom of the image. If the specified\n aspect ratio is less than the aspect ratio of the image, this op will randomly\n remove cols from the left and right of the image. If the specified aspect\n ratio is the same as the aspect ratio of the image, this op will return the\n image.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2].
The keypoints are in y-x\n normalized coordinates.\n aspect_ratio: the aspect ratio of cropped image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: new label scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: If image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('RandomCropToAspectRatio', values=[image]):\n image_shape = tf.shape(image)\n orig_height = image_shape[0]\n orig_width = image_shape[1]\n orig_aspect_ratio = tf.to_float(orig_width) / tf.to_float(orig_height)\n new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)\n def target_height_fn():\n return tf.to_int32(tf.round(tf.to_float(orig_width) / new_aspect_ratio))\n\n target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio,\n lambda: orig_height, target_height_fn)\n\n def target_width_fn():\n return tf.to_int32(tf.round(tf.to_float(orig_height) * new_aspect_ratio))\n\n target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio,\n lambda: orig_width, target_width_fn)\n\n # either offset_height = 0 and offset_width is randomly chosen from\n # [0, offset_width - target_width), or else offset_width = 0 and\n # offset_height is randomly chosen from [0, offset_height - target_height)\n offset_height = _random_integer(0, orig_height - target_height + 1, seed)\n offset_width = _random_integer(0, orig_width - target_width + 1, seed)\n new_image = tf.image.crop_to_bounding_box(\n image, offset_height, offset_width, target_height, target_width)\n\n im_box = tf.stack([\n tf.to_float(offset_height) / tf.to_float(orig_height),\n tf.to_float(offset_width) / tf.to_float(orig_width),\n tf.to_float(offset_height + target_height) / tf.to_float(orig_height),\n tf.to_float(offset_width + target_width) / tf.to_float(orig_width)\n ])\n\n boxlist = box_list.BoxList(boxes)\n boxlist.add_field('labels', labels)\n\n if label_scores is not None:\n boxlist.add_field('label_scores', label_scores)\n\n im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0))\n\n # remove boxes whose overlap with the image is less than overlap_thresh\n overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes(\n boxlist, im_boxlist, overlap_thresh)\n\n # change the coordinate of the remaining boxes\n new_labels = overlapping_boxlist.get_field('labels')\n new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist,\n im_box)\n new_boxlist = box_list_ops.clip_to_window(new_boxlist,\n tf.constant([0.0, 0.0, 1.0, 1.0],\n tf.float32))\n new_boxes = new_boxlist.get()\n\n result = [new_image, new_boxes, new_labels]\n\n if label_scores is not None:\n new_label_scores = overlapping_boxlist.get_field('label_scores')\n result.append(new_label_scores)\n\n if masks is not None:\n masks_inside_window = tf.gather(masks, keep_ids)\n masks_box_begin = tf.stack([0, offset_height, offset_width])\n masks_box_size = tf.stack([-1, target_height, target_width])\n new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size)\n result.append(new_masks)\n\n if keypoints is not 
None:\n keypoints_inside_window = tf.gather(keypoints, keep_ids)\n new_keypoints = keypoint_ops.change_coordinate_frame(\n keypoints_inside_window, im_box)\n new_keypoints = keypoint_ops.prune_outside_window(new_keypoints,\n [0.0, 0.0, 1.0, 1.0])\n result.append(new_keypoints)\n\n return tuple(result)\n\n\ndef random_pad_to_aspect_ratio(image,\n boxes,\n masks=None,\n keypoints=None,\n aspect_ratio=1.0,\n min_padded_size_ratio=(1.0, 1.0),\n max_padded_size_ratio=(2.0, 2.0),\n seed=None):\n # aspect_ratio=1.0,\n # aspect_ratio=800.0/1080.0,\n \"\"\"Randomly zero pads an image to the specified aspect ratio.\n\n Pads the image so that the resulting image will have the specified aspect\n ratio without scaling less than the min_padded_size_ratio or more than the\n max_padded_size_ratio. If the min_padded_size_ratio or max_padded_size_ratio\n is lower than what is possible to maintain the aspect ratio, then this method\n will use the least padding to achieve the specified aspect ratio.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x\n normalized coordinates.\n aspect_ratio: aspect ratio of the final image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: new label scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n\n Raises:\n ValueError: If image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('RandomPadToAspectRatio', values=[image]):\n image_shape = tf.shape(image)\n image_height = tf.to_float(image_shape[0])\n image_width = tf.to_float(image_shape[1])\n image_aspect_ratio = image_width / image_height\n new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32)\n target_height = tf.cond(\n image_aspect_ratio <= new_aspect_ratio,\n lambda: image_height,\n lambda: image_width / new_aspect_ratio)\n target_width = tf.cond(\n image_aspect_ratio >= new_aspect_ratio,\n lambda: image_width,\n lambda: image_height * new_aspect_ratio)\n\n min_height = tf.maximum(\n min_padded_size_ratio[0] * image_height, target_height)\n min_width = tf.maximum(\n min_padded_size_ratio[1] * image_width, target_width)\n max_height = tf.maximum(\n max_padded_size_ratio[0] * image_height, target_height)\n max_width = tf.maximum(\n max_padded_size_ratio[1] * image_width, target_width)\n\n min_scale = tf.maximum(min_height / target_height, min_width / target_width)\n 
max_scale = tf.minimum(max_height / target_height, max_width / target_width)\n scale = tf.random_uniform([], min_scale, max_scale, seed=seed)\n\n target_height = scale * target_height\n target_width = scale * target_width\n\n new_image = tf.image.pad_to_bounding_box(\n image, 0, 0, tf.to_int32(target_height), tf.to_int32(target_width))\n\n im_box = tf.stack([\n 0.0,\n 0.0,\n target_height / image_height,\n target_width / image_width\n ])\n boxlist = box_list.BoxList(boxes)\n new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box)\n new_boxes = new_boxlist.get()\n\n result = [new_image, new_boxes]\n\n if masks is not None:\n new_masks = tf.expand_dims(masks, -1)\n new_masks = tf.image.pad_to_bounding_box(new_masks, 0, 0,\n tf.to_int32(target_height),\n tf.to_int32(target_width))\n new_masks = tf.squeeze(new_masks, [-1])\n result.append(new_masks)\n\n if keypoints is not None:\n new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box)\n result.append(new_keypoints)\n\n return tuple(result)\n\n\ndef random_black_patches(image,\n max_black_patches=10,\n probability=0.5,\n size_to_image_ratio=0.1,\n random_seed=None):\n \"\"\"Randomly adds some black patches to the image.\n\n This op adds up to max_black_patches square black patches of a fixed size\n to the image where size is specified via the size_to_image_ratio parameter.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n max_black_patches: number of times that the function tries to add a\n black box to the image.\n probability: at each try, what is the chance of adding a box.\n size_to_image_ratio: Determines the ratio of the size of the black patches\n to the size of the image.\n box_size = size_to_image_ratio *\n min(image_width, image_height)\n random_seed: random seed.\n\n Returns:\n image\n \"\"\"\n def add_black_patch_to_image(image):\n \"\"\"Function for adding one patch to the image.\n\n Args:\n image: image\n\n Returns:\n image with a randomly added black box\n \"\"\"\n image_shape = tf.shape(image)\n image_height = image_shape[0]\n image_width = image_shape[1]\n box_size = tf.to_int32(\n tf.multiply(\n tf.minimum(tf.to_float(image_height), tf.to_float(image_width)),\n size_to_image_ratio))\n normalized_y_min = tf.random_uniform(\n [], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed)\n normalized_x_min = tf.random_uniform(\n [], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed)\n y_min = tf.to_int32(normalized_y_min * tf.to_float(image_height))\n x_min = tf.to_int32(normalized_x_min * tf.to_float(image_width))\n black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32)\n mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min,\n image_height, image_width)\n image = tf.multiply(image, mask)\n return image\n\n with tf.name_scope('RandomBlackPatchInImage', values=[image]):\n for _ in range(max_black_patches):\n random_prob = tf.random_uniform(\n [], minval=0.0, maxval=1.0, dtype=tf.float32, seed=random_seed)\n image = tf.cond(\n tf.greater(random_prob, probability), lambda: image,\n lambda: add_black_patch_to_image(image))\n\n return image
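\n# Editor's note -- illustrative sketch, not original code: random_black_patches\n# makes max_black_patches independent attempts, each succeeding with\n# `probability`, so the expected number of (possibly overlapping) patches is\n# max_black_patches * probability. Hypothetical usage:\n#\n#   occluded = random_black_patches(example_image, max_black_patches=10,\n#                                   probability=0.5, size_to_image_ratio=0.1)\n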
def image_to_float(image):\n \"\"\"Used in Faster R-CNN. Casts image pixel values to float.\n\n Args:\n image: input image, which might be tf.uint8 or another integer dtype\n\n Returns:\n image: image in tf.float32 format.\n \"\"\"\n with tf.name_scope('ImageToFloat', values=[image]):\n image = tf.to_float(image)\n return image\n\n\ndef random_resize_method(image, target_size):\n \"\"\"Uses a random resize method to resize the image to target size.\n\n Args:\n image: a rank 3 tensor.\n target_size: a list of [target_height, target_width]\n\n Returns:\n resized image.\n \"\"\"\n\n resized_image = _apply_with_random_selector(\n image,\n lambda x, method: tf.image.resize_images(x, target_size, method),\n num_cases=4)\n\n return resized_image\n\n\ndef _compute_new_static_size(image, min_dimension, max_dimension):\n \"\"\"Compute new static shape for resize_to_range method.\"\"\"\n image_shape = image.get_shape().as_list()\n orig_height = image_shape[0]\n orig_width = image_shape[1]\n orig_min_dim = min(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n large_scale_factor = min_dimension / float(orig_min_dim)\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = int(round(orig_height * large_scale_factor))\n large_width = int(round(orig_width * large_scale_factor))\n large_size = [large_height, large_width]\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = max(orig_height, orig_width)\n small_scale_factor = max_dimension / float(orig_max_dim)\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors.
For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = int(round(orig_height * small_scale_factor))\n small_width = int(round(orig_width * small_scale_factor))\n small_size = [small_height, small_width]\n new_size = large_size\n if max(large_size) > max_dimension:\n new_size = small_size\n else:\n new_size = large_size\n return tf.constant(new_size)\n\n\ndef _compute_new_dynamic_size(image, min_dimension, max_dimension):\n \"\"\"Compute new dynamic shape for resize_to_range method.\"\"\"\n image_shape = tf.shape(image)\n orig_height = tf.to_float(image_shape[0])\n orig_width = tf.to_float(image_shape[1])\n orig_min_dim = tf.minimum(orig_height, orig_width)\n # Calculates the larger of the possible sizes\n min_dimension = tf.constant(min_dimension, dtype=tf.float32)\n large_scale_factor = min_dimension / orig_min_dim\n # Scaling orig_(height|width) by large_scale_factor will make the smaller\n # dimension equal to min_dimension, save for floating point rounding errors.\n # For reasonably-sized images, taking the nearest integer will reliably\n # eliminate this error.\n large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))\n large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))\n large_size = tf.stack([large_height, large_width])\n if max_dimension:\n # Calculates the smaller of the possible sizes, use that if the larger\n # is too big.\n orig_max_dim = tf.maximum(orig_height, orig_width)\n max_dimension = tf.constant(max_dimension, dtype=tf.float32)\n small_scale_factor = max_dimension / orig_max_dim\n # Scaling orig_(height|width) by small_scale_factor will make the larger\n # dimension equal to max_dimension, save for floating point rounding\n # errors. For reasonably-sized images, taking the nearest integer will\n # reliably eliminate this error.\n small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))\n small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))\n small_size = tf.stack([small_height, small_width])\n new_size = tf.cond(\n tf.to_float(tf.reduce_max(large_size)) > max_dimension,\n lambda: small_size, lambda: large_size)\n else:\n new_size = large_size\n return new_size\n\n\ndef resize_to_range(image,\n masks=None,\n min_dimension=None,\n max_dimension=None,\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False):\n \"\"\"Resizes an image so its dimensions are within the provided value.\n\n The output size can be described by two cases:\n 1. If the image can be rescaled so its minimum dimension is equal to the\n provided value without the other dimension exceeding max_dimension,\n then do so.\n 2. Otherwise, resize so the largest dimension is equal to max_dimension.\n\n Args:\n image: A 3D tensor of shape [height, width, channels]\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks.\n min_dimension: (optional) (scalar) desired size of the smaller image\n dimension.\n max_dimension: (optional) (scalar) maximum allowed size\n of the larger image dimension.\n method: (optional) interpolation method used in resizing. Defaults to\n BILINEAR.\n align_corners: bool. If true, exactly align all 4 corners of the input\n and output. 
Defaults to False.\n\n Returns:\n A 3D tensor of shape [new_height, new_width, channels],\n where the image has been resized (with bilinear interpolation) so that\n min(new_height, new_width) == min_dimension or\n max(new_height, new_width) == max_dimension.\n\n If masks is not None, also outputs masks:\n A 3D tensor of shape [num_instances, new_height, new_width]\n\n Raises:\n ValueError: if the image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('ResizeToRange', values=[image, min_dimension]):\n if image.get_shape().is_fully_defined():\n new_size = _compute_new_static_size(image, min_dimension, max_dimension)\n else:\n new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)\n new_image = tf.image.resize_images(\n image, new_size, method=method, align_corners=align_corners)\n\n result = new_image\n if masks is not None:\n new_masks = tf.expand_dims(masks, 3)\n new_masks = tf.image.resize_nearest_neighbor(\n new_masks, new_size, align_corners=align_corners)\n new_masks = tf.squeeze(new_masks, 3)\n result = [new_image, new_masks]\n\n return result\n\n\n# TODO: Make sure the static shapes are preserved.\ndef resize_to_min_dimension(image, masks=None, min_dimension=600):\n \"\"\"Resizes image and masks given the min size maintaining the aspect ratio.\n\n If one of the image dimensions is smaller that min_dimension, it will scale\n the image such that its smallest dimension is equal to min_dimension.\n Otherwise, will keep the image size as is.\n\n Args:\n image: a tensor of size [height, width, channels].\n masks: (optional) a tensors of size [num_instances, height, width].\n min_dimension: minimum image dimension.\n\n Returns:\n a tuple containing the following:\n Resized image. A tensor of size [new_height, new_width, channels].\n (optional) Resized masks. A tensor of\n size [num_instances, new_height, new_width].\n\n Raises:\n ValueError: if the image is not a 3D tensor.\n \"\"\"\n if len(image.get_shape()) != 3:\n raise ValueError('Image should be 3D tensor')\n\n with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]):\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n min_image_dimension = tf.minimum(image_height, image_width)\n min_target_dimension = tf.maximum(min_image_dimension, min_dimension)\n target_ratio = tf.to_float(min_target_dimension) / tf.to_float(\n min_image_dimension)\n target_height = tf.to_int32(tf.to_float(image_height) * target_ratio)\n target_width = tf.to_int32(tf.to_float(image_width) * target_ratio)\n image = tf.image.resize_bilinear(\n tf.expand_dims(image, axis=0),\n size=[target_height, target_width],\n align_corners=True)\n result = tf.squeeze(image, axis=0)\n if masks is not None:\n masks = tf.image.resize_nearest_neighbor(\n tf.expand_dims(masks, axis=3),\n size=[target_height, target_width],\n align_corners=True)\n result = (result, tf.squeeze(masks, axis=3))\n return result\n\n\ndef scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):\n \"\"\"Scales boxes from normalized to pixel coordinates.\n\n Args:\n image: A 3D float32 tensor of shape [height, width, channels].\n boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding\n boxes in normalized coordinates. Each row is of the form\n [ymin, xmin, ymax, xmax].\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x normalized\n coordinates.\n\n Returns:\n image: unchanged input image.\n scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the\n bounding boxes in pixel coordinates.\n scaled_keypoints: a 3D float32 tensor with shape\n [num_instances, num_keypoints, 2] containing the keypoints in pixel\n coordinates.\n \"\"\"\n boxlist = box_list.BoxList(boxes)\n image_height = tf.shape(image)[0]\n image_width = tf.shape(image)[1]\n scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get()\n result = [image, scaled_boxes]\n if keypoints is not None:\n scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width)\n result.append(scaled_keypoints)\n return tuple(result)\n\n\n# pylint: disable=g-doc-return-or-yield\ndef resize_image(image,\n masks=None,\n new_height=600,\n new_width=1024,\n method=tf.image.ResizeMethod.BILINEAR,\n align_corners=False):\n \"\"\"See `tf.image.resize_images` for detailed doc.\"\"\"\n with tf.name_scope(\n 'ResizeImage',\n values=[image, new_height, new_width, method, align_corners]):\n new_image = tf.image.resize_images(\n image, [new_height, new_width],\n method=method,\n align_corners=align_corners)\n result = new_image\n if masks is not None:\n num_instances = tf.shape(masks)[0]\n new_size = tf.constant([new_height, new_width], dtype=tf.int32)\n def resize_masks_branch():\n new_masks = tf.expand_dims(masks, 3)\n new_masks = tf.image.resize_nearest_neighbor(\n new_masks, new_size, align_corners=align_corners)\n new_masks = tf.squeeze(new_masks, axis=3)\n return new_masks\n\n def reshape_masks_branch():\n new_masks = tf.reshape(masks, [0, new_size[0], new_size[1]])\n return new_masks\n\n masks = tf.cond(num_instances > 0, resize_masks_branch,\n reshape_masks_branch)\n result = [new_image, masks]\n\n return result\n\n\ndef subtract_channel_mean(image, means=None):\n \"\"\"Normalizes an image by subtracting a mean from each channel.\n\n Args:\n image: A 3D tensor of shape [height, width, channels]\n means: float list containing a mean for each channel\n Returns:\n normalized_image: a tensor of shape [height, width, channels]\n Raises:\n ValueError: if the image is not a 3D tensor or if the number of means is\n not equal to the number of channels.\n \"\"\"\n with tf.name_scope('SubtractChannelMean', values=[image, means]):\n if len(image.get_shape()) != 3:\n raise ValueError('Input must be of size [height, width, channels]')\n if len(means) != image.get_shape()[-1]:\n raise ValueError('len(means) must match the number of channels')\n return image - [[means]]\n\n\ndef one_hot_encoding(labels, num_classes=None):\n \"\"\"One-hot encodes the multiclass labels.\n\n Example usage:\n labels = tf.constant([1, 4], dtype=tf.int32)\n one_hot = one_hot_encoding(labels, num_classes=5)\n one_hot.eval() # evaluates to [0, 1, 0, 0, 1]\n\n Args:\n labels: A tensor of shape [None] corresponding to the labels.\n num_classes: Number of classes in the dataset.\n Returns:\n onehot_labels: a tensor of shape [num_classes] corresponding to the one hot\n encoding of the labels.\n Raises:\n ValueError: if num_classes is not specified.\n \"\"\"\n with tf.name_scope('OneHotEncoding', values=[labels]):\n if num_classes is None:\n raise ValueError('num_classes must be specified')\n\n labels = tf.one_hot(labels, num_classes, 1, 0)\n return tf.reduce_max(labels, 0)
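\n# Editor's note -- illustrative sketch, not original code: a typical call to\n# subtract_channel_mean with the commonly used ImageNet RGB means (the values\n# are an assumption for illustration, not taken from this file):\n#\n#   normalized = subtract_channel_mean(example_image,\n#                                      means=[123.68, 116.78, 103.94])\n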
def rgb_to_gray(image):\n \"\"\"Converts a 3 channel RGB image to a 1 channel grayscale image.\n\n Args:\n image: Rank 3 float32 tensor containing 1 image -> [height, width, 3]\n with pixel values varying between [0, 1].\n\n Returns:\n image: A single channel grayscale image -> [height, width, 1].\n \"\"\"\n return tf.image.rgb_to_grayscale(image)\n\n\ndef ssd_random_crop(image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio_range=((0.5, 2.0),) * 7,\n #aspect_ratio_range=((0.75, 1.25),) * 7,\n area_range=((0.1, 1.0),) * 7,\n overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n #overlap_thresh=(0.0, 0.75, 0.8, 0.8, 0.85, 0.95, 1.0),\n random_coef=(0.15,) * 7,\n seed=None):\n \"\"\"Random crop preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: rank 1 float32 tensor containing the scores.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image.
If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If label_scores, masks, or keypoints is not None, the function also returns:\n label_scores: new label scores.\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n\n def random_crop_selector(selected_result, index):\n \"\"\"Applies random_crop_image to selected result.\n\n Args:\n selected_result: A tuple containing image, boxes, labels, keypoints (if\n not None), and masks (if not None).\n index: The index that was randomly selected.\n\n Returns: A tuple containing image, boxes, labels, keypoints (if not None),\n and masks (if not None).\n \"\"\"\n i = 3\n image, boxes, labels = selected_result[:i]\n selected_label_scores = None\n selected_masks = None\n selected_keypoints = None\n if label_scores is not None:\n selected_label_scores = selected_result[i]\n i += 1\n if masks is not None:\n selected_masks = selected_result[i]\n i += 1\n if keypoints is not None:\n selected_keypoints = selected_result[i]\n\n return random_crop_image(\n image=image,\n boxes=boxes,\n labels=labels,\n label_scores=selected_label_scores,\n masks=selected_masks,\n keypoints=selected_keypoints,\n min_object_covered=min_object_covered[index],\n aspect_ratio_range=aspect_ratio_range[index],\n area_range=area_range[index],\n overlap_thresh=overlap_thresh[index],\n random_coef=random_coef[index],\n seed=seed)\n\n result = _apply_with_random_selector_tuples(\n tuple(\n t for t in (image, boxes, labels, label_scores, masks, keypoints)\n if t is not None),\n random_crop_selector,\n num_cases=len(min_object_covered))\n return result\n\n\ndef ssd_random_crop_pad(image,\n boxes,\n labels,\n label_scores=None,\n min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio_range=((0.5, 2.0),) * 6,\n #aspect_ratio_range=((0.75, 1.25),) * 6,\n area_range=((0.1, 1.0),) * 6,\n overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n #overlap_thresh=(0.7, 0.75, 0.8, 0.8, 0.85, 0.95, 1.0),\n random_coef=(0.15,) * 6,\n min_padded_size_ratio=((1.0, 1.0),) * 6,\n max_padded_size_ratio=((2.0, 2.0),) * 6,\n pad_color=(None,) * 6,\n seed=None):\n \"\"\"Random crop preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n Args:\n image: rank 3 float32 tensor containing 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: float32 tensor of shape [num_instances] representing the\n score for each box.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n 
image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32.\n if set as None, it will be set to average color of the randomly\n cropped image.\n seed: random seed.\n\n Returns:\n image: Image shape will be [new_height, new_width, channels].\n boxes: boxes which is the same rank as input boxes. Boxes are in normalized\n form.\n new_labels: new labels.\n new_label_scores: new label scores.\n \"\"\"\n\n def random_crop_pad_selector(image_boxes_labels, index):\n i = 3\n image, boxes, labels = image_boxes_labels[:i]\n selected_label_scores = None\n if label_scores is not None:\n selected_label_scores = image_boxes_labels[i]\n\n return random_crop_pad_image(\n image,\n boxes,\n labels,\n selected_label_scores,\n min_object_covered=min_object_covered[index],\n aspect_ratio_range=aspect_ratio_range[index],\n area_range=area_range[index],\n overlap_thresh=overlap_thresh[index],\n random_coef=random_coef[index],\n min_padded_size_ratio=min_padded_size_ratio[index],\n max_padded_size_ratio=max_padded_size_ratio[index],\n pad_color=pad_color[index],\n seed=seed)\n\n return _apply_with_random_selector_tuples(\n tuple(t for t in (image, boxes, labels, label_scores) if t is not None),\n random_crop_pad_selector,\n num_cases=len(min_object_covered))\n\n\ndef ssd_random_crop_fixed_aspect_ratio(\n image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio=1.0,\n area_range=((0.1, 1.0),) * 7,\n overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n random_coef=(0.15,) * 7,\n seed=None):\n \"\"\"Random crop preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n The only difference is that the aspect ratio of the crops are fixed.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio: aspect ratio of the cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If masks or keypoints is not None, the function also returns:\n\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range)\n\n crop_result = ssd_random_crop(\n image, boxes, labels, label_scores, masks, keypoints, min_object_covered,\n aspect_ratio_range, area_range, overlap_thresh, random_coef, seed)\n i = 3\n new_image, new_boxes, new_labels = crop_result[:i]\n new_label_scores = None\n new_masks = None\n new_keypoints = None\n if label_scores is not None:\n new_label_scores = crop_result[i]\n i += 1\n if masks is not None:\n new_masks = crop_result[i]\n i += 1\n if keypoints is not None:\n new_keypoints = crop_result[i]\n result = random_crop_to_aspect_ratio(\n new_image,\n new_boxes,\n new_labels,\n new_label_scores,\n new_masks,\n new_keypoints,\n aspect_ratio=aspect_ratio,\n seed=seed)\n\n return result\n\n\ndef ssd_random_crop_pad_fixed_aspect_ratio(\n image,\n boxes,\n labels,\n label_scores=None,\n masks=None,\n keypoints=None,\n min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n aspect_ratio=1.0,\n aspect_ratio_range=((0.5, 2.0),) * 7,\n area_range=((0.1, 1.0),) * 7,\n overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0),\n random_coef=(0.15,) * 7,\n min_padded_size_ratio=(1.0, 1.0),\n max_padded_size_ratio=(2.0, 2.0),\n seed=None):\n \"\"\"Random crop and pad preprocessing with default parameters as in SSD paper.\n\n Liu et al., SSD: Single shot multibox detector.\n For further information on random crop preprocessing refer to RandomCrop\n function above.\n\n The only difference is that after the initial crop, images are zero-padded\n to a fixed aspect ratio instead of being resized to that aspect ratio.\n\n Args:\n image: rank 3 float32 tensor contains 1 image -> [height, width, channels]\n with pixel values varying between [0, 1].\n boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning their coordinates vary\n between [0, 1].\n Each row is in the form of [ymin, xmin, ymax, xmax].\n labels: rank 1 int32 tensor containing the object classes.\n label_scores: (optional) float32 tensor of shape [num_instances]\n representing the score for each box.\n masks: (optional) rank 3 float32 tensor with shape\n [num_instances, height, width] containing instance masks. The masks\n are of the same height, width as the input `image`.\n keypoints: (optional) rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]. 
The keypoints are in y-x\n normalized coordinates.\n min_object_covered: the cropped image must cover at least this fraction of\n at least one of the input bounding boxes.\n aspect_ratio: the final aspect ratio to pad to.\n aspect_ratio_range: allowed range for aspect ratio of cropped image.\n area_range: allowed range for area ratio between cropped image and the\n original image.\n overlap_thresh: minimum overlap thresh with new cropped\n image to keep the box.\n random_coef: a random coefficient that defines the chance of getting the\n original image. If random_coef is 0, we will always get the\n cropped image, and if it is 1.0, we will always get the\n original image.\n min_padded_size_ratio: min ratio of padded image height and width to the\n input image's height and width.\n max_padded_size_ratio: max ratio of padded image height and width to the\n input image's height and width.\n seed: random seed.\n\n Returns:\n image: image which is the same rank as input image.\n boxes: boxes which is the same rank as input boxes.\n Boxes are in normalized form.\n labels: new labels.\n\n If masks or keypoints is not None, the function also returns:\n\n masks: rank 3 float32 tensor with shape [num_instances, height, width]\n containing instance masks.\n keypoints: rank 3 float32 tensor with shape\n [num_instances, num_keypoints, 2]\n \"\"\"\n crop_result = ssd_random_crop(\n image, boxes, labels, label_scores, masks, keypoints, min_object_covered,\n aspect_ratio_range, area_range, overlap_thresh, random_coef, seed)\n i = 3\n new_image, new_boxes, new_labels = crop_result[:i]\n new_label_scores = None\n new_masks = None\n new_keypoints = None\n if label_scores is not None:\n new_label_scores = crop_result[i]\n i += 1\n if masks is not None:\n new_masks = crop_result[i]\n i += 1\n if keypoints is not None:\n new_keypoints = crop_result[i]\n result = random_pad_to_aspect_ratio(\n new_image,\n new_boxes,\n new_masks,\n new_keypoints,\n aspect_ratio=aspect_ratio,\n min_padded_size_ratio=min_padded_size_ratio,\n max_padded_size_ratio=max_padded_size_ratio,\n seed=seed)\n\n result = list(result)\n if new_label_scores is not None:\n result.insert(2, new_label_scores)\n result.insert(2, new_labels)\n result = tuple(result)\n\n return result\n\n\ndef get_default_func_arg_map(include_label_scores=False,\n include_instance_masks=False,\n include_keypoints=False):\n \"\"\"Returns the default mapping from a preprocessor function to its args.\n\n Args:\n include_label_scores: If True, preprocessing functions will modify the\n label scores, too.\n include_instance_masks: If True, preprocessing functions will modify the\n instance masks, too.\n include_keypoints: If True, preprocessing functions will modify the\n keypoints, too.\n\n Returns:\n A map from preprocessing functions to the arguments they receive.\n \"\"\"\n groundtruth_label_scores = None\n if include_label_scores:\n groundtruth_label_scores = (fields.InputDataFields.groundtruth_label_scores)\n\n groundtruth_instance_masks = None\n if include_instance_masks:\n groundtruth_instance_masks = (\n fields.InputDataFields.groundtruth_instance_masks)\n\n groundtruth_keypoints = None\n if include_keypoints:\n groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints\n\n prep_func_arg_map = {\n normalize_image: (fields.InputDataFields.image,),\n random_horizontal_flip: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_vertical_flip: (\n 
fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_rotation90: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_pixel_value_scale: (fields.InputDataFields.image,),\n random_image_scale: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n random_rgb_to_gray: (fields.InputDataFields.image,),\n random_adjust_brightness: (fields.InputDataFields.image,),\n random_adjust_contrast: (fields.InputDataFields.image,),\n random_adjust_hue: (fields.InputDataFields.image,),\n random_adjust_saturation: (fields.InputDataFields.image,),\n random_distort_color: (fields.InputDataFields.image,),\n random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,),\n random_crop_image: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_pad_image: (fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes),\n random_crop_pad_image: (fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores),\n random_crop_to_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_pad_to_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n random_black_patches: (fields.InputDataFields.image,),\n retain_boxes_above_threshold: (\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n image_to_float: (fields.InputDataFields.image,),\n random_resize_method: (fields.InputDataFields.image,),\n resize_to_range: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n resize_to_min_dimension: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n scale_boxes_to_pixel_coordinates: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n groundtruth_keypoints,),\n resize_image: (\n fields.InputDataFields.image,\n groundtruth_instance_masks,),\n subtract_channel_mean: (fields.InputDataFields.image,),\n one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,),\n rgb_to_gray: (fields.InputDataFields.image,),\n ssd_random_crop: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n ssd_random_crop_pad: (fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores),\n ssd_random_crop_fixed_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n groundtruth_instance_masks,\n groundtruth_keypoints,),\n ssd_random_crop_pad_fixed_aspect_ratio: (\n fields.InputDataFields.image,\n fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes,\n groundtruth_label_scores,\n 
groundtruth_instance_masks,\n groundtruth_keypoints,),\n }\n\n return prep_func_arg_map\n\n\ndef preprocess(tensor_dict, preprocess_options, func_arg_map=None):\n \"\"\"Preprocess images and bounding boxes.\n\n Various types of preprocessing (to be implemented) based on the\n preprocess_options dictionary e.g. \"crop image\" (affects image and possibly\n boxes), \"white balance image\" (affects only image), etc. If self._options\n is None, no preprocessing is done.\n\n Args:\n tensor_dict: dictionary that contains images, boxes, and can contain other\n things as well.\n images-> rank 4 float32 tensor contains\n 1 image -> [1, height, width, 3].\n with pixel values varying between [0, 1]\n boxes-> rank 2 float32 tensor containing\n the bounding boxes -> [N, 4].\n Boxes are in normalized form meaning\n their coordinates vary between [0, 1].\n Each row is in the form\n of [ymin, xmin, ymax, xmax].\n preprocess_options: It is a list of tuples, where each tuple contains a\n function and a dictionary that contains arguments and\n their values.\n func_arg_map: mapping from preprocessing functions to arguments that they\n expect to receive and return.\n\n Returns:\n tensor_dict: which contains the preprocessed images, bounding boxes, etc.\n\n Raises:\n ValueError: (a) If the functions passed to Preprocess\n are not in func_arg_map.\n (b) If the arguments that a function needs\n do not exist in tensor_dict.\n (c) If image in tensor_dict is not rank 4\n \"\"\"\n if func_arg_map is None:\n func_arg_map = get_default_func_arg_map()\n\n # changes the images to image (rank 4 to rank 3) since the functions\n # receive rank 3 tensor for image\n if fields.InputDataFields.image in tensor_dict:\n images = tensor_dict[fields.InputDataFields.image]\n if len(images.get_shape()) != 4:\n raise ValueError('images in tensor_dict should be rank 4')\n image = tf.squeeze(images, squeeze_dims=[0])\n tensor_dict[fields.InputDataFields.image] = image\n\n # Preprocess inputs based on preprocess_options\n for option in preprocess_options:\n func, params = option\n if func not in func_arg_map:\n raise ValueError('The function %s does not exist in func_arg_map' %\n (func.__name__))\n arg_names = func_arg_map[func]\n for a in arg_names:\n if a is not None and a not in tensor_dict:\n raise ValueError('The function %s requires argument %s' %\n (func.__name__, a))\n\n def get_arg(key):\n return tensor_dict[key] if key is not None else None\n\n args = [get_arg(a) for a in arg_names]\n results = func(*args, **params)\n if not isinstance(results, (list, tuple)):\n results = (results,)\n # Removes None args since the return values will not contain those.\n arg_names = [arg_name for arg_name in arg_names if arg_name is not None]\n for res, arg_name in zip(results, arg_names):\n tensor_dict[arg_name] = res\n\n # changes the image to images (rank 3 to rank 4) to be compatible to what\n # we received in the first place\n if fields.InputDataFields.image in tensor_dict:\n image = tensor_dict[fields.InputDataFields.image]\n images = tf.expand_dims(image, 0)\n tensor_dict[fields.InputDataFields.image] = images\n\n return tensor_dict\n" ]
[ [ "tensorflow.image.random_brightness", "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.round", "tensorflow.ones", "tensorflow.image.flip_left_right", "tensorflow.squeeze", "tensorflow.name_scope", "tensorflow.one_hot", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.image.sample_distorted_bounding_box", "tensorflow.slice", "tensorflow.is_nan", "tensorflow.split", "tensorflow.image.flip_up_down", "tensorflow.minimum", "tensorflow.greater", "tensorflow.multiply", "tensorflow.image.draw_bounding_boxes", "tensorflow.clip_by_value", "tensorflow.image.random_hue", "tensorflow.constant", "tensorflow.transpose", "tensorflow.cond", "tensorflow.stack", "tensorflow.shape", "tensorflow.image.rot90", "tensorflow.subtract", "tensorflow.image.random_contrast", "tensorflow.image.crop_to_bounding_box", "tensorflow.to_float", "tensorflow.expand_dims", "tensorflow.image.resize_images", "tensorflow.random_uniform", "tensorflow.image.convert_image_dtype", "tensorflow.image.grayscale_to_rgb", "numpy.random.random_sample", "tensorflow.equal", "tensorflow.image.random_saturation", "tensorflow.image.pad_to_bounding_box", "tensorflow.add", "tensorflow.image.rgb_to_grayscale", "tensorflow.to_int32", "tensorflow.python.ops.control_flow_ops.merge", "tensorflow.image.resize_nearest_neighbor", "tensorflow.gather", "tensorflow.maximum" ] ]
metee1996/ds-example-project
[ "8d43b8786711a69779adb7fd6a830fe63fe30909" ]
[ "src/python/project/model.py" ]
[ "import numpy as np\n\ndef power(x):\n return np.power(x, 2)\n" ]
[ [ "numpy.power" ] ]
ohtu-projekti-dataproblemsemulator/dataproblemsemulator
[ "b24eac686fae4147264c1ccc8169fd96b1875577" ]
[ "examples/run_time_series_prediction_example.py" ]
[ "# MIT License\n#\n# Copyright (c) 2019 Tuomas Halvari, Juha Harviainen, Juha Mylläri, Antti Röyskö, Juuso Silvennoinen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport random as rn\nimport sys\nfrom math import sqrt\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras import backend\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.models import Sequential\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom dpemu import pg_utils\nfrom dpemu import runner\nfrom dpemu.filters.time_series import Gap\nfrom dpemu.nodes import Array\nfrom dpemu.plotting_utils import print_results_by_model, visualize_scores, visualize_time_series_prediction\n\n\ndef get_data(argv):\n dataset_name = argv[1]\n n_data = int(argv[2])\n if dataset_name == \"passengers\":\n data = pd.read_csv(\"data/passengers.csv\", header=0, usecols=[\"passengers\"])[:n_data].values.astype(float)\n n_period = 12\n else:\n data = pd.read_csv(\"data/temperature.csv\", header=0, usecols=[dataset_name])[:n_data].values.astype(float)\n n_period = 24\n\n data = data[~np.isnan(data)]\n n_data = len(data)\n n_test = int(n_data * .2)\n return data[:-n_test], data[-n_test:], n_data, n_period, dataset_name\n\n\ndef get_err_root_node():\n err_root_node = Array()\n # err_root_node.addfilter(GaussianNoise(\"mean\", \"std\"))\n # err_root_node.addfilter(SensorDrift(\"magnitude\"))\n err_root_node.addfilter(Gap(\"prob_break\", \"prob_recover\", \"missing_value\"))\n return err_root_node\n\n\ndef get_err_params_list():\n # err_params_list = [{\"mean\": 0, \"std\": std} for std in np.linspace(0, 35, 8)]\n # err_params_list = [{\"magnitude\": m} for m in range(8)]\n err_params_list = [{\"prob_break\": p, \"prob_recover\": .5, \"missing_value\": np.nan} for p in np.linspace(0, .14, 8)]\n return err_params_list\n\n\nclass Preprocessor:\n def run(self, train_data, test_data, params):\n return train_data, test_data, {}\n\n\nclass LSTMModel:\n\n def __init__(self):\n seed = 42\n rn.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n session = tf.Session(graph=tf.get_default_graph(), config=conf)\n backend.set_session(session)\n\n @staticmethod\n def __get_periodic_diffs(data, n_period):\n return np.array([data[i] - data[i - n_period] for i in range(n_period, len(data))])\n\n @staticmethod\n def 
__get_rmse(test_pred, test):\n return sqrt(mean_squared_error(test_pred, test))\n\n def run(self, train_data, _, params):\n n_period = params[\"n_period\"]\n clean_test = params[\"clean_test\"]\n n_test = clean_test.shape[0]\n train_data = train_data[~np.isnan(train_data)]\n train_data = np.reshape(train_data, (len(train_data), 1))\n\n n_features = 1\n n_steps = 3 * n_period\n n_nodes = 100\n n_epochs = 200\n\n scaler = MinMaxScaler(feature_range=(0, 1))\n scaled_train = scaler.fit_transform(train_data)\n train_periodic_diffs = self.__get_periodic_diffs(scaled_train, n_period)\n train_periodic_diffs = pg_utils.to_time_series_x_y(train_periodic_diffs, n_steps)\n\n model = Sequential()\n model.add(LSTM(n_nodes, activation=\"relu\", input_shape=(n_steps, n_features)))\n model.add(Dense(n_nodes, activation=\"relu\"))\n model.add(Dense(1))\n model.compile(loss=\"mse\", optimizer=\"adam\")\n model.fit(train_periodic_diffs[0], train_periodic_diffs[1], epochs=n_epochs)\n\n train_with_test_pred = scaled_train\n for _ in range(n_test):\n x_cur = self.__get_periodic_diffs(train_with_test_pred, n_period)[-n_steps:]\n x_cur = np.reshape(x_cur, (1, n_steps, n_features))\n y_cur = model.predict(x_cur) + train_with_test_pred[-n_period]\n train_with_test_pred = np.concatenate([train_with_test_pred, y_cur], axis=0)\n train_with_test_pred = scaler.inverse_transform(train_with_test_pred)\n\n test_pred = train_with_test_pred[-n_test:]\n rmse = self.__get_rmse(test_pred, clean_test)\n return {\n \"rmse\": rmse,\n \"err_train\": train_with_test_pred[:-n_test],\n \"test_pred\": test_pred\n }\n\n\ndef get_model_params_dict_list(test_data, n_period):\n return [{\"model\": LSTMModel, \"params_list\": [{\"clean_test\": test_data, \"n_period\": n_period}]}]\n\n\ndef visualize(df, data, n_data, dataset_name):\n visualize_scores(\n df,\n score_names=[\"rmse\"],\n is_higher_score_better=[False],\n # err_param_name=\"std\",\n # err_param_name=\"magnitude\",\n err_param_name=\"prob_break\",\n title=f\"Prediction scores for {dataset_name} dataset (n={n_data}) with added error\"\n )\n visualize_time_series_prediction(\n df,\n data,\n score_name=\"rmse\",\n is_higher_score_better=False,\n # err_param_name=\"std\",\n # err_param_name=\"magnitude\",\n err_param_name=\"prob_break\",\n model_name=\"LSTM\",\n err_train_column=\"err_train\",\n test_pred_column=\"test_pred\",\n title=f\"Predictions for {dataset_name} dataset (n={n_data}) with added error\"\n )\n plt.show()\n\n\ndef main(argv):\n if len(argv) != 3 or argv[1] not in [\"passengers\", \"Jerusalem\", \"Eilat\", \"Miami\", \"Tel Aviv District\"]:\n exit(0)\n\n train_data, test_data, n_data, n_period, dataset_name = get_data(argv)\n\n df = runner.run(\n train_data=train_data,\n test_data=test_data,\n preproc=Preprocessor,\n preproc_params={},\n err_root_node=get_err_root_node(),\n err_params_list=get_err_params_list(),\n model_params_dict_list=get_model_params_dict_list(test_data, n_period),\n )\n\n print_results_by_model(df, dropped_columns=[\"err_train\", \"test_pred\", \"clean_test\", \"n_period\"])\n visualize(df, np.concatenate([train_data, test_data], axis=0), n_data, dataset_name)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n" ]
[ [ "sklearn.metrics.mean_squared_error", "pandas.read_csv", "sklearn.preprocessing.MinMaxScaler", "numpy.reshape", "numpy.random.seed", "matplotlib.pyplot.show", "tensorflow.set_random_seed", "tensorflow.get_default_graph", "numpy.isnan", "numpy.concatenate", "numpy.linspace", "tensorflow.ConfigProto" ] ]
DipeshAggarwal/wgan-gp-keras
[ "7a70192cdd26726ee981107299a7fa6e21cbe84b" ]
[ "train.py" ]
[ "from core.loss import d_wasserstein_loss\nfrom core.loss import g_wasserstein_loss\nfrom core.nn.conv.wgan import generator\nfrom core.nn.conv.wgan import critic\nfrom core.callbacks import GANMonitor\nfrom core.model import WGAN_GP\n\nimport tensorflow as tf\nimport numpy as np\nimport config\n\ntrain_images = tf.keras.utils.image_dataset_from_directory(\n \"dataset/images/\", label_mode=None, image_size=(config.IMAGE_WIDTH, config.IMAGE_HEIGHT), batch_size=config.BATCH_SIZE\n)\ntrain_images = train_images.map(lambda x: (x - 127.5) / 127.5)\n\ngenerator = generator(config.LATENT_DIM, tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02), channels=config.CHANNELS)\ncritic = critic(config.IMAGE_HEIGHT, config.IMAGE_WIDTH, config.CHANNELS)\n\nwgan = WGAN_GP(critic=critic, generator=generator, latent_dim=config.LATENT_DIM, critic_extra_steps=config.EXTRA_STEPS)\n\nd_opt = tf.keras.optimizers.Adam(learning_rate=config.LR, beta_1=0.5, beta_2=0.9)\ng_opt = tf.keras.optimizers.Adam(learning_rate=config.LR, beta_1=0.5, beta_2=0.9)\n\nwgan.compile(\n d_optimiser=d_opt,\n g_optimiser=g_opt,\n d_loss_fn=d_wasserstein_loss,\n g_loss_fn=g_wasserstein_loss,\n)\n\ncallback = [GANMonitor(num_images=16, latent_dim=config.LATENT_DIM)]\nwgan.fit(train_images, epochs=config.EPOCHS, callbacks=callback)\n" ]
[ [ "tensorflow.keras.optimizers.Adam", "tensorflow.keras.initializers.RandomNormal", "tensorflow.keras.utils.image_dataset_from_directory" ] ]
dylanbuchi/MONAI
[ "1651f1b003b0ffae8b615d191952ad65ad091277", "1651f1b003b0ffae8b615d191952ad65ad091277" ]
[ "tests/test_write_metrics_reports.py", "tests/test_rand_scale_cropd.py" ]
[ "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport csv\nimport os\nimport tempfile\nimport unittest\n\nimport torch\n\nfrom monai.handlers.utils import write_metrics_reports\n\n\nclass TestWriteMetricsReports(unittest.TestCase):\n def test_content(self):\n with tempfile.TemporaryDirectory() as tempdir:\n write_metrics_reports(\n save_dir=tempdir,\n images=[\"filepath1\", \"filepath2\"],\n metrics={\"metric1\": 1, \"metric2\": 2},\n metric_details={\"metric3\": torch.tensor([[1, 2], [2, 3]]), \"metric4\": torch.tensor([[5, 6], [7, 8]])},\n summary_ops=[\"mean\", \"median\", \"max\", \"90percentile\"],\n deli=\"\\t\",\n output_type=\"csv\",\n )\n\n # check the metrics.csv and content\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metrics.csv\")))\n with open(os.path.join(tempdir, \"metrics.csv\")) as f:\n f_csv = csv.reader(f)\n for i, row in enumerate(f_csv):\n self.assertEqual(row, [f\"metric{i + 1}\\t{i + 1}\"])\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric3_raw.csv\")))\n # check the metric_raw.csv and content\n with open(os.path.join(tempdir, \"metric3_raw.csv\")) as f:\n f_csv = csv.reader(f)\n for i, row in enumerate(f_csv):\n if i > 0:\n self.assertEqual(row, [f\"filepath{i}\\t{float(i)}\\t{float(i + 1)}\\t{i + 0.5}\"])\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric3_summary.csv\")))\n # check the metric_summary.csv and content\n with open(os.path.join(tempdir, \"metric3_summary.csv\")) as f:\n f_csv = csv.reader(f)\n for i, row in enumerate(f_csv):\n if i == 1:\n self.assertEqual(row, [\"class0\\t1.5000\\t1.5000\\t2.0000\\t1.9000\"])\n elif i == 2:\n self.assertEqual(row, [\"class1\\t2.5000\\t2.5000\\t3.0000\\t2.9000\"])\n elif i == 3:\n self.assertEqual(row, [\"mean\\t2.0000\\t2.0000\\t2.5000\\t2.4000\"])\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric4_raw.csv\")))\n self.assertTrue(os.path.exists(os.path.join(tempdir, \"metric4_summary.csv\")))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n", "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.transforms import RandScaleCropd\nfrom tests.utils import TEST_NDARRAYS, assert_allclose\n\nTEST_CASE_1 = [\n {\"keys\": \"img\", \"roi_scale\": [1.0, 1.0, -1.0], \"random_center\": True},\n {\"img\": np.random.randint(0, 2, size=[3, 3, 3, 4])},\n (3, 3, 3, 4),\n]\n\nTEST_CASE_2 = [\n {\"keys\": \"img\", 
\"roi_scale\": [1.0, 1.0, 1.0], \"random_center\": False},\n {\"img\": np.random.randint(0, 2, size=[3, 3, 3, 3])},\n (3, 3, 3, 3),\n]\n\nTEST_CASE_3 = [\n {\"keys\": \"img\", \"roi_scale\": [0.6, 0.6], \"random_center\": False},\n {\"img\": np.array([[[0, 0, 0, 0, 0], [0, 1, 2, 1, 0], [0, 2, 3, 2, 0], [0, 1, 2, 1, 0], [0, 0, 0, 0, 0]]])},\n]\n\nTEST_CASE_4 = [\n {\n \"keys\": \"img\",\n \"roi_scale\": [0.75, 0.6, 0.5],\n \"max_roi_scale\": [1.0, -1.0, 0.6],\n \"random_center\": True,\n \"random_size\": True,\n },\n {\"img\": np.random.randint(0, 2, size=[1, 4, 5, 6])},\n (1, 3, 4, 3),\n]\n\nTEST_CASE_5 = [\n {\"keys\": \"img\", \"roi_scale\": 0.6, \"max_roi_scale\": 0.8, \"random_center\": True, \"random_size\": True},\n {\"img\": np.random.randint(0, 2, size=[1, 4, 5, 6])},\n (1, 3, 4, 4),\n]\n\nTEST_CASE_6 = [\n {\"keys\": \"img\", \"roi_scale\": 0.2, \"max_roi_scale\": 0.8, \"random_center\": True, \"random_size\": True},\n {\"img\": np.random.randint(0, 2, size=[1, 4, 5, 6])},\n (1, 3, 2, 4),\n]\n\n\nclass TestRandScaleCropd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2])\n def test_shape(self, input_param, input_data, expected_shape):\n result = RandScaleCropd(**input_param)(input_data)\n self.assertTupleEqual(result[\"img\"].shape, expected_shape)\n\n @parameterized.expand([TEST_CASE_3])\n def test_value(self, input_param, input_data):\n for p in TEST_NDARRAYS:\n cropper = RandScaleCropd(**input_param)\n input_data[\"img\"] = p(input_data[\"img\"])\n result = cropper(input_data)\n roi = [(2 - i // 2, 2 + i - i // 2) for i in cropper._size]\n assert_allclose(\n result[\"img\"], input_data[\"img\"][:, roi[0][0] : roi[0][1], roi[1][0] : roi[1][1]], type_test=False\n )\n\n @parameterized.expand([TEST_CASE_4, TEST_CASE_5, TEST_CASE_6])\n def test_random_shape(self, input_param, input_data, expected_shape):\n cropper = RandScaleCropd(**input_param)\n cropper.set_random_state(seed=123)\n result = cropper(input_data)\n self.assertTupleEqual(result[\"img\"].shape, expected_shape)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.tensor" ], [ "numpy.array", "numpy.random.randint" ] ]
SR42-dev/line-following-robot-with-aruco-markers-obstacle-detection-and-turtlesim-publisher
[ "d7dae86a4f1fdc56ab80193c218e25243e44e487" ]
[ "lineFollowerArucoROS-checkpoint3.py" ]
[ "import sys\r\nimport cv2\r\nimport math\r\nimport time\r\nimport rospy\r\nimport serial\r\nimport argparse\r\nimport numpy as np\r\nfrom std_srvs.srv import Empty\r\nfrom turtlesim.msg import Pose\r\nfrom geometry_msgs.msg import Twist\r\n\r\n# ROS movement global variables and function definitions\r\nx = 0\r\ny = 0\r\nz = 0\r\nyaw = 0\r\n\r\ndef poseCallback(pose_message):\r\n global x, y, z, yaw\r\n x = pose_message.x\r\n y = pose_message.y\r\n yaw = pose_message.theta\r\n\r\n\r\ndef move(speed, distance, is_forward):\r\n velocity_message = Twist()\r\n global x, y\r\n x0 = x\r\n y0 = y\r\n\r\n if is_forward:\r\n velocity_message.linear.x = abs(speed)\r\n else:\r\n velocity_message.linear.x = -abs(speed)\r\n\r\n distance_moved = 0.0\r\n loop_rate = rospy.Rate(10)\r\n cmd_vel_topic = '/turtle1/cmd_vel'\r\n velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\r\n\r\n while True:\r\n\r\n rospy.loginfo('Turtlesim linear movement')\r\n velocity_publisher.publish(velocity_message)\r\n loop_rate.sleep()\r\n\r\n distance_moved = distance_moved + abs(0.5 * math.sqrt(((x - x0) * 2) + ((y - y0) * 2)))\r\n if not (distance_moved < distance):\r\n rospy.loginfo(\"----Reached----\")\r\n break\r\n\r\n velocity_message.linear.x = 0\r\n velocity_publisher.publish(velocity_message)\r\n\r\n\r\ndef rotate(angular_speed_degree, relative_angle_degree, clockwise):\r\n global yaw\r\n velocity_message = Twist()\r\n velocity_message.linear.x = 0\r\n velocity_message.linear.y = 0\r\n velocity_message.linear.z = 0\r\n velocity_message.angular.x = 0\r\n velocity_message.angular.y = 0\r\n velocity_message.angular.z = 0\r\n\r\n theta0 = yaw\r\n angular_speed = math.radians(abs(angular_speed_degree))\r\n\r\n if clockwise:\r\n velocity_message.angular.z = -abs(angular_speed)\r\n else:\r\n velocity_message.angular.z = abs(angular_speed)\r\n\r\n angle_moved = 0.0\r\n\r\n loop_rate = rospy.Rate(10)\r\n cmd_vel_topic = '/turtle1/cmd_vel'\r\n velocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\r\n\r\n t0 = rospy.Time.now().to_sec()\r\n\r\n while True:\r\n\r\n rospy.loginfo('Turtlesim rotation')\r\n velocity_publisher.publish(velocity_message)\r\n\r\n t1 = rospy.Time.now().to_sec()\r\n current_angle_degree = (t1 - t0) * angular_speed_degree\r\n loop_rate.sleep()\r\n\r\n if current_angle_degree > relative_angle_degree:\r\n rospy.loginfo('----Reached----')\r\n break\r\n\r\n velocity_message.angular.z = 0\r\n velocity_publisher.publish(velocity_message)\r\n\r\nrospy.init_node('turtlesim_motion_pose', anonymous=True)\r\n\r\ncmd_vel_topic = '/turtle1/cmd_vel'\r\nvelocity_publisher = rospy.Publisher(cmd_vel_topic, Twist, queue_size=10)\r\n\r\nposition_topic = '/turtle1/pose'\r\npose_subscriber = rospy.Subscriber(position_topic, Pose, poseCallback)\r\ntime.sleep(2)\r\n\r\n# construct the argument parser and parse the arguments\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-t\", \"--type\", type=str,\r\n\tdefault=\"DICT_ARUCO_ORIGINAL\",\r\n\thelp=\"type of ArUCo tag to detect\")\r\nargs = vars(ap.parse_args())\r\n\r\n# define names of each possible ArUco tag OpenCV supports\r\nARUCO_DICT = {\r\n\t\"DICT_4X4_50\": cv2.aruco.DICT_4X4_50,\r\n\t\"DICT_4X4_100\": cv2.aruco.DICT_4X4_100,\r\n\t\"DICT_4X4_250\": cv2.aruco.DICT_4X4_250,\r\n\t\"DICT_4X4_1000\": cv2.aruco.DICT_4X4_1000,\r\n\t\"DICT_5X5_50\": cv2.aruco.DICT_5X5_50,\r\n\t\"DICT_5X5_100\": cv2.aruco.DICT_5X5_100,\r\n\t\"DICT_5X5_250\": cv2.aruco.DICT_5X5_250,\r\n\t\"DICT_5X5_1000\": 
cv2.aruco.DICT_5X5_1000,\r\n\t\"DICT_6X6_50\": cv2.aruco.DICT_6X6_50,\r\n\t\"DICT_6X6_100\": cv2.aruco.DICT_6X6_100,\r\n\t\"DICT_6X6_250\": cv2.aruco.DICT_6X6_250,\r\n\t\"DICT_6X6_1000\": cv2.aruco.DICT_6X6_1000,\r\n\t\"DICT_7X7_50\": cv2.aruco.DICT_7X7_50,\r\n\t\"DICT_7X7_100\": cv2.aruco.DICT_7X7_100,\r\n\t\"DICT_7X7_250\": cv2.aruco.DICT_7X7_250,\r\n\t\"DICT_7X7_1000\": cv2.aruco.DICT_7X7_1000,\r\n\t\"DICT_ARUCO_ORIGINAL\": cv2.aruco.DICT_ARUCO_ORIGINAL,\r\n\t\"DICT_APRILTAG_16h5\": cv2.aruco.DICT_APRILTAG_16h5,\r\n\t\"DICT_APRILTAG_25h9\": cv2.aruco.DICT_APRILTAG_25h9,\r\n\t\"DICT_APRILTAG_36h10\": cv2.aruco.DICT_APRILTAG_36h10,\r\n\t\"DICT_APRILTAG_36h11\": cv2.aruco.DICT_APRILTAG_36h11\r\n}\r\n\r\n# verify that the supplied ArUCo tag exists and is supported by\r\n# OpenCV\r\nif ARUCO_DICT.get(args[\"type\"], None) is None:\r\n\tprint(\"[INFO] ArUCo tag of '{}' is not supported\".format(\r\n\t\targs[\"type\"]))\r\n\tsys.exit(0)\r\n# load the ArUCo dictionary and grab the ArUCo parameters\r\nprint(\"[INFO] detecting '{}' tags...\".format(args[\"type\"]))\r\narucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_250)\r\narucoParams = cv2.aruco.DetectorParameters_create()\r\n# initialize the video stream and allow the camera sensor to warm up\r\nprint(\"[INFO] starting video stream...\")\r\n\r\ncap = cv2.VideoCapture(2)\r\nc1 = 0\r\nlinecolor = (100, 215, 255)\r\nlwr_red = np.array([0, 0, 0])\r\nupper_red = np.array([179, 65, 55])\r\ncountl = False\r\ncountr = False\r\nSer = serial.Serial(\"/dev/ttyUSB0\", baudrate=9600)\r\nSer.flush()\r\nwidth = cap.get(3)\r\nwhile True:\r\n ret, frame = cap.read()\r\n if not ret:\r\n _, frame = cap.read()\r\n\r\n # detect ArUco markers in the input frame\r\n (corners, ids, rejected) = cv2.aruco.detectMarkers(frame,\r\n arucoDict, parameters=arucoParams)\r\n\r\n\r\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\r\n kernel = np.ones((5, 5), np.uint8)\r\n mask = cv2.inRange(hsv, lwr_red, upper_red)\r\n mask = cv2.dilate(mask, kernel, iterations=1)\r\n res = cv2.bitwise_and(frame, frame, mask=mask)\r\n cnts, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n center = None\r\n\r\n # verify at least one ArUco marker was detected\r\n if len(corners) > 0:\r\n # flatten the ArUco IDs list\r\n ids = ids.flatten()\r\n # loop over the detected ArUCo corners\r\n for (markerCorner, markerID) in zip(corners, ids):\r\n # extract the marker corners (which are always returned\r\n # in top-left, top-right, bottom-right, and bottom-left\r\n # order)\r\n corners = markerCorner.reshape((4, 2))\r\n (topLeft, topRight, bottomRight, bottomLeft) = corners\r\n # convert each of the (x, y)-coordinate pairs to integers\r\n topRight = (int(topRight[0]), int(topRight[1]))\r\n bottomRight = (int(bottomRight[0]), int(bottomRight[1]))\r\n bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))\r\n topLeft = (int(topLeft[0]), int(topLeft[1]))\r\n\r\n # draw the bounding box of the ArUCo detection\r\n cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)\r\n cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)\r\n cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)\r\n cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)\r\n # compute and draw the center (x, y)-coordinates of the\r\n # ArUco marker\r\n cX = int((topLeft[0] + bottomRight[0]) / 2.0)\r\n cY = int((topLeft[1] + bottomRight[1]) / 2.0)\r\n cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)\r\n # draw the ArUco marker ID on the frame\r\n\r\n cv2.putText(frame, str(markerID),\r\n (topLeft[0], 
topLeft[1] - 15),\r\n                        cv2.FONT_HERSHEY_SIMPLEX,\r\n                        0.5, (0, 255, 0), 2)\r\n            if markerID == 0:\r\n                if not countl:\r\n                    countr = False\r\n                    countl=True\r\n                    i='f'\r\n                    for lp in range(12):\r\n                        Ser.write(i.encode())\r\n                        move(1, 1, True)\r\n                        time.sleep(0.1)\r\n                    cv2.putText(frame, '<--', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n                    print(\"Left\")\r\n                    i = 'l' # left turn\r\n                    for lp in range(6):\r\n                        Ser.write(i.encode())\r\n                        rotate(30, 10, False)\r\n                        time.sleep(0.5)\r\n                    i='f'\r\n                    for lp in range(7):\r\n                        Ser.write(i.encode())\r\n                        move(1, 1, True)\r\n                        time.sleep(0.1)\r\n            elif markerID == 1:\r\n                if not countr:\r\n                    countl = False\r\n                    countr=True\r\n                    i='f'\r\n                    for lp in range(8):\r\n                        Ser.write(i.encode())\r\n                        move(1, 1, True)\r\n                        time.sleep(0.1)\r\n                    i = 'r' # right turn\r\n                    cv2.putText(frame, '-->', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n                    print(\"Right\")\r\n                    for lp in range(6):\r\n                        Ser.write(i.encode())\r\n                        rotate(30, 10, True)\r\n                        time.sleep(0.5)\r\n            else:\r\n                i = 'x'\r\n                Ser.write(i.encode())\r\n                print(\"Invalid\")\r\n\r\n    if len(cnts) > 0:\r\n        c = max(cnts, key=cv2.contourArea)\r\n        ((x, y), radius) = cv2.minEnclosingCircle(c)\r\n        M = cv2.moments(c)\r\n        center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\r\n        if radius > 3:\r\n            # cv2.circle(frame, (int(x), int(y)), int(radius), (255, 255, 255), 2)\r\n            cv2.circle(frame, center, 5, linecolor, -1)\r\n\r\n        if (x > 0.25 * width and x <= 0.75 * width):\r\n            print('Forward')\r\n            cv2.putText(frame, '^', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n            Ser.write(b'f')\r\n            move(1, 1, True)\r\n            # time.sleep(0.01)\r\n\r\n    else:\r\n        print(\"Track Not Visible\")\r\n        c1 += 1\r\n        if (c1 == 5):\r\n            print(\"Backward\")\r\n            cv2.putText(frame, 'V', (5, 50), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n            Ser.write(b'b')\r\n            move(1, 1, False)\r\n            c1 = 0\r\n\r\n    time.sleep(0.2)\r\n    cv2.imshow(\"Frame\", frame)\r\n    if cv2.waitKey(1) & 0xFF == ord('q'):\r\n        cap.release()\r\n        Ser.close()\r\n        cv2.destroyAllWindows()\r\n        break" ]
[ [ "numpy.array", "numpy.ones" ] ]
bkornpob/axehelper
[ "d89407f73f92e140a5cc9a76c643b9a8656e8b0f" ]
[ "build/lib/axehelper/axehelper_bkg.py" ]
[ "# Kornpob Bhirombhakdi\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy,glob,os\nfrom astropy.io import fits\nfrom math import pi\n\nclass AXEhelper_BKG:\n def __init__(self,axeflist=None,fltflist=None,\n padxleft=5,padxright=5,\n padylow=10,halfdy=3,padyup=10,\n adjusty=1.2\n ):\n self.axeflist = axeflist\n self.fltflist = fltflist\n self.params = (padxleft,padxright,padylow,halfdy,padyup,adjusty)\n self.headerall = self._headerall()\n ####################\n ####################\n ####################\n def make_poly2d(self):\n OBJ = {}\n fltflist = self.fltflist\n axeflist = self.axeflist\n HEADERALL = self.headerall\n for ii,i in enumerate(fltflist):\n # image\n tmp = fits.open(i)\n tmpdata = tmp[1].data.copy()\n tmpdq = tmp['DQ'].data.copy()\n\n # header & prep\n tmpheader = HEADERALL[axeflist[ii]]\n xref,yref = tmpheader['XYREF']\n pixx = tmpheader['PIXX']\n pixy = tmpheader['PIXY']\n cc0x,cc0y,cc1x,cc1y = tmpheader['CC']\n sectx = tmpheader['SECTX']\n secty = tmpheader['SECTY']\n\n # Polynomial2D\n x1 = np.arange(cc0x,cc1x)\n x2 = np.arange(cc0y,cc1y)\n x1,x2 = np.meshgrid(x1,x2)\n\n obj = Polynomial2D()\n obj.data['X1'] = x1.copy()\n obj.data['X2'] = x2.copy()\n obj.data['Y'] = tmpdata[cc0y:cc1y,cc0x:cc1x]\n # print(x1.shape,x2.shape,obj.data['Y'].shape) \n\n # data['MASK']\n tmp = np.full_like(tmpdq,True,dtype=bool)\n m = np.where(tmpdq==0)\n tmp[m] = False\n obj.data['MASK'] = tmp[cc0y:cc1y,cc0x:cc1x]\n # print(obj.data['Y'].shape,obj.data['MASK'].shape) \n\n OBJ[i] = copy.deepcopy(obj)\n return OBJ\n ####################\n ####################\n ####################\n def _headerall(self):\n axeflist = self.axeflist\n fltflist = self.fltflist\n padxleft,padxright,padylow,halfdy,padyup,adjusty = self.params\n \n tmp = {}\n for i in axeflist:\n # read from header\n HEADER = copy.deepcopy(fits.open(i)[1].header)\n xref,yref = HEADER['REFPNTX'],HEADER['REFPNTY']\n bb0x,bb1x = HEADER['BB0X'],HEADER['BB1X']\n orient = HEADER['ORIENT']\n cpointx,cpointy = HEADER['CPOINTX'],HEADER['CPOINTY']\n dldx0,dldx1 = HEADER['DLDX0'],HEADER['DLDX1']\n\n # manually adjust offset\n yref += adjusty\n\n # trace and wavelength\n fny = lambda x : np.tan((90.+orient)*pi/180.) 
* (x - cpointx) + yref\n fnw = lambda x : dldx1 * (x - cpointx) + dldx0\n\n pixx = np.array([round(xref),round(bb1x)],dtype=int)\n pixy = np.round(fny(pixx)).astype(int)\n ww = fnw(pixx)\n \n # section\n pixywidth = pixy[-1] - pixy[0] + 1\n sectx = (padxleft,round(bb0x-xref),round(bb1x-bb0x),padxright)\n secty = (padylow,halfdy,pixywidth,halfdy,padyup)\n\n # cut box\n cc0x = round(xref)-padxleft\n cc1x = round(bb1x)+padxright\n cc0y = int(fny(cc0x))-halfdy-padylow\n cc1y = int(fny(cc1x))+halfdy+padyup\n\n # output \n tmp[i] = {}\n tmp[i]['XYREF'] = (xref,yref)\n tmp[i]['DLDX'] = (dldx0,dldx1)\n tmp[i]['BBX'] = (bb0x,bb1x)\n tmp[i]['PIXX'] = pixx.copy()\n tmp[i]['PIXY'] = pixy.copy()\n tmp[i]['WW'] = ww.copy()\n tmp[i]['SECTX'] = copy.deepcopy(sectx)\n tmp[i]['SECTY'] = copy.deepcopy(secty)\n tmp[i]['CC'] = (cc0x,cc0y,cc1x,cc1y)\n\n return copy.deepcopy(tmp)\n ####################\n ####################\n ####################\n def show(self,save=False,savefname='default'):\n fltflist = self.fltflist\n axeflist = self.axeflist\n HEADERALL = self.headerall\n\n for ii,i in enumerate(fltflist):\n tmp = fits.open(i)\n tmpdata = tmp[1].data.copy()\n\n tmpheader = HEADERALL[axeflist[ii]]\n xref,yref = tmpheader['XYREF']\n pixx = tmpheader['PIXX']\n pixy = tmpheader['PIXY']\n ww = tmpheader['WW']\n cc0x,cc0y,cc1x,cc1y = tmpheader['CC']\n sectx = tmpheader['SECTX']\n secty = tmpheader['SECTY']\n\n fig,ax = plt.subplots(2,1,sharex=True)\n fig.tight_layout()\n m = np.where(np.isfinite(tmpdata))\n vmin,vmax = np.percentile(tmpdata[m],5.),np.percentile(tmpdata[m],99.)\n ax[0].imshow(tmpdata,origin='lower',cmap='viridis',vmin=vmin,vmax=vmax)\n ax[0].scatter(xref,yref,s=30,facecolor='red',edgecolor='None')\n ax[0].plot(pixx,pixy,'r-')\n ax[0].set_xlim(cc0x,cc1x)\n ax[0].set_ylim(cc0y,cc1y)\n ax[0].set_title('{0}'.format(i.split('/')[-1].split('_')[0]),fontsize=20)\n ax[0].set_ylabel('pixY',fontsize=20)\n\n bb0x = cc0x+sectx[0]+sectx[1]\n bb1x = bb0x+sectx[2]\n bb0y = cc0y+secty[0]\n bb1y = bb0y+secty[1]+secty[2]+secty[3]\n tmpx = [bb0x,bb1x,bb1x,bb0x,bb0x]\n tmpy = [bb0y,bb0y,bb1y,bb1y,bb0y]\n ax[0].plot(tmpx,tmpy,'r-')\n\n ax[1].plot(pixx,ww)\n ax[1].set_xlabel('pixX',fontsize=20)\n ax[1].set_ylabel('obs. wavelength (A)',fontsize=20)\n ax[1].grid()\n \n if save:\n if savefname=='default':\n string = '/'.join(axeflist[ii].split('/')[0:-1])\n string += '/{0}_axehelperbkg.png'.format(axeflist[ii].split('/')[-1].split('.')[0])\n else:\n string = savefname\n fig.savefig(string,bbox_inches='tight')\n \n " ]
[ [ "numpy.full_like", "matplotlib.pyplot.subplots", "numpy.arange", "numpy.tan", "numpy.where", "numpy.meshgrid", "numpy.percentile", "numpy.isfinite" ] ]
markytools/eeedeeplearning-finalProj
[ "6a06d73091262fb996c990302692cff7d9eed3b1" ]
[ "train.py" ]
[ "import sys\nfrom optparse import OptionParser\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import optim\nfrom torch.autograd import Variable\n\nfrom eval import eval_net\nfrom models.unet import UNet\nfrom utils import *\n\ndef train_net(net, epochs=100, batch_size=2, lr=0.02, val_percent=0.1,\n cp=True, gpu=False):\n dir_img = '/media/markytools/503f6b96-90ca-4bfb-a99e-35f774205c77/EEE298/eee298deeplearning-finalproject-withdataset/LABELS_ONE_X/'\n dir_mask = '/media/markytools/503f6b96-90ca-4bfb-a99e-35f774205c77/EEE298/eee298deeplearning-finalproject-withdataset/LABELS_ONE_Y/'\n dir_checkpoint = './checkpoints'\n\n ids = get_ids(dir_img)\n ids = split_ids(ids)\n\n iddataset = split_train_val(ids, val_percent)\n\n print('''\n Starting training:\n Epochs: {}\n Batch size: {}\n Learning rate: {}\n Training size: {}\n Validation size: {}\n Checkpoints: {}\n CUDA: {}\n '''.format(epochs, batch_size, lr, len(iddataset['train']),\n len(iddataset['val']), str(cp), str(gpu)))\n\n N_train = len(iddataset['train'])\n\n optimizer = optim.Adam(net.parameters(),lr=lr,betas=(0.9,0.99))\n criterion = nn.BCELoss()\n\n for epoch in range(epochs):\n print('Starting epoch {}/{}.'.format(epoch + 1, epochs))\n\n # reset the generators\n train = get_imgs_and_masks(iddataset['train'], dir_img, dir_mask)\n val = get_imgs_and_masks(iddataset['val'], dir_img, dir_mask)\n\n epoch_loss = 0\n\n if 1:\n val_dice = eval_net(net, val, gpu)\n print('Validation Dice Coeff: {}'.format(val_dice))\n\n for i, b in enumerate(batch(train, batch_size)):\n X = np.array([i[0] for i in b])\n y = np.array([i[1] for i in b])\n\n X = torch.FloatTensor(X)\n y = torch.ByteTensor(y)\n\n if gpu:\n X = Variable(X).cuda()\n y = Variable(y).cuda()\n else:\n X = Variable(X)\n y = Variable(y)\n\n y_pred = net(X)\n probs = F.sigmoid(y_pred)\n probs_flat = probs.view(-1)\n\n y_flat = y.view(-1)\n\n loss = criterion(probs_flat, y_flat.float())\n epoch_loss += loss.data[0]\n\n print('{0:.4f} --- loss: {1:.6f}'.format(i * batch_size / N_train,\n loss.data[0]))\n\n optimizer.zero_grad()\n\n loss.backward()\n\n optimizer.step()\n\n print('Epoch finished ! 
Loss: {}'.format(epoch_loss / i))\n\n if cp:\n torch.save(net.state_dict(),\n dir_checkpoint + 'CP{}.pth'.format(epoch + 1))\n\n print('Checkpoint {} saved !'.format(epoch + 1))\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option('-e', '--epochs', dest='epochs', default=300, type='int',\n help='number of epochs')\n parser.add_option('-b', '--batch-size', dest='batchsize', default=2,\n type='int', help='batch size')\n parser.add_option('-l', '--learning-rate', dest='lr', default=0.001,\n type='float', help='learning rate')\n parser.add_option('-g', '--gpu', action='store_true', dest='gpu',\n default=False, help='use cuda')\n parser.add_option('-m', '--model', dest='model', default=1,\n type='int', help='select model (int): (1-Unet, )')\n parser.add_option('-c', '--load', dest='load',\n default=False, help='load file model')\n\n (options, args) = parser.parse_args()\n\n if (options.model == 1):\n net = UNet(3, 1)\n\n if options.load:\n net.load_state_dict(torch.load(options.load))\n print('Model loaded from {}'.format(options.load))\n\n if options.gpu:\n net.cuda()\n cudnn.benchmark = True\n\n try:\n train_net(net, options.epochs, options.batchsize, options.lr,\n gpu=options.gpu)\n except KeyboardInterrupt:\n torch.save(net.state_dict(), 'INTERRUPTED.pth')\n print('Saved interrupt')\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)\n" ]
[ [ "torch.FloatTensor", "torch.nn.functional.sigmoid", "torch.load", "torch.autograd.Variable", "torch.ByteTensor", "torch.nn.BCELoss" ] ]
kacunningham413/PlasmoCount
[ "0213c63add92c8df1a53526af394bc9692ca4a62" ]
[ "api/programs/model.py" ]
[ "from pathlib import Path\nimport pandas as pd\nfrom PIL import Image as PILImage\nimport torch\nfrom torchvision import transforms, ops\nfrom fastai.basic_train import load_learner\nfrom fastai.vision import Image\nfrom fastai.core import FloatItem\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n\nclass Model:\n def __init__(self,\n model_path='./models',\n od_model='faster-rcnn.pt',\n class_model='class_resnet.pkl',\n ls_model='ls_resnet.pkl',\n gam_model='gam_resnet.pkl',\n cutoffs=[1.5, 2.5]):\n model_path = Path(model_path)\n device = torch.device(\n 'cuda') if torch.cuda.is_available() else torch.device('cpu')\n self.od_model = torch.load(str(model_path / od_model), device)\n self.od_model.eval()\n self.class_model = load_learner(path=model_path, file=class_model)\n self.ls_model = load_learner(path=model_path, file=ls_model)\n self.gam_model = load_learner(path=model_path, file=gam_model)\n self.cutoffs = cutoffs\n\n def load_image(self, fileName):\n self.fileName = fileName\n img = PILImage.open(self.fileName).convert(\"RGB\")\n tensor = transforms.ToTensor()(img)\n self.img = tensor\n return tensor\n\n def predict(self, has_gams):\n with torch.no_grad():\n prediction = self.od_model([self.img])[0]\n prediction = self.post_processing(prediction)\n # get crops for class detection\n classes = []\n life_stages = []\n for bbox in prediction['boxes']:\n x0, y0, x1, y1 = bbox.int()\n bbox_img = Image(self.img[:, y0:y1, x0:x1])\n bbox_pred = self.class_model.predict(bbox_img)\n if str(bbox_pred[0]) == 'infected':\n if has_gams:\n gam_pred = self.gam_model.predict(bbox_img)\n if str(gam_pred[0]) == 'asexual':\n ls_pred = self.ls_model.predict(bbox_img)\n else:\n ls_pred = [FloatItem(-1)]\n else:\n ls_pred = self.ls_model.predict(bbox_img)\n life_stages.append(ls_pred)\n else:\n life_stages.append(None)\n classes.append(bbox_pred)\n\n # format predictions\n result = {}\n result['boxes'] = pd.Series(prediction['boxes'].tolist())\n result['p_boxes'] = pd.Series(prediction['scores'].tolist())\n result = pd.DataFrame.from_dict(result)\n result[['classes', 'p_classes']] = pd.Series(classes).apply(\n lambda x: pd.Series([str(x[0]), (x[2][x[1]]).item()]))\n result['life_stage'] = pd.Series(life_stages).apply(\n lambda x: float(x[0].data) if x is not None else None)\n result['life_stage_c'] = result['life_stage'].apply(\n lambda x: self.calc_life_stages(x))\n\n return result\n\n def post_processing(self,\n pred,\n score_thresh=0.9,\n iou_thresh=0.5,\n z_thresh=4):\n pred = self.apply_score_filter(pred, score_thresh)\n pred = self.apply_nms(pred, iou_thresh)\n pred = self.apply_size_filter(pred, z_thresh)\n return pred\n\n def apply_nms(self, pred, iou_thresh):\n idx = ops.nms(pred[\"boxes\"], pred[\"scores\"], iou_thresh)\n for i in [\"boxes\", \"labels\", \"scores\"]:\n pred[i] = pred[i][idx]\n return pred\n\n def apply_score_filter(self, pred, thresh):\n idx = [i for i, score in enumerate(pred['scores']) if score > thresh]\n for i in [\"boxes\", \"labels\", \"scores\"]:\n pred[i] = pred[i][idx]\n return pred\n\n def calc_area(self, coods):\n return abs((coods[:, 2] - coods[:, 0]) * (coods[:, 3] - coods[:, 1]))\n\n def apply_size_filter(self, pred, z_thresh):\n area = self.calc_area(pred['boxes'])\n zscores = stats.zscore(area)\n idx = [i for i, score in enumerate(zscores) if abs(score) < z_thresh]\n for i in [\"boxes\", \"labels\", \"scores\"]:\n pred[i] = pred[i][idx]\n return pred\n\n def calc_life_stages(self, x):\n RT_cutoff, TS_cutoff = self.cutoffs\n if not x:\n return 
'uninfected'\n elif (x >= 0) & (x <= RT_cutoff):\n return 'ring'\n elif (x > RT_cutoff) & (x <= TS_cutoff):\n return 'trophozoite'\n elif (x > TS_cutoff):\n return 'schizont'\n elif (x == -1):\n return 'gametocyte'\n else:\n return 'uninfected'" ]
[ [ "scipy.stats.zscore", "pandas.Series", "torch.no_grad", "torch.cuda.is_available", "torch.device", "pandas.DataFrame.from_dict" ] ]
milinddeore/pytorch-vsumm-reinforce
[ "c3ca731c9a7f00282c8460deb47f34658cfc0522" ]
[ "utils/generate_dataset.py" ]
[ "\"\"\"\n Generate Dataset\n\n 1. Converting video to frames\n 2. Extracting features\n 3. Getting change points\n 4. User Summary ( for evaluation )\n\n\"\"\"\nimport os, sys\nsys.path.append('../')\nfrom networks.CNN import ResNet\nfrom utils.KTS.cpd_auto import cpd_auto\nfrom tqdm import tqdm\nimport math\nimport cv2\nimport numpy as np\nimport h5py\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-p', '--path', type=str, required=True, help=\"path of video file, whos h5 needs to generate.\")\nparser.add_argument('--h5_gen', type=str, required=True, help=\"path to h5 generated file\")\nargs = parser.parse_args()\n\n\nclass Generate_Dataset:\n def __init__(self, video_path, save_path):\n self.resnet = ResNet()\n self.dataset = {}\n self.video_list = []\n self.video_path = ''\n self.frame_root_path = './frames'\n self.h5_file = h5py.File(save_path, 'w')\n\n self._set_video_list(video_path)\n print('Video path : {} H5 autogen path : {}'.format(video_path, save_path))\n\n def _set_video_list(self, video_path):\n if os.path.isdir(video_path):\n self.video_path = video_path\n self.video_list = os.listdir(video_path)\n self.video_list.sort()\n else:\n self.video_path = ''\n self.video_list.append(video_path)\n\n for idx, file_name in enumerate(self.video_list):\n self.dataset['video_{}'.format(idx+1)] = {}\n self.h5_file.create_group('video_{}'.format(idx+1))\n\n\n def _extract_feature(self, frame):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (224, 224))\n res_pool5 = self.resnet(frame)\n frame_feat = res_pool5.cpu().data.numpy().flatten()\n\n return frame_feat\n\n def _get_change_points(self, video_feat, n_frame, fps):\n print('n_frame {} fps {}'.format(n_frame, fps))\n n = n_frame / math.ceil(fps)\n m = int(math.ceil(n/2.0))\n K = np.dot(video_feat, video_feat.T)\n change_points, _ = cpd_auto(K, m, 1)\n change_points = np.concatenate(([0], change_points, [n_frame-1]))\n\n temp_change_points = []\n for idx in range(len(change_points)-1):\n segment = [change_points[idx], change_points[idx+1]-1]\n if idx == len(change_points)-2:\n segment = [change_points[idx], change_points[idx+1]]\n\n temp_change_points.append(segment)\n change_points = np.array(list(temp_change_points))\n\n temp_n_frame_per_seg = []\n for change_points_idx in range(len(change_points)):\n n_frame = change_points[change_points_idx][1] - change_points[change_points_idx][0]\n temp_n_frame_per_seg.append(n_frame)\n n_frame_per_seg = np.array(list(temp_n_frame_per_seg))\n\n return change_points, n_frame_per_seg\n\n # TODO : save dataset\n def _save_dataset(self):\n pass\n\n def generate_dataset(self):\n for video_idx, video_filename in enumerate(tqdm(self.video_list)):\n video_path = video_filename\n if os.path.isdir(self.video_path):\n video_path = os.path.join(self.video_path, video_filename)\n\n video_basename = os.path.basename(video_path).split('.')[0]\n\n if not os.path.exists(os.path.join(self.frame_root_path, video_basename)):\n os.mkdir(os.path.join(self.frame_root_path, video_basename))\n\n video_capture = cv2.VideoCapture(video_path)\n\n fps = video_capture.get(cv2.CAP_PROP_FPS)\n n_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))\n\n #frame_list = []\n picks = []\n video_feat = None\n video_feat_for_train = None\n for frame_idx in tqdm(range(n_frames-1)):\n success, frame = video_capture.read()\n if success:\n frame_feat = self._extract_feature(frame)\n\n if frame_idx % 15 == 0:\n picks.append(frame_idx)\n\n if video_feat_for_train is None:\n 
video_feat_for_train = frame_feat\n else:\n video_feat_for_train = np.vstack((video_feat_for_train, frame_feat))\n\n if video_feat is None:\n video_feat = frame_feat\n else:\n video_feat = np.vstack((video_feat, frame_feat))\n\n img_filename = \"{}.jpg\".format(str(frame_idx).zfill(5))\n cv2.imwrite(os.path.join(self.frame_root_path, video_basename, img_filename), frame)\n\n else:\n break\n\n video_capture.release()\n\n change_points, n_frame_per_seg = self._get_change_points(video_feat, n_frames, fps)\n\n # self.dataset['video_{}'.format(video_idx+1)]['frames'] = list(frame_list)\n # self.dataset['video_{}'.format(video_idx+1)]['features'] = list(video_feat)\n # self.dataset['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))\n # self.dataset['video_{}'.format(video_idx+1)]['n_frames'] = n_frames\n # self.dataset['video_{}'.format(video_idx+1)]['fps'] = fps\n # self.dataset['video_{}'.format(video_idx+1)]['change_points'] = change_points\n # self.dataset['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg\n\n self.h5_file['video_{}'.format(video_idx+1)]['features'] = list(video_feat_for_train)\n self.h5_file['video_{}'.format(video_idx+1)]['picks'] = np.array(list(picks))\n self.h5_file['video_{}'.format(video_idx+1)]['n_frames'] = n_frames\n self.h5_file['video_{}'.format(video_idx+1)]['fps'] = fps\n self.h5_file['video_{}'.format(video_idx+1)]['change_points'] = change_points\n self.h5_file['video_{}'.format(video_idx+1)]['n_frame_per_seg'] = n_frame_per_seg\n\nif __name__ == \"__main__\":\n gen = Generate_Dataset(args.path, args.h5_gen)\n gen.generate_dataset()\n gen.h5_file.close()\n" ]
[ [ "numpy.vstack", "numpy.concatenate", "numpy.dot" ] ]
jhabikal21/tensorflow
[ "98d20962172301385aae694141801a375debd2bc", "98d20962172301385aae694141801a375debd2bc" ]
[ "tensorflow/python/kernel_tests/slice_op_test.py", "tensorflow/contrib/tpu/python/tpu/training_loop.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for slice op.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.platform import test\n\n\nclass SliceTest(test.TestCase):\n\n def testEmpty(self):\n inp = np.random.rand(4, 4).astype(\"f\")\n for k in xrange(4):\n with self.test_session(use_gpu=True):\n a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)\n slice_t = a[2, k:k]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[2, k:k])\n\n def testInt32(self):\n inp = np.random.rand(4, 4).astype(\"i\")\n for k in xrange(4):\n with self.test_session(use_gpu=True):\n a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32)\n slice_t = a[2, k:k]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[2, k:k])\n\n def testInt64Slicing(self):\n with self.test_session(use_gpu=True):\n a = constant_op.constant([0, 1, 2], dtype=dtypes.int64)\n\n # Slice using int64 Tensor.\n i = constant_op.constant(1, dtype=dtypes.int64)\n slice_t = a[i]\n slice_val = slice_t.eval()\n self.assertAllEqual(1, slice_val)\n slice_t = a[i:i+1]\n slice_val = slice_t.eval()\n self.assertAllEqual([1], slice_val)\n\n # Slice using int64 integer.\n i = np.asarray(1).astype(np.int64)\n slice_t = a[i]\n slice_val = slice_t.eval()\n self.assertAllEqual(1, slice_val)\n slice_t = a[i:i+1]\n slice_val = slice_t.eval()\n self.assertAllEqual([1], slice_val)\n\n def testSelectAll(self):\n for _ in range(10):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(4, 4, 4, 4).astype(\"f\")\n a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32)\n\n slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1])\n slice_implicit_t = a[:, :, :, :]\n\n self.assertAllEqual(inp, slice_explicit_t.eval())\n self.assertAllEqual(inp, slice_implicit_t.eval())\n self.assertEqual(inp.shape, slice_explicit_t.get_shape())\n self.assertEqual(inp.shape, slice_implicit_t.get_shape())\n\n def testSingleDimension(self):\n for _ in range(10):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(10).astype(\"f\")\n a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32)\n\n hi = np.random.randint(0, 9)\n scalar_t = a[hi]\n scalar_val = scalar_t.eval()\n self.assertAllEqual(scalar_val, inp[hi])\n\n if hi > 0:\n lo = np.random.randint(0, hi)\n else:\n lo = 0\n slice_t = a[lo:hi]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[lo:hi])\n\n def 
testScalarInput(self):\n input_val = 0\n with self.test_session() as sess:\n # Test with constant input; shape inference fails.\n with self.assertRaisesWithPredicateMatch(ValueError, \"out of range\"):\n constant_op.constant(input_val)[:].get_shape()\n\n # Test evaluating with non-constant input; kernel execution fails.\n input_t = array_ops.placeholder(dtypes.int32)\n slice_t = input_t[:]\n with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,\n \"out of range\"):\n sess.run([slice_t], feed_dict={input_t: input_val})\n\n def testInvalidIndex(self):\n input_val = [1, 2]\n with self.test_session() as sess:\n # Test with constant input; shape inference fails.\n with self.assertRaisesWithPredicateMatch(ValueError, \"out of range\"):\n constant_op.constant(input_val)[1:, 1:].get_shape()\n\n # Test evaluating with non-constant input; kernel execution fails.\n input_t = array_ops.placeholder(dtypes.int32)\n slice_t = input_t[1:, 1:]\n with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,\n \"out of range\"):\n sess.run([slice_t], feed_dict={input_t: input_val})\n\n def _testSliceMatrixDim0(self, x, begin, size):\n with self.test_session(use_gpu=True):\n tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval()\n np_ans = x[begin:begin + size, :]\n self.assertAllEqual(tf_ans, np_ans)\n\n def testSliceMatrixDim0(self):\n x = np.random.rand(8, 4).astype(\"f\")\n self._testSliceMatrixDim0(x, 1, 2)\n self._testSliceMatrixDim0(x, 3, 3)\n y = np.random.rand(8, 7).astype(\"f\") # 7 * sizeof(float) is not aligned\n self._testSliceMatrixDim0(y, 1, 2)\n self._testSliceMatrixDim0(y, 3, 3)\n\n def testSingleElementAll(self):\n for _ in range(10):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(4, 4).astype(\"f\")\n a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32)\n\n x, y = np.random.randint(0, 3, size=2).tolist()\n slice_t = a[x, 0:y]\n slice_val = slice_t.eval()\n self.assertAllEqual(slice_val, inp[x, 0:y])\n\n def testSimple(self):\n with self.test_session(use_gpu=True) as sess:\n inp = np.random.rand(4, 4).astype(\"f\")\n a = constant_op.constant(\n [float(x) for x in inp.ravel(order=\"C\")],\n shape=[4, 4],\n dtype=dtypes.float32)\n slice_t = array_ops.slice(a, [0, 0], [2, 2])\n slice2_t = a[:2, :2]\n slice_val, slice2_val = sess.run([slice_t, slice2_t])\n self.assertAllEqual(slice_val, inp[:2, :2])\n self.assertAllEqual(slice2_val, inp[:2, :2])\n self.assertEqual(slice_val.shape, slice_t.get_shape())\n self.assertEqual(slice2_val.shape, slice2_t.get_shape())\n\n def testComplex(self):\n with self.test_session(use_gpu=True):\n inp = np.random.rand(4, 10, 10, 4).astype(\"f\")\n a = constant_op.constant(inp, dtype=dtypes.float32)\n\n x = np.random.randint(0, 9)\n z = np.random.randint(0, 9)\n if z > 0:\n y = np.random.randint(0, z)\n else:\n y = 0\n slice_t = a[:, x, y:z, :]\n self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])\n\n def testRandom(self):\n # Random dims of rank 6\n input_shape = np.random.randint(0, 20, size=6)\n inp = np.random.rand(*input_shape).astype(\"f\")\n with self.test_session(use_gpu=True) as sess:\n a = constant_op.constant(\n [float(x) for x in inp.ravel(order=\"C\")],\n shape=input_shape,\n dtype=dtypes.float32)\n indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]\n sizes = [\n np.random.randint(0, input_shape[i] - indices[i] + 1)\n for i in range(6)\n ]\n slice_t = array_ops.slice(a, indices, sizes)\n slice2_t = a[indices[0]:indices[0] + sizes[0], indices[1]:indices[\n 1] 
+ sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[3]\n                  + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:\n                  indices[5] + sizes[5]]\n\n      slice_val, slice2_val = sess.run([slice_t, slice2_t])\n\n      expected_val = inp[indices[0]:indices[0] + sizes[0], indices[1]:indices[\n          1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[\n              3] + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:indices[\n                  5] + sizes[5]]\n      self.assertAllEqual(slice_val, expected_val)\n      self.assertAllEqual(slice2_val, expected_val)\n      self.assertEqual(expected_val.shape, slice_t.get_shape())\n      self.assertEqual(expected_val.shape, slice2_t.get_shape())\n\n  def testPartialShapeInference(self):\n    z = array_ops.zeros((1, 2, 3))\n    self.assertAllEqual(z.get_shape().as_list(), [1, 2, 3])\n\n    m1 = array_ops.slice(z, [0, 0, 0], [-1, -1, -1])\n    self.assertAllEqual(m1.get_shape().as_list(), [1, 2, 3])\n\n    m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])\n    self.assertAllEqual(m2.get_shape().as_list(), [None, 2, None])\n\n\n  def _testGradientSlice(self, input_shape, slice_begin, slice_size):\n    with self.test_session(use_gpu=True):\n      num_inputs = np.prod(input_shape)\n      num_grads = np.prod(slice_size)\n      inp = np.random.rand(num_inputs).astype(\"f\").reshape(input_shape)\n      a = constant_op.constant(\n          [float(x) for x in inp.ravel(order=\"C\")],\n          shape=input_shape,\n          dtype=dtypes.float32)\n      slice_t = array_ops.slice(a, slice_begin, slice_size)\n      grads = np.random.rand(num_grads).astype(\"f\").reshape(slice_size)\n      grad_tensor = constant_op.constant(grads)\n      grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0]\n      result = grad.eval()\n\n      # Create a zero tensor of the input shape and place\n      # the grads into the right location to compare against TensorFlow.\n      np_ans = np.zeros(input_shape)\n      slices = []\n      for i in xrange(len(input_shape)):\n        slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))\n      np_ans[slices] = grads\n\n      self.assertAllClose(np_ans, result)\n\n  def _testGradientVariableSize(self):\n    with self.test_session(use_gpu=True):\n      inp = constant_op.constant([1.0, 2.0, 3.0], name=\"in\")\n      out = array_ops.slice(inp, [1], [-1])\n      grad_actual = gradients_impl.gradients(out, inp)[0].eval()\n      self.assertAllClose([0., 1., 1.], grad_actual)\n\n  def testGradientsAll(self):\n    # Slice the middle square out of a 4x4 input\n    self._testGradientSlice([4, 4], [1, 1], [2, 2])\n\n    # Slice the upper left square out of a 4x4 input\n    self._testGradientSlice([4, 4], [0, 0], [2, 2])\n\n    # Slice a non-square input starting from (2,1)\n    self._testGradientSlice([4, 4], [2, 1], [1, 2])\n\n    # Slice a 3D tensor\n    self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])\n\n    # Use -1 as a slice dimension.\n    self._testGradientVariableSize()\n\n  def testNotIterable(self):\n    # NOTE(mrry): If we register __getitem__ as an overloaded\n    # operator, Python will valiantly attempt to iterate over the\n    # Tensor from 0 to infinity. 
This test ensures that this\n    # unintended behavior is prevented.\n    c = constant_op.constant(5.0)\n    with self.assertRaisesWithPredicateMatch(\n        TypeError, lambda e: \"`Tensor` objects are not iterable\" in str(e)):\n      for _ in c:\n        pass\n\n  def testComputedShape(self):\n    # NOTE(mrry): We cannot currently handle partially-known values,\n    # because `tf.slice()` uses -1 to specify a wildcard size, and\n    # this can't be handled using the\n    # `tensor_util.constant_value_as_shape()` trick.\n    a = constant_op.constant([[1, 2, 3], [4, 5, 6]])\n    begin = constant_op.constant(0)\n    size = constant_op.constant(1)\n    b = array_ops.slice(a, [begin, 0], [size, 2])\n    self.assertEqual([1, 2], b.get_shape())\n\n    begin = array_ops.placeholder(dtypes.int32, shape=())\n    c = array_ops.slice(a, [begin, 0], [-1, 2])\n    self.assertEqual([None, 2], c.get_shape().as_list())\n\n  def testSliceOfSlice(self):\n    with self.test_session(use_gpu=True):\n      a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n      b = a[1:, :]\n      c = b[:-1, :]\n      d = c[1, :]\n      res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :]\n      self.assertAllEqual([0, 0, 0], res.eval())\n\n\nif __name__ == \"__main__\":\n  test.main()\n", "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"Library for constructing a training loop, suitable for TPUs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.tpu.python.tpu import tpu_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\n\n\ndef while_loop(condition, body, inputs=None, infeed_queue=None, name=None):\n  \"\"\"Builds a training loop for TPUs.\n\n  The set of loop-carried tensors corresponds to `inputs`.  Both\n  `condition` and `body` take the current value of the loop-carried\n  tensors. 'body' additionally takes a tuple of infeed from\n  infeed_queue if infeed_queue is not None. `condition` must return a\n  single boolean value that determines whether iteration\n  continues. 
`body` must return an updated list of values for the\n loop-carried tensors.\n\n Args:\n condition: a Python function that builds the loop condition.\n body: a Python function that builds the loop body.\n inputs: a list of initial values passed into the training loop, or\n None (equivalent to an empty list).\n infeed_queue: if not None, the infeed queue from which to append a tuple\n of arguments as inputs to condition.\n name: an optional name for the loop.\n\n Returns:\n The final values of the loop-carried tensors.\n\n Raises:\n TypeError: if body or condition has the wrong signature.\n \"\"\"\n\n # Converts inputs to Tensors.\n inputs = [] if inputs is None else [ops.convert_to_tensor(x) for\n x in inputs]\n input_types = [x.dtype for x in inputs]\n input_arity = len(inputs)\n\n body_arg_error = tpu_function.check_function_argument_count(\n body, input_arity, infeed_queue)\n if body_arg_error is not None:\n if infeed_queue is None:\n raise TypeError(\n \"Supplied loop body function cannot be called with the specified \"\n \"inputs. You specified %d inputs: %s, but the loop body needs %s\" % (\n input_arity, str([i.name for i in inputs]), body_arg_error))\n else:\n raise TypeError(\n \"Supplied loop body function cannot be called with the specified \"\n \"inputs. You specified %d inputs: %s and %d additional inputs from \"\n \"infeed, but the computation needs %s\" % (input_arity, str(\n [i.name for i in inputs]), infeed_queue.number_of_tuple_elements,\n body_arg_error))\n condition_arg_error = tpu_function.check_function_argument_count(\n condition, input_arity, None)\n if condition_arg_error is not None:\n if infeed_queue is None:\n raise TypeError(\n \"Supplied loop condition function cannot be called with the \"\n \"specified inputs. You specified %d inputs: %s, but the loop \"\n \"condition needs %s\" % (input_arity, str([i.name for i in inputs]),\n condition_arg_error))\n else:\n raise TypeError(\n \"Supplied loop condition function cannot be called with the \"\n \"specified inputs. You specified %d inputs: %s, but the loop \"\n \"condition needs %s. Note that infeed is not passed to the loop \"\n \"condition.\" % (input_arity, str([i.name for i in inputs]),\n condition_arg_error))\n\n def condition_wrapper(*inputs):\n # Discards the dummy output added for arity-0 loops.\n if input_arity == 0:\n inputs = []\n return condition(*inputs)\n\n def body_wrapper(*inputs):\n \"\"\"Wrapper around `body` that handles infeed queues and control deps.\"\"\"\n inputs = list(inputs)\n\n # Discards the dummy output added for arity-0 loops.\n if input_arity == 0:\n inputs = []\n\n # Runs `body` with the dequeue_ops appended.\n if infeed_queue:\n number_of_shards = tpu_function.get_tpu_context().number_of_shards\n if number_of_shards is None:\n raise ValueError(\"Can't build training loop with infeed when there is \"\n \"no tpu_shard_context. 
Are you building a loop or \"\n                         \"graph directly rather than from inside tpu.rewrite, \"\n                         \"tpu.batch_parallel, tpu.shard, or tpu.replicate?\")\n      infeed_queue.set_number_of_shards(number_of_shards)\n      dequeue_ops = [d for d in infeed_queue.generate_dequeue_op()]\n    else:\n      dequeue_ops = []\n    outputs = body(*(inputs + dequeue_ops))\n\n    # If the computation only returned one value, make it a tuple.\n    if not isinstance(outputs, (list, tuple)):\n      outputs = (outputs,)\n\n    outputs = [\n        o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)\n        for o in outputs\n    ]\n\n    # Separates the returned Operations and Tensors.\n    output_operations = [o for o in outputs if isinstance(o, ops.Operation)]\n    output_tensors = [o for o in outputs\n                      if not isinstance(o, ops.Operation)]\n\n    if outputs != output_tensors + output_operations:\n      raise ValueError(\n          \"TPU training loop body must return zero or more Tensor values \"\n          \"followed by zero or more Operations.\")\n\n    output_types = [op.dtype for op in output_tensors]\n    if input_types != output_types:\n      raise TypeError(\n          \"Mismatch between input types and output types for training loop \"\n          \"body: {} vs {}\".format(input_types, output_types))\n\n    # Add the dequeue operations to output_operations to ensure they are run\n    # by the loop, even if the programmer's loop body does not use them.\n    output_operations += dequeue_ops\n\n    # Add a dummy output, if needed.\n    if not output_tensors:\n      output_tensors = array_ops.constant(0)\n\n    if output_operations:\n      # TODO(phawkins): in principle this is too restrictive since it serializes\n      # the training loop steps. In practice it does not matter since this loop\n      # will be compiled by XLA.\n      return control_flow_ops.tuple(output_tensors,\n                                    control_inputs=output_operations)\n    else:\n      return output_tensors\n\n  # If the body has arity 0, add a dummy loop-carried value to which we can add\n  # control dependencies from any side-effecting operations.\n  if input_arity == 0:\n    inputs = [array_ops.constant(0)]\n  return control_flow_ops.while_loop(condition_wrapper, body_wrapper, inputs,\n                                     name=name)\n\n\ndef repeat(n, body, inputs=None, infeed_queue=None, name=None):\n  \"\"\"Builds a training loop that executes a fixed number of iterations.\n\n  The set of loop-carried tensors corresponds to `inputs`.\n  `body` must be a function that takes and returns the values of the\n  loop-carried tensors.\n\n  Args:\n    n: the number of loop iterations\n    body: a Python function that builds the loop body.\n    inputs: a list of initial values passed into the training loop or\n      None (equivalent to an empty list).\n    infeed_queue: if not None, the infeed queue from which to append a tuple\n      of arguments as inputs to condition.\n    name: an optional name for the loop.\n  Returns:\n    The final values of the loop-carried tensors.\n  Raises:\n    ValueError: if there is a type error.\n  \"\"\"\n  def _convert_to_list(xs):\n    if not isinstance(xs, (list, tuple)):\n      return [xs]\n    else:\n      return list(xs)\n\n  def cond(i, *args):\n    del args\n    return i < n\n\n  def body_wrapper(i, *args):\n    return [i + 1] + _convert_to_list(body(*args))\n\n  inputs = [0] if inputs is None else [0] + _convert_to_list(inputs)\n  outputs = while_loop(\n      cond, body_wrapper, inputs=inputs, infeed_queue=infeed_queue, name=name)\n  outputs = _convert_to_list(outputs)\n  if len(outputs) == 1:\n    # Returns the Op rather than an empty list.\n    return outputs[0].op\n  else:\n    return outputs[1:]\n" ]
[ [ "numpy.zeros", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.ops.array_ops.zeros", "tensorflow.python.ops.array_ops.slice", "tensorflow.python.ops.gradients_impl.gradients", "numpy.asarray", "tensorflow.python.platform.test.main", "numpy.random.rand", "numpy.prod", "numpy.random.randint", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.contrib.tpu.python.tpu.tpu_function.check_function_argument_count", "tensorflow.python.ops.control_flow_ops.while_loop", "tensorflow.python.ops.control_flow_ops.tuple", "tensorflow.contrib.tpu.python.tpu.tpu_function.get_tpu_context", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.framework.ops.convert_to_tensor" ] ]
EloyRD/ThesisExp
[ "dfb890708e95d23cc68ff79b0858630c12aa940d" ]
[ "scripts/EA_A_03_2LFact_Data.py" ]
[ "# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n#   jupytext:\n#     formats: ipynb,scripts//py:percent\n#     text_representation:\n#       extension: .py\n#       format_name: percent\n#       format_version: '1.2'\n#       jupytext_version: 1.1.6\n#   kernelspec:\n#     display_name: Python [conda env:thesis] *\n#     language: python\n#     name: conda-env-thesis-py\n# ---\n\n# %% [raw]\n# \\author{Eloy Ruiz-Donayre}\n# \\title{TESTCASE A - 2-Level 6-Factor Full Factorial (With 30 replicates) - Data Generation}\n# \\date{\\today}\n# \\maketitle\n\n# %% [raw]\n# \\tableofcontents\n\n# %% [markdown]\n# # Preliminaries\n\n# %% [markdown]\n# Importing python packages and setting display parameters\n\n# %%\nimport numpy as np\nimport pandas as pd\nimport itertools as it\nimport scipy.stats as stats\n\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nimport thesis_EAfunc as EAf\nimport thesis_visfunc as EAv\n\n# %%\nplt.style.use(\"bmh\")\n# %matplotlib inline\n# %config InlineBackend.figure_format = 'retina'\n\npd.set_option(\"display.latex.repr\", True)\npd.set_option(\"display.latex.longtable\", True)\n\n# %% [markdown] {\"toc-hr-collapsed\": false}\n# # Fitness Landscape Definition\n\n# %%\n# Problem domain\nx_min = -15\nx_max = 15\ny_min = -15\ny_max = 15\n\n# Known minimum\nx_point = -1\ny_point = -1\n\ndomain = (x_min, x_max, y_min, y_max)\npoint = (x_point, y_point)\nimg_size = (8.5, 4.25)\n\n# Problem definition\n\n\ndef f(x, y):\n    D = 2\n    alpha = 1 / 8\n\n    x = (x - 5) / 6\n    y = (y - 5) / 6\n\n    a = np.abs(x ** 2 + y ** 2 - D) ** (alpha * D)\n    b = (0.5 * (x ** 2 + y ** 2) + (x + y)) / D\n\n    return a + b + 0.5\n\n\n# %%\n# Testing the minimum\nprint(f(-1, -1))\n\n# %%\n# Testing the function\nprint(f(-1.0, -1.0), f(-11.0, -9.0), f(11.0, 3.0), f(-6.0, 9.0))\n\n# %% [markdown] {\"toc-hr-collapsed\": false}\n# # Setting up the experiment\n# 64 Experiments\n# >L-> In each experiment, one set of parameters is used.\n# >>L-> 30 Replicates per experiment.\n# >>>L-> Each replicate is different due to randomness effects.\n\n# %%\n# starting seed\nnp.random.seed(42)\n\n# %% [markdown]\n# ## Initializing data storage\n\n# %%\nmult_fit_cols = (\n    [\"exp\"]\n    + [\"pop_s\"]\n    + [\"b\"]\n    + [\"mut_p\"]\n    + [\"mut_s\"]\n    + [\"p_sel\"]\n    + [\"s_sel\"]\n    + [\"run\", \"generation\", \"fitness_min\", \"fitness_max\", \"fitness_mean\", \"fitness_std\"]\n)\nmulti_fit = pd.DataFrame(columns=mult_fit_cols)\nmulti_fit = multi_fit.infer_objects()\n\n# %% [markdown] {\"toc-hr-collapsed\": false}\n# ## Parameter space for the experiment\n\n# %% [markdown]\n# ### Initializing\n\n# %%\n# Algorithm parameters\n# Number of replicates, and generations per experiment\nrep_n = 30\ngen_f = 200\n\n# Population size\npop_s = [10, 160]\n\n# Parent subpopulation's selection method and size\npar_selection = [\"uniform\", \"tournament_k3\"]\nb = [0.5, 5]\npar_s = [z * y for z in pop_s for y in b]\n\n# Progeny subpopulation's size\nprog_s = par_s\n\n# Crossover Method\ncrossover = \"uniform\"\n# Mutation method, probability and size\nmutation = \"random_all_gau_dis\"\nmut_p = [0.1, 0.9]\nmut_s = [0.5, 5]\n\n# New population selection method\nsur_selection = [\"fitness_proportional_selection\", \"uniform\"]\n\n# %% [markdown]\n# ### 2-Level Factors encoded values\n\n# %%\ninputs_labels = {\n    \"pop_s\": \"Population size\",\n    \"b\": \"Progeny-to-population ratio\",\n    \"mut_p\": \"Mutation Probability\",\n    \"mut_s\": \"Mutation size\",\n    \"p_sel\": \"Parent selection\",\n    \"s_sel\": \"Survivor selection method\",\n}\n\ndat = [\n    (\"pop_s\", 
10, 160, -1, 1, \"Numerical\"),\n    (\"b\", 0.5, 5, -1, 1, \"Numerical\"),\n    (\"mut_p\", 0.1, 0.9, -1, 1, \"Numerical (<1)\"),\n    (\"mut_s\", 0.5, 5, -1, 1, \"Numerical\"),\n    (\"p_sel\", \"uniform\", \"tournament k3\", -1, 1, \"Categorical\"),\n    (\"s_sel\", \"fitness proportional\", \"uniform\", -1, 1, \"Categorical\"),\n]\n\ninputs_df = pd.DataFrame(\n    dat,\n    columns=[\n        \"Factor\",\n        \"Value_low\",\n        \"Value_high\",\n        \"encoded_low\",\n        \"encoded_high\",\n        \"Variable type\",\n    ],\n)\ninputs_df = inputs_df.set_index([\"Factor\"])\ninputs_df[\"Label\"] = inputs_df.index.map(lambda z: inputs_labels[z])\ninputs_df = inputs_df[\n    [\"Label\", \"Variable type\", \"Value_low\", \"Value_high\", \"encoded_low\", \"encoded_high\"]\n]\n\ninputs_df\n\n# %% [markdown]\n# ### Combining the 2-level Factors\n\n# %% [markdown]\n# We create a list with all the possible combinations of the 2-level factors\n\n# %%\nexp_par = list(it.product(pop_s, b, mut_p, mut_s, par_selection, sur_selection))\nprint('Number of parameter combinations in \"exp_par\": ' + str(len(exp_par)))\nprint()\nprint('First and last parameter combination in \"exp_par\":')\nprint(\"Sequence (pop_s, b, mut_p, mut_s, p_sel, s_sel)\")\nprint(exp_par[0])\nprint(exp_par[63])\n\n# %% [markdown]\n# # Experiment execution\n\n# %%\n# %%time\nexp_n = 1\nfor (zz, yy, xx, vv, uu, tt) in exp_par:\n    sur_selection = tt\n    par_selection = uu\n    mut_s = vv\n    mut_p = xx\n    b = yy\n    pop_s = zz\n    prog_s = int(b * pop_s)\n    par_s = prog_s\n\n    fitness_res = EAf.EA_exp_only_fitness(\n        rep_n,\n        gen_f,\n        f,\n        domain,\n        pop_s,\n        par_s,\n        prog_s,\n        mut_p,\n        mut_s,\n        par_selection,\n        crossover,\n        mutation,\n        sur_selection,\n    )\n\n    fitness_res.insert(0, \"s_sel\", tt)\n    fitness_res.insert(0, \"p_sel\", uu)\n    fitness_res.insert(0, \"mut_s\", vv)\n    fitness_res.insert(0, \"mut_p\", xx)\n    fitness_res.insert(0, \"b\", yy)\n    fitness_res.insert(0, \"pop_s\", zz)\n    fitness_res.insert(0, \"exp\", exp_n)\n    multi_fit = multi_fit.append(fitness_res, ignore_index=True, sort=False)\n    multi_fit = multi_fit.infer_objects()\n\n    exp_n += 1\n\n# %% [markdown]\n# ## Data storage\n\n# %% [markdown]\n# Writing the Data Frame to a pickle file\n\n# %%\nmulti_fit.to_pickle(\"./Data/TEST_A_2L_FitData.gz\", compression=\"gzip\")\n\n# %% [markdown]\n# Reading the Data Frame from a pickle file\n\n# %%\nmulti_fit = pd.read_pickle(\"./Data/TEST_A_2L_FitData.gz\", compression=\"gzip\")\n\n# %%\nmulti_fit.tail()\n\n# %% [markdown]\n# # Processing data for DOE Analysis\n\n# %% [markdown]\n# Storing the latest generation's population of each replicate\n\n# %%\nquery = multi_fit[\"generation\"] == gen_f\nmulti_final_fitness_res = multi_fit[query]\n\n# %% [markdown]\n# Reordering columns\n\n# %%\nmulti_final_fitness_res = multi_final_fitness_res.drop(\n    [\"exp\", \"generation\", \"run\", \"seed\"], axis=1\n)\nmulti_final_fitness_res.columns = [\n    \"pop_s\",\n    \"b\",\n    \"mut_p\",\n    \"mut_s\",\n    \"p_sel\",\n    \"s_sel\",\n    \"f_min\",\n    \"f_max\",\n    \"f_mean\",\n    \"f_std\",\n]\nmulti_final_fitness_res = multi_final_fitness_res[\n    [\n        \"pop_s\",\n        \"b\",\n        \"mut_p\",\n        \"mut_s\",\n        \"p_sel\",\n        \"s_sel\",\n        \"f_min\",\n        \"f_max\",\n        \"f_mean\",\n        \"f_std\",\n    ]\n]\nmulti_final_fitness_res = multi_final_fitness_res.reset_index(drop=True)\n\n# %% [markdown]\n# Encoding values for DOE's Factors\n\n# %%\nmulti_final_fitness_res[\"pop_s\"] = (\n    multi_final_fitness_res[\"pop_s\"].replace([10, 160], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"b\"] = (\n    
multi_final_fitness_res[\"b\"].replace([0.5, 5], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"mut_p\"] = (\n multi_final_fitness_res[\"mut_p\"].replace([0.1, 0.9], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"mut_s\"] = (\n multi_final_fitness_res[\"mut_s\"].replace([0.5, 5], [-1, 1]).infer_objects()\n)\nmulti_final_fitness_res[\"p_sel\"] = (\n multi_final_fitness_res[\"p_sel\"]\n .replace([\"uniform\", \"tournament_k3\"], [-1, 1])\n .infer_objects()\n)\nmulti_final_fitness_res[\"s_sel\"] = (\n multi_final_fitness_res[\"s_sel\"]\n .replace([\"fitness_proportional_selection\", \"uniform\"], [-1, 1])\n .infer_objects()\n)\n\n# %% [markdown]\n# Exploring the Data Frame\n\n# %%\nmulti_final_fitness_res.head()\n\n# %%\nmulti_final_fitness_res.tail()\n\n# %% [markdown]\n# Storing the Factor Coding and DOE results Data Frames\n\n# %%\ninputs_df.to_pickle(\"./Data/TEST_A_DOE_code.gz\", compression=\"gzip\")\nmulti_final_fitness_res.to_pickle(\"./Data/TEST_A_DOE_data.gz\", compression=\"gzip\")\n\n# %%\n" ]
[ [ "pandas.read_pickle", "matplotlib.pyplot.style.use", "pandas.DataFrame", "numpy.random.seed", "numpy.abs", "pandas.set_option" ] ]
monofo/fairseq
[ "335a4cbd403543ece43e24b41abbe53fc54b5f36" ]
[ "fairseq_cli/train.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport argparse\nimport logging\nimport math\nimport os\nimport sys\nfrom typing import Dict, Optional, Any, List, Tuple, Callable\n\nimport numpy as np\nimport torch\nfrom fairseq import (\n    checkpoint_utils,\n    options,\n    quantization_utils,\n    tasks,\n    utils,\n)\nfrom fairseq.data import iterators\nfrom fairseq.data.plasma_utils import PlasmaStore\nfrom fairseq.dataclass.configs import FairseqConfig\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils\nfrom fairseq.file_io import PathManager\nfrom fairseq.logging import meters, metrics, progress_bar\nfrom fairseq.model_parallel.megatron_trainer import MegatronTrainer\nfrom fairseq.trainer import Trainer\nfrom omegaconf import DictConfig, OmegaConf\n\n\nlogging.basicConfig(\n    format=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n    datefmt=\"%Y-%m-%d %H:%M:%S\",\n    level=os.environ.get(\"LOGLEVEL\", \"INFO\").upper(),\n    stream=sys.stdout,\n)\nlogger = logging.getLogger(\"fairseq_cli.train\")\n\n\ndef main(cfg: FairseqConfig) -> None:\n    if isinstance(cfg, argparse.Namespace):\n        cfg = convert_namespace_to_omegaconf(cfg)\n\n    utils.import_user_module(cfg.common)\n\n    if distributed_utils.is_master(cfg.distributed_training) and \"job_logging_cfg\" in cfg:\n        # make hydra logging work with ddp (see https://github.com/facebookresearch/hydra/issues/1126)\n        logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))\n\n    assert (\n        cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None\n    ), \"Must specify batch size either with --max-tokens or --batch-size\"\n    metrics.reset()\n\n    np.random.seed(cfg.common.seed)\n    utils.set_torch_seed(cfg.common.seed)\n\n    if distributed_utils.is_master(cfg.distributed_training):\n        checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)\n\n    # Print args\n    logger.info(cfg)\n\n    if cfg.checkpoint.write_checkpoints_asynchronously:\n        try:\n            import iopath  # noqa: F401\n        except ImportError:\n            logging.exception(\n                \"Asynchronous checkpoint writing is specified but iopath is \"\n                \"not installed: `pip install iopath`\"\n            )\n            return\n\n    # Setup task, e.g., translation, language modeling, etc.\n    task = tasks.setup_task(cfg.task)\n\n    assert cfg.criterion, \"Please specify criterion to train a model\"\n\n    # Build model and criterion\n    if cfg.distributed_training.ddp_backend == \"fully_sharded\":\n        with fsdp_enable_wrap(cfg.distributed_training):\n            model = fsdp_wrap(task.build_model(cfg.model))\n    else:\n        model = task.build_model(cfg.model)\n    criterion = task.build_criterion(cfg.criterion)\n    logger.info(model)\n    logger.info(\"task: {}\".format(task.__class__.__name__))\n    logger.info(\"model: {}\".format(model.__class__.__name__))\n    logger.info(\"criterion: {}\".format(criterion.__class__.__name__))\n    logger.info(\n        \"num. shared model params: {:,} (num. trained: {:,})\".format(\n            sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False)),\n            sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False) and p.requires_grad)\n        )\n    )\n\n    logger.info(\n        \"num. expert model params: {} (num. 
trained: {})\".format(\n sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False)),\n sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False) and p.requires_grad),\n )\n )\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n # We load the valid dataset AFTER building the model\n for valid_sub_split in cfg.dataset.valid_subset.split(\",\"):\n task.load_dataset(valid_sub_split, combine=False, epoch=1)\n\n # (optionally) Configure quantization\n if cfg.common.quantization_config_path is not None:\n quantizer = quantization_utils.Quantizer(\n config_path=cfg.common.quantization_config_path,\n max_epoch=cfg.optimization.max_epoch,\n max_update=cfg.optimization.max_update,\n )\n else:\n quantizer = None\n\n # Build trainer\n if cfg.common.model_parallel_size == 1:\n trainer = Trainer(cfg, task, model, criterion, quantizer)\n else:\n trainer = MegatronTrainer(cfg, task, model, criterion)\n logger.info(\n \"training on {} devices (GPUs/TPUs)\".format(\n cfg.distributed_training.distributed_world_size\n )\n )\n logger.info(\n \"max tokens per device = {} and max sentences per device = {}\".format(\n cfg.dataset.max_tokens,\n cfg.dataset.batch_size,\n )\n )\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(\n cfg.checkpoint,\n trainer,\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n if cfg.common.tpu:\n import torch_xla.core.xla_model as xm\n xm.rendezvous(\"load_checkpoint\") # wait for all workers\n\n max_epoch = cfg.optimization.max_epoch or math.inf\n lr = trainer.get_lr()\n train_meter = meters.StopwatchMeter()\n train_meter.start()\n while epoch_itr.next_epoch_idx <= max_epoch:\n if lr <= cfg.optimization.stop_min_lr:\n logger.info(\n f\"stopping training because current learning rate ({lr}) is smaller \"\n \"than or equal to minimum learning rate \"\n f\"(--stop-min-lr={cfg.optimization.stop_min_lr})\"\n )\n break\n\n # train for one epoch\n valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)\n if should_stop:\n break\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n epoch_itr = trainer.get_train_iterator(\n epoch_itr.next_epoch_idx,\n # sharded data: get train iterator for next epoch\n load_dataset=task.has_sharded_data(\"train\"),\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n train_meter.stop()\n logger.info(\"done training in {:.1f} seconds\".format(train_meter.sum))\n\n # ioPath implementation to wait for all asynchronous file writes to complete.\n if cfg.checkpoint.write_checkpoints_asynchronously:\n logger.info(\n \"ioPath PathManager waiting for all asynchronous checkpoint \"\n \"writes to finish.\"\n )\n PathManager.async_close()\n logger.info(\"ioPath PathManager finished waiting.\")\n\n\ndef should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:\n # skip check if no validation was done in the current epoch\n if valid_loss is None:\n return False\n if cfg.checkpoint.patience <= 0:\n return False\n\n def is_better(a, b):\n return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b\n\n prev_best = getattr(should_stop_early, \"best\", None)\n if prev_best is None or is_better(valid_loss, prev_best):\n should_stop_early.best = valid_loss\n 
should_stop_early.num_runs = 0\n return False\n else:\n should_stop_early.num_runs += 1\n if should_stop_early.num_runs >= cfg.checkpoint.patience:\n logger.info(\n \"early stop since valid performance hasn't improved for last {} runs\".format(\n cfg.checkpoint.patience\n )\n )\n return True\n else:\n return False\n\n\[email protected](\"train\")\ndef train(\n cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr\n) -> Tuple[List[Optional[float]], bool]:\n \"\"\"Train the model for one epoch and return validation losses.\"\"\"\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,\n shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),\n )\n update_freq = (\n cfg.optimization.update_freq[epoch_itr.epoch - 1]\n if epoch_itr.epoch <= len(cfg.optimization.update_freq)\n else cfg.optimization.update_freq[-1]\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n if cfg.common.tpu:\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=cfg.common.log_format,\n log_interval=cfg.common.log_interval,\n epoch=epoch_itr.epoch,\n tensorboard_logdir=(\n cfg.common.tensorboard_logdir\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n default_log_format=(\"tqdm\" if not cfg.common.no_progress_bar else \"simple\"),\n wandb_project=(\n cfg.common.wandb_project\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n wandb_run_name=os.environ.get(\n \"WANDB_NAME\", os.path.basename(cfg.checkpoint.save_dir)\n ),\n azureml_logging=(\n cfg.common.azureml_logging\n if distributed_utils.is_master(cfg.distributed_training)\n else False\n ),\n )\n progress.update_config(_flatten_config(cfg))\n\n trainer.begin_epoch(epoch_itr.epoch)\n\n valid_subsets = cfg.dataset.valid_subset.split(\",\")\n should_stop = False\n num_updates = trainer.get_num_updates()\n logger.info(\"Start iterating over samples\")\n for i, samples in enumerate(progress):\n with metrics.aggregate(\"train_inner\"), torch.autograd.profiler.record_function(\n \"train_step-%d\" % i\n ):\n log_output = trainer.train_step(samples)\n\n if log_output is not None: # not OOM, overflow, ...\n # log mid-epoch stats\n num_updates = trainer.get_num_updates()\n if num_updates % cfg.common.log_interval == 0:\n stats = get_training_stats(metrics.get_smoothed_values(\"train_inner\"))\n progress.log(stats, tag=\"train_inner\", step=num_updates)\n\n # reset mid-epoch stats after each log interval\n # the end-of-epoch stats will still be preserved\n metrics.reset_meters(\"train_inner\")\n\n end_of_epoch = not itr.has_next()\n valid_losses, should_stop = validate_and_save(\n cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch\n )\n\n if should_stop:\n break\n\n # log end-of-epoch stats\n logger.info(\"end of epoch {} (average epoch stats below)\".format(epoch_itr.epoch))\n stats = get_training_stats(metrics.get_smoothed_values(\"train\"))\n progress.print(stats, tag=\"train\", step=num_updates)\n\n # reset epoch-level meters\n metrics.reset_meters(\"train\")\n return valid_losses, should_stop\n\n\ndef _flatten_config(cfg: DictConfig):\n config = OmegaConf.to_container(cfg)\n # remove any legacy Namespaces and replace with a single \"args\"\n namespace = None\n for k, v in list(config.items()):\n if isinstance(v, argparse.Namespace):\n namespace = v\n del config[k]\n if namespace is not None:\n config[\"args\"] = vars(namespace)\n return config\n\n\ndef validate_and_save(\n 
cfg: DictConfig,\n trainer: Trainer,\n task: tasks.FairseqTask,\n epoch_itr,\n valid_subsets: List[str],\n end_of_epoch: bool,\n) -> Tuple[List[Optional[float]], bool]:\n num_updates = trainer.get_num_updates()\n max_update = cfg.optimization.max_update or math.inf\n\n # Stopping conditions (and an additional one based on validation loss later\n # on)\n should_stop = False\n if num_updates >= max_update:\n should_stop = True\n logger.info(\n f\"Stopping training due to \"\n f\"num_updates: {num_updates} >= max_update: {max_update}\"\n )\n\n training_time_hours = trainer.cumulative_training_time() / (60 * 60)\n if (\n cfg.optimization.stop_time_hours > 0\n and training_time_hours > cfg.optimization.stop_time_hours\n ):\n should_stop = True\n logger.info(\n f\"Stopping training due to \"\n f\"cumulative_training_time: {training_time_hours} > \"\n f\"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)\"\n )\n\n do_save = (\n (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)\n or should_stop\n or (\n cfg.checkpoint.save_interval_updates > 0\n and num_updates > 0\n and num_updates % cfg.checkpoint.save_interval_updates == 0\n and num_updates >= cfg.dataset.validate_after_updates\n )\n )\n do_validate = (\n (not end_of_epoch and do_save) # validate during mid-epoch saves\n or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)\n or should_stop\n or (\n cfg.dataset.validate_interval_updates > 0\n and num_updates > 0\n and num_updates % cfg.dataset.validate_interval_updates == 0\n )\n ) and not cfg.dataset.disable_validation\n\n # Validate\n valid_losses = [None]\n if do_validate:\n valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)\n\n should_stop |= should_stop_early(cfg, valid_losses[0])\n\n # Save checkpoint\n if do_save or should_stop:\n checkpoint_utils.save_checkpoint(\n cfg.checkpoint, trainer, epoch_itr, valid_losses[0]\n )\n\n return valid_losses, should_stop\n\n\ndef get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:\n stats[\"wall\"] = round(metrics.get_meter(\"default\", \"wall\").elapsed_time, 0)\n return stats\n\n\ndef validate(\n cfg: DictConfig,\n trainer: Trainer,\n task: tasks.FairseqTask,\n epoch_itr,\n subsets: List[str],\n) -> List[Optional[float]]:\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n\n if cfg.dataset.fixed_validation_seed is not None:\n # set fixed seed for every validation\n utils.set_torch_seed(cfg.dataset.fixed_validation_seed)\n\n trainer.begin_valid_epoch(epoch_itr.epoch)\n valid_losses = []\n for subset in subsets:\n logger.info('begin validation on \"{}\" subset'.format(subset))\n\n # Initialize data iterator\n itr = trainer.get_valid_iterator(subset).next_epoch_itr(\n shuffle=False, set_dataset_epoch=False # use a fixed valid set\n )\n if cfg.common.tpu:\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=cfg.common.log_format,\n log_interval=cfg.common.log_interval,\n epoch=epoch_itr.epoch,\n prefix=f\"valid on '{subset}' subset\",\n tensorboard_logdir=(\n cfg.common.tensorboard_logdir\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n default_log_format=(\"tqdm\" if not cfg.common.no_progress_bar else \"simple\"),\n wandb_project=(\n cfg.common.wandb_project\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n wandb_run_name=os.environ.get(\n \"WANDB_NAME\", os.path.basename(cfg.checkpoint.save_dir)\n ),\n )\n\n # create a new root metrics 
aggregator so validation metrics\n # don't pollute other aggregators (e.g., train meters)\n with metrics.aggregate(new_root=True) as agg:\n for i, sample in enumerate(progress):\n if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps:\n break\n trainer.valid_step(sample)\n\n # log validation stats\n stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])\n return valid_losses\n\n\ndef get_valid_stats(\n cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]\n) -> Dict[str, Any]:\n stats[\"num_updates\"] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, \"best\"):\n key = \"best_{0}\".format(cfg.checkpoint.best_checkpoint_metric)\n best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min\n stats[key] = best_function(\n checkpoint_utils.save_checkpoint.best,\n stats[cfg.checkpoint.best_checkpoint_metric],\n )\n return stats\n\n\ndef cli_main(\n modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None\n) -> None:\n parser = options.get_training_parser()\n args = options.parse_args_and_arch(parser, modify_parser=modify_parser)\n\n cfg = convert_namespace_to_omegaconf(args)\n\n if cfg.common.use_plasma_view:\n server = PlasmaStore(path=cfg.common.plasma_path)\n logger.info(f\"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}\")\n\n if args.profile:\n with torch.cuda.profiler.profile():\n with torch.autograd.profiler.emit_nvtx():\n distributed_utils.call_main(cfg, main)\n else:\n distributed_utils.call_main(cfg, main)\n\n # if cfg.common.use_plasma_view:\n # server.server.kill()\n\n\nif __name__ == \"__main__\":\n cli_main()\n" ]
[ [ "torch.autograd.profiler.record_function", "numpy.random.seed", "torch.cuda.profiler.profile", "torch.autograd.profiler.emit_nvtx" ] ]
Joshuaalbert/neural_deprojection
[ "5f7859bfd514efe1707a61e2a5e7fc6d949f85ce" ]
[ "neural_deprojection/models/TwoD_to_2d_dVAE_GCD/graph_networks.py" ]
[ "import sys\n\nsys.path.insert(1, '/data/s2675544/git/neural_deprojection/')\nsys.path.insert(1, '/home/matthijs/git/neural_deprojection/')\n\nfrom graph_nets import blocks\nfrom graph_nets.utils_tf import concat\n\nimport tensorflow as tf\nimport sonnet as snt\nfrom graph_nets.graphs import GraphsTuple\nfrom graph_nets.utils_tf import fully_connect_graph_dynamic, fully_connect_graph_static\nfrom neural_deprojection.graph_net_utils import AbstractModule, histogramdd, get_shape\nimport tensorflow_probability as tfp\nfrom neural_deprojection.models.openai_dvae_modules.modules import Encoder, Decoder\n\nclass DiscreteImageVAE(AbstractModule):\n def __init__(self,\n hidden_size: int = 64,\n embedding_dim: int = 64,\n num_embedding: int = 1024,\n num_token_samples: int = 32,\n num_channels=1,\n name=None):\n super(DiscreteImageVAE, self).__init__(name=name)\n # (num_embedding, embedding_dim)\n self.num_channels=num_channels\n self.embeddings = tf.Variable(initial_value=tf.random.truncated_normal((num_embedding, embedding_dim)),\n name='embeddings')\n self.num_token_samples = num_token_samples\n self.num_embedding = num_embedding\n self.embedding_dim = embedding_dim\n self.temperature = tf.Variable(initial_value=tf.constant(1.), name='temperature', trainable=False)\n self.beta = tf.Variable(initial_value=tf.constant(6.6), name='beta', trainable=False)\n\n self.encoder = Encoder(hidden_size=hidden_size, num_embeddings=num_embedding, name='EncoderImage')\n self.decoder = Decoder(hidden_size=hidden_size, num_channels=num_channels, name='DecoderImage')\n\n def set_beta(self, beta):\n self.beta.assign(beta)\n\n def set_temperature(self, temperature):\n self.temperature.assign(temperature)\n\n @tf.function(input_signature=[tf.TensorSpec([None, None, None, None], dtype=tf.float32)])\n def sample_encoder(self, img):\n return self.encoder(img)\n\n @tf.function(input_signature=[tf.TensorSpec([None, None, None, None], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.float32),\n tf.TensorSpec([], dtype=tf.float32)])\n def sample_decoder(self, img_logits, temperature, num_samples):\n [batch, H, W, _] = get_shape(img_logits)\n\n logits = tf.reshape(img_logits, [batch * H * W, self.num_embedding]) # [batch*H*W, num_embeddings]\n reduce_logsumexp = tf.math.reduce_logsumexp(logits, axis=-1) # [batch*H*W]\n reduce_logsumexp = tf.tile(reduce_logsumexp[:, None], [1, self.num_embedding]) # [batch*H*W, num_embedding]\n logits -= reduce_logsumexp # [batch*H*W, num_embeddings]\n token_distribution = tfp.distributions.RelaxedOneHotCategorical(temperature, logits=logits)\n token_samples_onehot = token_distribution.sample((num_samples,),\n name='token_samples') # [S, batch*H*W, num_embeddings]\n def _single_decode(token_sample_onehot):\n # [batch*H*W, num_embeddings] @ [num_embeddings, embedding_dim]\n token_sample = tf.matmul(token_sample_onehot, self.embeddings) # [batch*H*W, embedding_dim] # = z ~ q(z|x)\n latent_img = tf.reshape(token_sample, [batch, H, W, self.embedding_dim]) # [batch, H, W, embedding_dim]\n decoded_img = self.decoder(latent_img) # [batch, H', W', C*2]\n return decoded_img\n\n decoded_ims = tf.vectorized_map(_single_decode, token_samples_onehot) # [S, batch, H', W', C*2]\n decoded_im = tf.reduce_mean(decoded_ims, axis=0) # [batch, H', W', C*2]\n return decoded_im\n\n\n def log_likelihood(self, img, mu, logb):\n \"\"\"\n Log-Laplace distribution.\n\n Args:\n img: [...,c] assumes of the form log(maximum(1e-5, img))\n mu: [...,c]\n logb: [...,c]\n\n Returns:\n log_prob [...]\n \"\"\"\n log_prob = - 
tf.math.abs(img - mu) / tf.math.exp(logb) \\\n - tf.math.log(2.) - img - logb\n return tf.reduce_sum(log_prob, axis=-1)\n\n\n def _build(self, img, **kwargs) -> dict:\n \"\"\"\n\n Args:\n img: [batch, H', W', num_channel]\n **kwargs:\n\n Returns:\n\n \"\"\"\n encoded_img_logits = self.encoder(img) # [batch, H, W, num_embedding]\n [batch, H, W, _] = get_shape(encoded_img_logits)\n\n logits = tf.reshape(encoded_img_logits, [batch*H*W, self.num_embedding]) # [batch*H*W, num_embeddings]\n reduce_logsumexp = tf.math.reduce_logsumexp(logits, axis=-1) # [batch*H*W]\n reduce_logsumexp = tf.tile(reduce_logsumexp[:, None], [1, self.num_embedding]) # [batch*H*W, num_embedding]\n logits -= reduce_logsumexp # [batch*H*W, num_embeddings]\n\n temperature = tf.maximum(0.1, tf.cast(1. - 0.1/(self.step/1000), tf.float32))\n token_distribution = tfp.distributions.RelaxedOneHotCategorical(temperature, logits=logits)\n token_samples_onehot = token_distribution.sample((self.num_token_samples,), name='token_samples') # [S, batch*H*W, num_embeddings]\n\n def _single_decode(token_sample_onehot):\n #[batch*H*W, num_embeddings] @ [num_embeddings, embedding_dim]\n token_sample = tf.matmul(token_sample_onehot, self.embeddings) # [batch*H*W, embedding_dim] # = z ~ q(z|x)\n latent_img = tf.reshape(token_sample, [batch, H, W, self.embedding_dim]) # [batch, H, W, embedding_dim]\n decoded_img = self.decoder(latent_img) # [batch, H', W', C*2]\n # print('decod shape', decoded_img)\n img_mu = decoded_img[..., :self.num_channels] #[batch, H', W', C]\n # print('mu shape', img_mu)\n img_logb = decoded_img[..., self.num_channels:]\n # print('logb shape', img_logb)\n log_likelihood = self.log_likelihood(img, img_mu, img_logb)#[batch, H', W', C]\n log_likelihood = tf.reduce_sum(log_likelihood, axis=[-3,-2,-1]) # [batch]\n sum_selected_logits = tf.math.reduce_sum(token_sample_onehot * logits, axis=-1) # [batch*H*W]\n sum_selected_logits = tf.reshape(sum_selected_logits, [batch, H, W])\n kl_term = tf.reduce_sum(sum_selected_logits, axis=[-2,-1])#[batch]\n return log_likelihood, kl_term, decoded_img\n\n #num_samples, batch\n log_likelihood_samples, kl_term_samples, decoded_ims = tf.vectorized_map(_single_decode, token_samples_onehot) # [S, batch], [S, batch]\n\n if self.step % 50 == 0:\n img_mu_0 = tf.reduce_mean(decoded_ims, axis=0)[..., :self.num_channels]\n img_mu_0 -= tf.reduce_min(img_mu_0)\n img_mu_0 /= tf.reduce_max(img_mu_0)\n tf.summary.image('mu', img_mu_0, step=self.step)\n\n smoothed_img = img[..., self.num_channels:]\n smoothed_img = (smoothed_img - tf.reduce_min(smoothed_img)) / (\n tf.reduce_max(smoothed_img) - tf.reduce_min(smoothed_img))\n tf.summary.image(f'img_before_autoencoder', smoothed_img, step=self.step)\n\n var_exp = tf.reduce_mean(log_likelihood_samples, axis=0) # [batch]\n kl_div = tf.reduce_mean(kl_term_samples, axis=0) # [batch]\n elbo = var_exp - kl_div # batch\n loss = - tf.reduce_mean(elbo) # scalar\n\n entropy = -tf.reduce_sum(logits * tf.math.exp(logits), axis=-1) # [batch*H*W]\n perplexity = 2. 
** (-entropy / tf.math.log(2.)) # [batch*H*W]\n mean_perplexity = tf.reduce_mean(perplexity) # scalar\n\n if self.step % 2 == 0:\n logits = tf.nn.softmax(logits, axis=-1) # [batch*H*W, num_embedding]\n logits -= tf.reduce_min(logits)\n logits /= tf.reduce_max(logits)\n logits = tf.reshape(logits, [batch, H*W, self.num_embedding])[0] # [H*W, num_embedding]\n # tf.repeat(tf.repeat(logits, 16*[4], axis=0), 512*[4], axis=1)\n tf.summary.image('logits', logits[None, :, :, None], step=self.step)\n tf.summary.scalar('perplexity', mean_perplexity, step=self.step)\n tf.summary.scalar('var_exp', tf.reduce_mean(var_exp), step=self.step)\n tf.summary.scalar('kl_div', tf.reduce_mean(kl_div), step=self.step)\n\n\n return dict(loss=loss,\n metrics=dict(var_exp=var_exp,\n kl_div=kl_div,\n mean_perplexity=mean_perplexity))\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.summary.image", "tensorflow.random.truncated_normal", "tensorflow.matmul", "tensorflow.nn.softmax", "tensorflow.reduce_sum", "tensorflow.math.log", "tensorflow.math.reduce_logsumexp", "tensorflow.reduce_min", "tensorflow.constant", "tensorflow.vectorized_map", "tensorflow.cast", "tensorflow.tile", "tensorflow.math.reduce_sum", "tensorflow.math.abs", "tensorflow.reduce_mean", "tensorflow.TensorSpec", "tensorflow.math.exp" ] ]
bfxavier/GamestonkTerminal
[ "b0a685cacaca1f06fc41d8041bcae5492216dc52" ]
[ "gamestonk_terminal/prediction_techniques/neural_networks.py" ]
[ "import argparse\nimport os\nfrom warnings import simplefilter\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nfrom TimeSeriesCrossValidation import splitTrain\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, SimpleRNN, Dense, Dropout\n\nfrom gamestonk_terminal.helper_funcs import (\n check_positive,\n get_next_stock_market_days,\n parse_known_args_and_warn,\n print_pretty_prediction,\n)\n\nfrom gamestonk_terminal import config_neural_network_models as cfg_nn_models\n\n\nregister_matplotlib_converters()\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\n\nsimplefilter(action=\"ignore\", category=FutureWarning)\n\n\n# ----------------------------------------------------------------------------------------------------\ndef build_neural_network_model(Recurrent_Neural_Network, n_inputs, n_days):\n model = Sequential()\n\n for idx_layer, d_layer in enumerate(Recurrent_Neural_Network):\n # Recurrent Neural Network\n if str(*d_layer) == \"SimpleRNN\":\n # Is this the input layer? If so, define input_shape\n if idx_layer == 0:\n model.add(SimpleRNN(**d_layer[\"SimpleRNN\"], input_shape=(n_inputs, 1)))\n # Is this the last output layer? If so, set units to prediction days\n elif idx_layer == (len(Recurrent_Neural_Network) - 1):\n model.add(SimpleRNN(**d_layer[\"SimpleRNN\"], units=n_days))\n else:\n model.add(SimpleRNN(**d_layer[\"SimpleRNN\"]))\n\n # Long-Short Term-Memory\n elif str(*d_layer) == \"LSTM\":\n # Is this the input layer? If so, define input_shape\n if idx_layer == 0:\n model.add(LSTM(**d_layer[\"LSTM\"], input_shape=(n_inputs, 1)))\n # Is this the last output layer? If so, set units to prediction days\n elif idx_layer == (len(Recurrent_Neural_Network) - 1):\n model.add(LSTM(**d_layer[\"LSTM\"], units=n_days))\n else:\n model.add(LSTM(**d_layer[\"LSTM\"]))\n\n # Dense (Simple Neuron)\n elif str(*d_layer) == \"Dense\":\n # Is this the input layer? If so, define input_shape\n if idx_layer == 0:\n model.add(Dense(**d_layer[\"Dense\"], input_dim=n_inputs))\n # Is this the last output layer? If so, set units to prediction days\n elif idx_layer == (len(Recurrent_Neural_Network) - 1):\n model.add(Dense(**d_layer[\"Dense\"], units=n_days))\n else:\n model.add(Dense(**d_layer[\"Dense\"]))\n\n # Dropout (Regularization)\n elif str(*d_layer) == \"Dropout\":\n model.add(Dropout(**d_layer[\"Dropout\"]))\n\n else:\n print(f\"Incorrect neuron type: {str(*d_layer)}\")\n\n return model\n\n\ndef mlp(l_args, s_ticker, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False, prog=\"mlp\", description=\"\"\"Multilayer Perceptron. 
\"\"\"\n )\n\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"prediction days.\",\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n dest=\"n_inputs\",\n type=check_positive,\n default=40,\n help=\"number of days to use for prediction.\",\n )\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n action=\"store\",\n dest=\"n_epochs\",\n type=check_positive,\n default=200,\n help=\"number of training epochs.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jumps\",\n action=\"store\",\n dest=\"n_jumps\",\n type=check_positive,\n default=1,\n help=\"number of jumps in training data.\",\n )\n parser.add_argument(\n \"-p\",\n \"--pp\",\n action=\"store\",\n dest=\"s_preprocessing\",\n default=\"normalization\",\n choices=[\"normalization\", \"standardization\", \"none\"],\n help=\"pre-processing data.\",\n )\n parser.add_argument(\n \"-o\",\n \"--optimizer\",\n action=\"store\",\n dest=\"s_optimizer\",\n default=\"adam\",\n choices=[\n \"adam\",\n \"adagrad\",\n \"adadelta\",\n \"adamax\",\n \"ftrl\",\n \"nadam\",\n \"optimizer\",\n \"rmsprop\",\n \"sgd\",\n ],\n help=\"optimization technique.\",\n )\n parser.add_argument(\n \"-l\",\n \"--loss\",\n action=\"store\",\n dest=\"s_loss\",\n default=\"mae\",\n choices=[\"mae\", \"mape\", \"mse\", \"msle\"],\n help=\"loss function.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Pre-process data\n if ns_parser.s_preprocessing == \"standardization\":\n scaler = StandardScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n elif ns_parser.s_preprocessing == \"normalization\":\n scaler = MinMaxScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n else: # No pre-processing\n stock_train_data = np.array(\n df_stock[\"5. adjusted close\"].values.reshape(-1, 1)\n )\n\n # Split training data for the neural network\n stock_x, stock_y = splitTrain.split_train(\n stock_train_data,\n ns_parser.n_inputs,\n ns_parser.n_days,\n numJumps=ns_parser.n_jumps,\n )\n stock_x = np.array(stock_x)\n stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1]))\n stock_y = np.array(stock_y)\n stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1]))\n\n # Build Neural Network model\n model = build_neural_network_model(\n cfg_nn_models.MultiLayer_Perceptron, ns_parser.n_inputs, ns_parser.n_days\n )\n model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)\n\n # Train our model\n model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)\n print(\"\")\n\n print(model.summary())\n print(\"\")\n\n # Prediction\n yhat = model.predict(\n stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs),\n verbose=0,\n )\n\n # Re-scale the data back\n if (ns_parser.s_preprocessing == \"standardization\") or (\n ns_parser.s_preprocessing == \"normalization\"\n ):\n y_pred_test_t = scaler.inverse_transform(yhat.tolist())\n else:\n y_pred_test_t = yhat\n\n l_pred_days = get_next_stock_market_days(\n last_stock_day=df_stock[\"5. adjusted close\"].index[-1],\n n_next_days=ns_parser.n_days,\n )\n df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name=\"Price\")\n\n # Plotting\n plt.figure()\n plt.plot(df_stock.index, df_stock[\"5. 
adjusted close\"], lw=3)\n plt.title(f\"MLP on {s_ticker} - {ns_parser.n_days} days prediction\")\n plt.xlim(\n df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]\n )\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.plot(\n [df_stock.index[-1], df_pred.index[0]],\n [df_stock[\"5. adjusted close\"].values[-1], df_pred.values[0]],\n lw=1,\n c=\"tab:green\",\n linestyle=\"--\",\n )\n plt.plot(df_pred.index, df_pred, lw=2, c=\"tab:green\")\n plt.axvspan(\n df_stock.index[-1], df_pred.index[-1], facecolor=\"tab:orange\", alpha=0.2\n )\n _, _, ymin, ymax = plt.axis()\n plt.vlines(\n df_stock.index[-1],\n ymin,\n ymax,\n colors=\"k\",\n linewidth=3,\n linestyle=\"--\",\n color=\"k\",\n )\n plt.ion()\n plt.show()\n\n # Print prediction data\n print_pretty_prediction(df_pred, df_stock[\"5. adjusted close\"].values[-1])\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef rnn(l_args, s_ticker, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False, prog=\"rnn\", description=\"\"\"Recurrent Neural Network. \"\"\"\n )\n\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"prediction days.\",\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n dest=\"n_inputs\",\n type=check_positive,\n default=40,\n help=\"number of days to use for prediction.\",\n )\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n action=\"store\",\n dest=\"n_epochs\",\n type=check_positive,\n default=200,\n help=\"number of training epochs.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jumps\",\n action=\"store\",\n dest=\"n_jumps\",\n type=check_positive,\n default=1,\n help=\"number of jumps in training data.\",\n )\n parser.add_argument(\n \"-p\",\n \"--pp\",\n action=\"store\",\n dest=\"s_preprocessing\",\n default=\"normalization\",\n choices=[\"normalization\", \"standardization\", \"none\"],\n help=\"pre-processing data.\",\n )\n parser.add_argument(\n \"-o\",\n \"--optimizer\",\n action=\"store\",\n dest=\"s_optimizer\",\n default=\"adam\",\n help=\"optimizer technique\",\n choices=[\n \"adam\",\n \"adagrad\",\n \"adadelta\",\n \"adamax\",\n \"ftrl\",\n \"nadam\",\n \"optimizer\",\n \"rmsprop\",\n \"sgd\",\n ],\n )\n parser.add_argument(\n \"-l\",\n \"--loss\",\n action=\"store\",\n dest=\"s_loss\",\n default=\"mae\",\n choices=[\"mae\", \"mape\", \"mse\", \"msle\"],\n help=\"loss function.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Pre-process data\n if ns_parser.s_preprocessing == \"standardization\":\n scaler = StandardScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n elif ns_parser.s_preprocessing == \"normalization\":\n scaler = MinMaxScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n else: # No pre-processing\n stock_train_data = np.array(\n df_stock[\"5. 
adjusted close\"].values.reshape(-1, 1)\n )\n\n # Split training data for the neural network\n stock_x, stock_y = splitTrain.split_train(\n stock_train_data,\n ns_parser.n_inputs,\n ns_parser.n_days,\n numJumps=ns_parser.n_jumps,\n )\n stock_x = np.array(stock_x)\n stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))\n stock_y = np.array(stock_y)\n stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))\n\n # Build Neural Network model\n model = build_neural_network_model(\n cfg_nn_models.Recurrent_Neural_Network, ns_parser.n_inputs, ns_parser.n_days\n )\n model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)\n\n # Train our model\n model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)\n print(\"\")\n\n print(model.summary())\n print(\"\")\n\n # Prediction\n yhat = model.predict(\n stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs, 1),\n verbose=0,\n )\n\n # Re-scale the data back\n if (ns_parser.s_preprocessing == \"standardization\") or (\n ns_parser.s_preprocessing == \"normalization\"\n ):\n y_pred_test_t = scaler.inverse_transform(yhat.tolist())\n else:\n y_pred_test_t = yhat\n\n l_pred_days = get_next_stock_market_days(\n last_stock_day=df_stock[\"5. adjusted close\"].index[-1],\n n_next_days=ns_parser.n_days,\n )\n df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name=\"Price\")\n\n # Plotting\n plt.figure()\n plt.plot(df_stock.index, df_stock[\"5. adjusted close\"], lw=3)\n plt.title(f\"RNN on {s_ticker} - {ns_parser.n_days} days prediction\")\n plt.xlim(\n df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]\n )\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.plot(\n [df_stock.index[-1], df_pred.index[0]],\n [df_stock[\"5. adjusted close\"].values[-1], df_pred.values[0]],\n lw=1,\n c=\"tab:green\",\n linestyle=\"--\",\n )\n plt.plot(df_pred.index, df_pred, lw=2, c=\"tab:green\")\n plt.axvspan(\n df_stock.index[-1], df_pred.index[-1], facecolor=\"tab:orange\", alpha=0.2\n )\n _, _, ymin, ymax = plt.axis()\n plt.vlines(\n df_stock.index[-1],\n ymin,\n ymax,\n colors=\"k\",\n linewidth=3,\n linestyle=\"--\",\n color=\"k\",\n )\n plt.ion()\n plt.show()\n\n # Print prediction data\n print_pretty_prediction(df_pred, df_stock[\"5. adjusted close\"].values[-1])\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef lstm(l_args, s_ticker, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False, prog=\"lstm\", description=\"\"\"Long-Short Term Memory. 
\"\"\"\n )\n\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"prediction days\",\n )\n parser.add_argument(\n \"-i\",\n \"--input\",\n action=\"store\",\n dest=\"n_inputs\",\n type=check_positive,\n default=40,\n help=\"number of days to use for prediction.\",\n )\n parser.add_argument(\n \"-e\",\n \"--epochs\",\n action=\"store\",\n dest=\"n_epochs\",\n type=check_positive,\n default=200,\n help=\"number of training epochs.\",\n )\n parser.add_argument(\n \"-j\",\n \"--jumps\",\n action=\"store\",\n dest=\"n_jumps\",\n type=check_positive,\n default=1,\n help=\"number of jumps in training data.\",\n )\n parser.add_argument(\n \"-p\",\n \"--pp\",\n action=\"store\",\n dest=\"s_preprocessing\",\n default=\"normalization\",\n choices=[\"normalization\", \"standardization\", \"none\"],\n help=\"pre-processing data.\",\n )\n parser.add_argument(\n \"-o\",\n \"--optimizer\",\n action=\"store\",\n dest=\"s_optimizer\",\n default=\"adam\",\n help=\"optimization technique.\",\n choices=[\n \"adam\",\n \"adagrad\",\n \"adadelta\",\n \"adamax\",\n \"ftrl\",\n \"nadam\",\n \"optimizer\",\n \"rmsprop\",\n \"sgd\",\n ],\n )\n parser.add_argument(\n \"-l\",\n \"--loss\",\n action=\"store\",\n dest=\"s_loss\",\n default=\"mae\",\n choices=[\"mae\", \"mape\", \"mse\", \"msle\"],\n help=\"loss function.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Pre-process data\n if ns_parser.s_preprocessing == \"standardization\":\n scaler = StandardScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n elif ns_parser.s_preprocessing == \"normalization\":\n scaler = MinMaxScaler()\n stock_train_data = scaler.fit_transform(\n np.array(df_stock[\"5. adjusted close\"].values.reshape(-1, 1))\n )\n else: # No pre-processing\n stock_train_data = np.array(\n df_stock[\"5. adjusted close\"].values.reshape(-1, 1)\n )\n\n # Split training data for the neural network\n stock_x, stock_y = splitTrain.split_train(\n stock_train_data,\n ns_parser.n_inputs,\n ns_parser.n_days,\n numJumps=ns_parser.n_jumps,\n )\n stock_x = np.array(stock_x)\n stock_x = np.reshape(stock_x, (stock_x.shape[0], stock_x.shape[1], 1))\n stock_y = np.array(stock_y)\n stock_y = np.reshape(stock_y, (stock_y.shape[0], stock_y.shape[1], 1))\n\n # Build Neural Network model\n model = build_neural_network_model(\n cfg_nn_models.Long_Short_Term_Memory, ns_parser.n_inputs, ns_parser.n_days\n )\n model.compile(optimizer=ns_parser.s_optimizer, loss=ns_parser.s_loss)\n\n # Train our model\n model.fit(stock_x, stock_y, epochs=ns_parser.n_epochs, verbose=1)\n print(\"\")\n\n print(model.summary())\n print(\"\")\n\n # Prediction\n yhat = model.predict(\n stock_train_data[-ns_parser.n_inputs :].reshape(1, ns_parser.n_inputs, 1),\n verbose=0,\n )\n\n # Re-scale the data back\n if (ns_parser.s_preprocessing == \"standardization\") or (\n ns_parser.s_preprocessing == \"normalization\"\n ):\n y_pred_test_t = scaler.inverse_transform(yhat.tolist())\n else:\n y_pred_test_t = yhat\n\n l_pred_days = get_next_stock_market_days(\n last_stock_day=df_stock[\"5. adjusted close\"].index[-1],\n n_next_days=ns_parser.n_days,\n )\n df_pred = pd.Series(y_pred_test_t[0].tolist(), index=l_pred_days, name=\"Price\")\n\n # Plotting\n plt.figure()\n plt.plot(df_stock.index, df_stock[\"5. 
adjusted close\"], lw=3)\n plt.title(f\"LSTM on {s_ticker} - {ns_parser.n_days} days prediction\")\n plt.xlim(\n df_stock.index[0], get_next_stock_market_days(df_pred.index[-1], 1)[-1]\n )\n plt.xlabel(\"Time\")\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.plot(\n [df_stock.index[-1], df_pred.index[0]],\n [df_stock[\"5. adjusted close\"].values[-1], df_pred.values[0]],\n lw=1,\n c=\"tab:green\",\n linestyle=\"--\",\n )\n plt.plot(df_pred.index, df_pred, lw=2, c=\"tab:green\")\n plt.axvspan(\n df_stock.index[-1], df_pred.index[-1], facecolor=\"tab:orange\", alpha=0.2\n )\n _, _, ymin, ymax = plt.axis()\n plt.vlines(\n df_stock.index[-1],\n ymin,\n ymax,\n colors=\"k\",\n linewidth=3,\n linestyle=\"--\",\n color=\"k\",\n )\n plt.ion()\n plt.show()\n\n # Print prediction data\n print_pretty_prediction(df_pred, df_stock[\"5. adjusted close\"].values[-1])\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n" ]
[ [ "pandas.plotting.register_matplotlib_converters", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "numpy.reshape", "matplotlib.pyplot.title", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.axis", "matplotlib.pyplot.axvspan", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.ion", "tensorflow.keras.models.Sequential", "matplotlib.pyplot.grid", "matplotlib.pyplot.vlines", "tensorflow.keras.layers.SimpleRNN", "matplotlib.pyplot.minorticks_on", "matplotlib.pyplot.show", "tensorflow.keras.layers.LSTM", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
markson14/RRPN_pytorch
[ "f30c6180c44c2d6cc65ce4521a3cf839b5215089" ]
[ "maskrcnn_benchmark/modeling/rrpn/inference.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\n\nfrom maskrcnn_benchmark.modeling.box_coder import BoxCoder\nfrom maskrcnn_benchmark.modeling.rbox_coder import RBoxCoder\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList, RBoxList\nfrom maskrcnn_benchmark.structures.rboxlist_ops import cat_boxlist\nfrom maskrcnn_benchmark.structures.rboxlist_ops import boxlist_nms #\nfrom maskrcnn_benchmark.structures.rboxlist_ops import remove_small_boxes\n\nfrom ..utils import cat\n\n\nclass RPNPostProcessor(torch.nn.Module):\n \"\"\"\n Performs post-processing on the outputs of the RPN boxes, before feeding the\n proposals to the heads\n \"\"\"\n\n def __init__(\n self,\n pre_nms_top_n,\n post_nms_top_n,\n nms_thresh,\n min_size,\n box_coder=None,\n fpn_post_nms_top_n=None,\n ):\n \"\"\"\n Arguments:\n pre_nms_top_n (int)\n post_nms_top_n (int)\n nms_thresh (float)\n min_size (int)\n box_coder (BoxCoder)\n fpn_post_nms_top_n (int)\n \"\"\"\n super(RPNPostProcessor, self).__init__()\n self.pre_nms_top_n = pre_nms_top_n\n self.post_nms_top_n = post_nms_top_n\n self.nms_thresh = nms_thresh\n self.min_size = min_size\n\n if box_coder is None:\n box_coder = RBoxCoder(weights=(1.0, 1.0, 1.0, 1.0, 1.0))\n self.box_coder = box_coder\n\n if fpn_post_nms_top_n is None:\n fpn_post_nms_top_n = post_nms_top_n\n self.fpn_post_nms_top_n = fpn_post_nms_top_n\n\n def add_gt_proposals(self, proposals, targets):\n \"\"\"\n Arguments:\n proposals: list[BoxList]\n targets: list[BoxList]\n \"\"\"\n # Get the device we're operating on\n device = proposals[0].bbox.device\n\n gt_boxes = [target.copy_with_fields([]) for target in targets]\n\n # later cat of bbox requires all fields to be present for all bbox\n # so we need to add a dummy for objectness that's missing\n for gt_box in gt_boxes:\n gt_box.add_field(\"objectness\", torch.ones(len(gt_box), device=device))\n\n proposals = [\n cat_boxlist((proposal, gt_box))\n for proposal, gt_box in zip(proposals, gt_boxes)\n ]\n # print('rrpn_proposal:', proposals[0].bbox.size(), proposals[0].bbox[:, 2:4])\n return proposals\n\n # proposal_target_layer\n def forward_for_single_feature_map(self, anchors, objectness, box_regression):\n \"\"\"\n Arguments:\n anchors: list[BoxList]\n objectness: tensor of size N, A, H, W\n box_regression: tensor of size N, A * 5, H, W\n \"\"\"\n device = objectness.device\n N, A, H, W = objectness.shape\n\n # put in the same format as anchors\n objectness = objectness.permute(0, 2, 3, 1).reshape(N, -1)\n objectness = objectness.sigmoid()\n box_regression = box_regression.view(N, -1, 5, H, W).permute(0, 3, 4, 1, 2)\n box_regression = box_regression.reshape(N, -1, 5)\n\n num_anchors = A * H * W\n\n pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)\n objectness, topk_idx = objectness.topk(pre_nms_top_n, dim=1, sorted=True)\n\n batch_idx = torch.arange(N, device=device)[:, None]\n box_regression = box_regression[batch_idx, topk_idx]\n\n image_shapes = [box.size for box in anchors]\n concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)\n concat_anchors = concat_anchors.reshape(N, -1, 5)[batch_idx, topk_idx]\n\n # print('concat_anchors:', concat_anchors.size(), concat_anchors[:, 2:4])\n\n proposals = self.box_coder.decode(\n box_regression.view(-1, 5), concat_anchors.view(-1, 5)\n )\n\n proposals = proposals.view(N, -1, 5)\n # print('outsider:', proposals.size(), proposals[:, 2:4], 'box_regression:', box_regression)\n\n #-------\n result = []\n for proposal, score, im_shape in zip(proposals, 
objectness, image_shapes):\n boxlist = RBoxList(proposal, im_shape, mode=\"xywha\")\n\n # print('before nms:', boxlist.bbox.size(), boxlist.bbox[:, 2:4])\n\n boxlist.add_field(\"objectness\", score)\n # boxlist = boxlist.clip_to_image(remove_empty=False)\n boxlist = remove_small_boxes(boxlist, self.min_size)\n boxlist = boxlist_nms(\n boxlist,\n self.nms_thresh,\n max_proposals=self.post_nms_top_n,\n score_field=\"objectness\",\n )\n\n # print('after nms:', boxlist.bbox.size(), boxlist.bbox[:, 2:4])\n\n result.append(boxlist)\n return result\n\n def forward(self, anchors, objectness, box_regression, targets=None):\n \"\"\"\n Arguments:\n anchors: list[list[BoxList]]\n objectness: list[tensor]\n box_regression: list[tensor]\n\n Returns:\n boxlists (list[BoxList]): the post-processed anchors, after\n applying box decoding and NMS\n \"\"\"\n sampled_boxes = []\n num_levels = len(objectness)\n anchors = list(zip(*anchors))\n for a, o, b in zip(anchors, objectness, box_regression):\n sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))\n\n boxlists = list(zip(*sampled_boxes))\n boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]\n\n if num_levels > 1:\n boxlists = self.select_over_all_levels(boxlists)\n\n # append ground-truth bboxes to proposals\n if self.training and targets is not None:\n boxlists = self.add_gt_proposals(boxlists, targets)\n\n return boxlists\n\n def select_over_all_levels(self, boxlists):\n num_images = len(boxlists)\n # different behavior during training and during testing:\n # during training, post_nms_top_n is over *all* the proposals combined, while\n # during testing, it is over the proposals for each image\n # TODO resolve this difference and make it consistent. It should be per image,\n # and not per batch\n if self.training:\n objectness = torch.cat(\n [boxlist.get_field(\"objectness\") for boxlist in boxlists], dim=0\n )\n box_sizes = [len(boxlist) for boxlist in boxlists]\n post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))\n _, inds_sorted = torch.topk(objectness, post_nms_top_n, dim=0, sorted=True)\n inds_mask = torch.zeros_like(objectness, dtype=torch.uint8)\n inds_mask[inds_sorted] = 1\n inds_mask = inds_mask.split(box_sizes)\n for i in range(num_images):\n boxlists[i] = boxlists[i][inds_mask[i]]\n else:\n for i in range(num_images):\n objectness = boxlists[i].get_field(\"objectness\")\n post_nms_top_n = min(self.fpn_post_nms_top_n, len(objectness))\n _, inds_sorted = torch.topk(\n objectness, post_nms_top_n, dim=0, sorted=True\n )\n boxlists[i] = boxlists[i][inds_sorted]\n return boxlists\n\n\ndef make_rpn_postprocessor(config, rpn_box_coder, is_train):\n fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN\n if not is_train:\n fpn_post_nms_top_n = config.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST\n\n pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TRAIN\n post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TRAIN\n if not is_train:\n pre_nms_top_n = config.MODEL.RPN.PRE_NMS_TOP_N_TEST\n post_nms_top_n = config.MODEL.RPN.POST_NMS_TOP_N_TEST\n nms_thresh = config.MODEL.RPN.NMS_THRESH\n min_size = config.MODEL.RPN.MIN_SIZE\n box_selector = RPNPostProcessor(\n pre_nms_top_n=pre_nms_top_n,\n post_nms_top_n=post_nms_top_n,\n nms_thresh=nms_thresh,\n min_size=min_size,\n box_coder=rpn_box_coder,\n fpn_post_nms_top_n=fpn_post_nms_top_n,\n )\n return box_selector\n" ]
[ [ "torch.topk", "torch.arange", "torch.cat", "torch.zeros_like" ] ]
iwan933/mlmi-federated-learning
[ "e148664304dd7fbbc2cc2a6a34567533748c1720" ]
[ "mlmi/participant.py" ]
[ "import copy\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Union\n\nimport torch\nfrom pytorch_lightning.metrics import Accuracy\nfrom torch import Tensor, optim\nfrom torch.utils import data\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.callbacks.base import Callback\n\nfrom mlmi.structs import OptimizerArgs, TrainArgs, ModelArgs\nfrom mlmi.log import getLogger\nfrom mlmi.settings import CHECKPOINT_DIR\n\n\nlogger = getLogger(__name__)\n\n\ndef optimizer_state_dict_to_cpu(optimizer_state_dict):\n c = copy.deepcopy(optimizer_state_dict)\n o = {}\n state_dict = c.get('state')\n r = {}\n for key, state in state_dict.items():\n s = {}\n for k, v in state.items():\n if torch.is_tensor(v):\n s[k] = v.cpu()\n else:\n s[k] = v\n r[key] = s\n o['state'] = r\n o['param_groups'] = c.get('param_groups')\n return o\n\n\nclass BaseParticipant(object):\n\n def __init__(self, participant_name: str, model_args: ModelArgs, context):\n assert participant_name is not None, 'A participant name is required to load and save logs'\n assert model_args is not None, 'Model args are required to initialize a model for the participant'\n assert context is not None, 'Experiment context is required for participant'\n\n self._name = participant_name\n self._cluster_id = None\n self._experiment_context = context\n participant_model_kwargs = self.get_model_kwargs()\n if participant_model_kwargs is not None:\n self._model = model_args(participant_name=participant_name, **participant_model_kwargs)\n else:\n self._model = model_args(participant_name=participant_name)\n self._model_args = model_args\n\n def get_model_kwargs(self) -> Optional[Dict]:\n return None\n\n @property\n def model(self) -> Union[pl.LightningModule, 'BaseParticipantModel']:\n \"\"\"\n The model to train\n :return: The model\n \"\"\"\n return self._model\n\n @property\n def cluster_id(self) -> str:\n return self._cluster_id\n\n @cluster_id.setter\n def cluster_id(self, value: str):\n self._cluster_id = value\n\n def overwrite_model_state(self, model_state: Dict[str, Tensor]):\n \"\"\"\n Loads the model state into the current model instance\n :param model_state: The model state to load\n \"\"\"\n self._model.load_state_dict(model_state, strict=False)\n\n def load_model_state_from_checkpoint(self):\n \"\"\"\n Load the model state from an existing saved checkpoint\n \"\"\"\n self._model = self._model_args.model_class.load_from_checkpoint(\n checkpoint_path=str(self.get_checkpoint_path().absolute()))\n\n def get_checkpoint_path(self, suffix: Union[str, None] = None) -> Path:\n \"\"\"\n Constructs a checkpoint path based on\n :return:\n \"\"\"\n str_suffix = '' if suffix is None else '_' + suffix\n filename = (self._name + str_suffix + '.ckpt')\n return CHECKPOINT_DIR / self._experiment_context.name / filename\n\n def save_model_state(self):\n \"\"\"\n Saves the model state of the aggregated model\n :param target_path: The path to save the model at\n :return:\n \"\"\"\n path = self.get_checkpoint_path()\n path.parent.mkdir(parents=True, exist_ok=True)\n torch.save(self._model.state_dict(), path)\n\n\nclass BaseTrainingParticipant(BaseParticipant):\n def __init__(self, client_id: str, model_args: ModelArgs, context,\n train_dataloader: data.DataLoader, num_train_samples: int,\n test_dataloader: data.DataLoader, num_test_samples: int,\n lightning_logger: LightningLoggerBase, *args, **kwargs):\n self._train_dataloader = train_dataloader\n self._test_dataloader = 
test_dataloader\n self._num_train_samples = sum([len(y) for x, y in train_dataloader])\n self._num_test_samples = num_test_samples\n self._lightning_logger = lightning_logger\n self._callbacks = None\n self._model_state = None\n self._trainer = None\n super().__init__(client_id, model_args, context)\n\n def create_trainer(self, enable_logging=True, **kwargs) -> pl.Trainer:\n \"\"\"\n Creates a new trainer instance for each training round.\n :param kwargs: additional keyword arguments to send to the trainer for configuration\n :return: a pytorch lightning trainer instance\n \"\"\"\n _kwargs = kwargs.copy()\n _kwargs['logger'] = self.logger\n _kwargs['checkpoint_callback'] = False\n if torch.cuda.is_available():\n _kwargs['gpus'] = 1\n return pl.Trainer(callbacks=self._callbacks, limit_val_batches=0.0, **_kwargs)\n\n def set_trainer_callbacks(self, callbacks: List[Callback]):\n self._callbacks = callbacks\n\n @property\n def logger(self) -> LightningLoggerBase:\n \"\"\"\n Gets the logger to use for the training in later stage.\n :return: The lightning logger to use\n \"\"\"\n return self._lightning_logger\n\n @property\n def train_data_loader(self) -> data.DataLoader:\n return self._train_dataloader\n\n @property\n def test_data_loader(self) -> data.DataLoader:\n return self._test_dataloader\n\n @property\n def num_train_samples(self) -> int:\n return self._num_train_samples\n\n @property\n def num_test_samples(self) -> int:\n return self._num_test_samples\n\n def train(self, training_args: TrainArgs, *args, **kwargs):\n \"\"\"\n Implement the training routine.\n :param training_args:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n trainer = self.create_trainer(enable_logging=False, **training_args.kwargs)\n train_dataloader = self.train_data_loader\n trainer.fit(self.model, train_dataloader)\n del self.model.trainer\n\n def test(self, model: Optional[torch.nn.Module] = None, use_local_model: bool = False):\n \"\"\"\n Test the model state on this clients data.\n :param\n :param model_state: The model state to evaluate\n :return: The output loss\n \"\"\"\n assert use_local_model or model is not None\n\n trainer = self.create_trainer(enable_logging=False, progress_bar_refresh_rate=0)\n\n if use_local_model:\n result = trainer.test(model=self.model, test_dataloaders=self.test_data_loader, verbose=False)\n self._model = self._model.cpu()\n del self._model.trainer\n else:\n result = trainer.test(model=model, test_dataloaders=self.test_data_loader, verbose=False)\n return result\n\n\nclass BaseAggregatorParticipant(BaseParticipant):\n\n def __init__(self, participant_name: str, model_args: ModelArgs, context):\n super().__init__(participant_name, model_args, context)\n\n def aggregate(self, participants: List['BaseTrainingParticipant'], *args, **kwargs):\n \"\"\"\n Aggregate the models of other participants with their models.\n :param participants: Participants to apply the model changes from\n :return:\n \"\"\"\n raise NotImplementedError()\n\n\nclass BaseParticipantModel(object):\n\n def __init__(self, *args, participant_name=None, optimizer_args: Optional[OptimizerArgs]=None,\n model=None, **kwargs):\n assert participant_name is not None, 'Please provide a participant name parameter in model args to identify' \\\n 'your model in logging'\n assert optimizer_args is not None, 'Optimizer args not set!'\n assert model is not None, 'Model not passed!'\n self.participant_name = participant_name\n self.optimizer_args = optimizer_args\n super().__init__(*args, **kwargs)\n self.model = model\n 
self._optimizer_state = None\n\n @property\n def optimizer_state(self):\n return self._optimizer_state\n\n @optimizer_state.setter\n def optimizer_state(self, value):\n self._optimizer_state = value\n\n def configure_optimizers(self):\n return self.optimizer_args(self.model.parameters())\n \"\"\"\n Do not restore state\n if self.optimizer_state is not None:\n optimizer.load_state_dict(self.optimizer_state)\n return optimizer\n \"\"\"\n" ]
[ [ "torch.cuda.is_available", "torch.is_tensor" ] ]
Flsahkong/transferlearning
[ "0fe84de59dcb2871e2dca24130dc24e1ccce8506" ]
[ "code/distance/mmd_pytorch.py" ]
[ "# Compute MMD distance using pytorch\n\nimport torch\nimport torch.nn as nn\n\n\nclass MMD_loss(nn.Module):\n def __init__(self, kernel_type='rbf', kernel_mul=2.0, kernel_num=5):\n super(MMD_loss, self).__init__()\n self.kernel_num = kernel_num\n self.kernel_mul = kernel_mul\n self.fix_sigma = None\n self.kernel_type = kernel_type\n\n def guassian_kernel(self, source, target, kernel_mul=2.0, kernel_num=5, fix_sigma=None):\n n_samples = int(source.size()[0]) + int(target.size()[0])\n total = torch.cat([source, target], dim=0)\n total0 = total.unsqueeze(0).expand(\n int(total.size(0)), int(total.size(0)), int(total.size(1)))\n total1 = total.unsqueeze(1).expand(\n int(total.size(0)), int(total.size(0)), int(total.size(1)))\n L2_distance = ((total0-total1)**2).sum(2)\n if fix_sigma:\n bandwidth = fix_sigma\n else:\n bandwidth = torch.sum(L2_distance.data) / (n_samples**2-n_samples)\n bandwidth /= kernel_mul ** (kernel_num // 2)\n bandwidth_list = [bandwidth * (kernel_mul**i)\n for i in range(kernel_num)]\n kernel_val = [torch.exp(-L2_distance / bandwidth_temp)\n for bandwidth_temp in bandwidth_list]\n return sum(kernel_val)\n\n def linear_mmd2(self, f_of_X, f_of_Y):\n loss = 0.0\n delta = f_of_X.float().mean(0) - f_of_Y.float().mean(0)\n loss = delta.dot(delta.T)\n return loss\n\n def forward(self, source, target):\n if self.kernel_type == 'linear':\n return self.linear_mmd2(source, target)\n elif self.kernel_type == 'rbf':\n batch_size = int(source.size()[0])\n kernels = self.guassian_kernel(\n source, target, kernel_mul=self.kernel_mul, kernel_num=self.kernel_num, fix_sigma=self.fix_sigma)\n with torch.no_grad():\n XX = torch.mean(kernels[:batch_size, :batch_size])\n YY = torch.mean(kernels[batch_size:, batch_size:])\n XY = torch.mean(kernels[:batch_size, batch_size:])\n YX = torch.mean(kernels[batch_size:, :batch_size])\n loss = torch.mean(XX + YY - XY - YX)\n torch.cuda.empty_cache()\n return loss\n" ]
[ [ "torch.sum", "torch.cuda.empty_cache", "torch.no_grad", "torch.exp", "torch.cat", "torch.mean" ] ]
goudfroo/pysiaf
[ "ca8350ce814950344789a9674079b8d0168ac05e" ]
[ "pysiaf/iando/write.py" ]
[ "\"\"\"Functions to write Science Instrument Aperture Files (SIAF).\n\nSIAF content in an aperture_collection object can be written to an xml file that can be ingested in\nthe PRD. Format and order of the xml fields are defined in SIAF reference files.\nWriting to Microsoft Excel .xlsx format is supported.\nWriting to .csv and other formats supported by astropy.table.Table.write is enabled.\n\nAuthors\n-------\n Johannes Sahlmann\n\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport lxml.etree as ET\nfrom astropy.time import Time\nfrom astropy.table import Table, Column\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Font, Color\nfrom openpyxl.styles import Alignment\n\nfrom ..version import __version__\nfrom ..constants import _JWST_TEMPORARY_ROOT\nfrom ..aperture import PRD_REQUIRED_ATTRIBUTES_ORDERED, SIAF_XML_FIELD_FORMAT, FLOAT_ATTRIBUTES\n\n# dictionary used to set field precision in SIAF.XML\nxml_decimal_precision = {}\nfield_names = list(SIAF_XML_FIELD_FORMAT['field_name'])\nfor attr in PRD_REQUIRED_ATTRIBUTES_ORDERED:\n index = field_names.index(attr)\n xml_decimal_precision[attr] = SIAF_XML_FIELD_FORMAT['pyformat'][index]\n\n\ndef write_jwst_siaf(aperture_collection, filename=None, basepath=None, label=None,\n file_format='xml', verbose=True):\n \"\"\"Write the content of aperture_collection into xml and xlsx files that are PRD-compliant.\n\n Parameters\n ----------\n aperture_collection : ApertureCollection\n dictionary of apertures\n filename\n basepath\n label\n file_format : str list\n one of ['xml', 'xlsx', 'csv', and formats supported by astropy Table.write]\n verbose\n\n Returns\n -------\n\n TODO\n ----\n test support of astropy Table.write formats (FITS not working)\n\n\n \"\"\"\n if type(file_format) == str:\n file_format = [file_format]\n\n aperture_names = np.array([key for key in aperture_collection.apertures.keys()])\n instrument = aperture_collection.apertures[aperture_names[0]].InstrName\n\n if instrument == 'NIRCAM':\n instrument = 'NIRCam'\n elif instrument == 'NIRSPEC':\n instrument = 'NIRSpec'\n\n if (filename is not None) and (len(list(file_format)) != 1):\n raise RuntimeError('When filename is specified, only one output format is supported')\n\n if label is not None:\n name_seed = instrument + '_SIAF_{}'.format(label)\n else:\n name_seed = instrument + '_SIAF'\n\n filenames = []\n # hostname = os.uname()[1]\n username = os.getlogin()\n timestamp = Time.now()\n\n for file_format in list(file_format):\n if filename is None:\n if basepath is None:\n basepath = _JWST_TEMPORARY_ROOT\n if not os.path.isdir(basepath):\n raise RuntimeError(\"Could not write SIAF data \"\n \"to {}. 
Directory does not exist.\".format(basepath))\n if file_format == 'xml':\n out_filename = os.path.join(basepath, name_seed+'.xml')\n elif file_format == 'xlsx':\n out_filename = os.path.join(basepath, name_seed+'.xlsx')\n # elif file_format == 'csv':\n # out_filename = os.path.join(basepath, name_seed+'.csv')\n else:\n out_filename = os.path.join(basepath, name_seed+'.{}'.format(file_format))\n else:\n out_filename = filename\n\n if file_format == 'xml':\n root = ET.Element('SiafEntries')\n\n # add generation info as comment to SIAFXML\n root.append(ET.Comment('Generated {} {}'.format(timestamp.isot, timestamp.scale)))\n root.append(ET.Comment('by {}'.format(username)))\n # try:\n # repo = git.Repo(os.path.abspath(__file__), search_parent_directories=True)\n # git_version = git.Git(repo.working_dir).describe()\n # root.append(ET.Comment('pysiaf git-version {}'.format(git_version)))\n # except git.exc.InvalidGitRepositoryError:\n root.append(ET.Comment('pysiaf version {}'.format(__version__)))\n\n for aperture_name in aperture_names:\n\n aperture = aperture_collection.apertures[aperture_name]\n siaf_entry = ET.SubElement(root, 'SiafEntry')\n for attribute in PRD_REQUIRED_ATTRIBUTES_ORDERED:\n attribute_value = getattr(aperture_collection.apertures[aperture_name],\n attribute)\n if attribute_value is None:\n attribute_text = None\n\n # NIRSpec special case\n elif (aperture.AperType in ['TRANSFORM']) and \\\n (attribute in 'XSciRef YSciRef XSciScale YSciScale V2Ref V3Ref'.\n split()):\n attribute_text = '{:{prec}}'.format(attribute_value,\n prec='.15e').strip()\n elif attribute in FLOAT_ATTRIBUTES:\n attribute_text = '{:{prec}}'.format(\n attribute_value, prec=xml_decimal_precision[attribute]).strip()\n else:\n attribute_text = str(attribute_value)\n\n if (not isinstance(attribute_value, str)) and (attribute_text is not None):\n if np.isnan(attribute_value):\n attribute_text = None\n\n ET.SubElement(siaf_entry, attribute).text = attribute_text\n\n doc = ET.ElementTree(root)\n\n doc.write(out_filename, pretty_print=True, xml_declaration=False)\n if verbose:\n print('Wrote Siaf to xml file {}'.format(out_filename))\n\n elif file_format == 'xlsx':\n siaf_workbook = Workbook()\n\n ws1 = siaf_workbook.active\n ws1.title = 'SIAF'\n\n header_row_description = 1\n header_row_attributes = 2\n\n # write descriptive header\n for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):\n col = j + 1\n if attribute_name == 'InstrName':\n text = 'Aperture Basic Info'\n elif attribute_name == 'XDetSize':\n text = 'Detector Frame'\n elif attribute_name == 'XSciSize':\n text = 'Science Frame'\n elif attribute_name == 'V2Ref':\n text = 'V Frame'\n elif attribute_name == 'V2IdlYAngle':\n text = 'Frame Relationships'\n elif attribute_name == 'XIdlVert1':\n text = 'Vertices'\n elif attribute_name == 'Sci2IdlDeg':\n text = 'Science to Ideal Polynomial'\n else:\n text = ''\n\n cell = ws1.cell(column=col, row=header_row_description, value=\"{}\".format(text))\n cell.font = Font(name='Courier', b=True, i=True, family=3.0, sz=14.0)\n # cell.font.color = Color(rgb='FF0000FF', type='rgb')\n\n # write aperture attributes\n for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):\n col = j + 1\n cell = ws1.cell(column=col, row=header_row_attributes, value=\"{}\".\n format(attribute_name))\n cell.font = Font(name='Calibri', b=True, family=2.0, sz=15.0)\n cell.alignment = Alignment(horizontal='center')\n\n # write aperture values\n for i, aper_name in enumerate(aperture_names):\n aperture = 
aperture_collection.apertures[aper_name]\n # aperture = siaf[aper_name]\n\n row = i + 1 + header_row_attributes\n for j, attribute_name in enumerate(PRD_REQUIRED_ATTRIBUTES_ORDERED):\n col = j + 1\n cell = ws1.cell(column=col, row=row, value=\"{}\".\n format(getattr(aperture, attribute_name)))\n if attribute_name not in 'InstrName\tAperName DDCName AperType AperShape'.\\\n split():\n cell.alignment = Alignment(horizontal='right')\n\n # adjust column width\n for column_cells in ws1.columns:\n length = max(len(cell.value or '') for cell in column_cells[1:])\n ws1.column_dimensions[column_cells[0].column].width = length * 1.5\n siaf_workbook.save(filename=out_filename)\n if verbose:\n print('Wrote Siaf to xlsx file {}'.format(out_filename))\n\n else:\n table = Table()\n for attribute_name in PRD_REQUIRED_ATTRIBUTES_ORDERED:\n data = [getattr(aperture_collection.apertures[aperture_name], attribute_name) for\n aperture_name in aperture_names]\n table.add_column(Column(data=data, name=attribute_name))\n table.write(out_filename, format=file_format)\n if verbose:\n print('Wrote Siaf to {} file {}'.format(file_format, out_filename))\n\n filenames.append(out_filename)\n\n return filenames\n" ]
[ [ "numpy.isnan" ] ]
vnarayan13/featuretools
[ "a86b6d8df246a13558d19915b15230c418ad27ab" ]
[ "featuretools/primitives/standard/aggregation_primitives.py" ]
[ "from __future__ import division\n\nfrom datetime import datetime, timedelta\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..base.aggregation_primitive_base import (\n AggregationPrimitive,\n make_agg_primitive\n)\n\nfrom featuretools.variable_types import (\n Boolean,\n DatetimeTimeIndex,\n Discrete,\n Index,\n Numeric,\n Variable\n)\n\n\nclass Count(AggregationPrimitive):\n \"\"\"Counts the number of non null values.\"\"\"\n name = \"count\"\n input_types = [[Index]]\n return_type = Numeric\n stack_on_self = False\n default_value = 0\n\n def get_function(self):\n return 'count'\n\n def generate_name(self, base_feature_names, child_entity_id,\n parent_entity_id, where_str, use_prev_str):\n return u\"COUNT(%s%s%s)\" % (child_entity_id,\n where_str, use_prev_str)\n\n\nclass Sum(AggregationPrimitive):\n \"\"\"Sums elements of a numeric or boolean feature.\"\"\"\n name = \"sum\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on_self = False\n stack_on_exclude = [Count]\n default_value = 0\n\n def get_function(self):\n return np.sum\n\n\nclass Mean(AggregationPrimitive):\n \"\"\"Computes the average value of a numeric feature.\n Defaults to not ignoring NaNs when computing mean.\n\n \"\"\"\n name = \"mean\"\n input_types = [Numeric]\n return_type = Numeric\n\n def __init__(self, skipna=True):\n self.skipna = skipna\n\n def get_function(self):\n if self.skipna:\n # np.mean of series is functionally nanmean\n return np.mean\n\n def mean(series):\n return np.mean(series.values)\n return mean\n\n def generate_name(self, base_feature_names, child_entity_id,\n parent_entity_id, where_str, use_prev_str):\n skipna = \"\"\n if not self.skipna:\n skipna = \", skipna=False\"\n base_features_str = \", \".join(base_feature_names)\n return u\"%s(%s.%s%s%s%s)\" % (self.name.upper(),\n child_entity_id,\n base_features_str,\n where_str,\n use_prev_str,\n skipna)\n\n\nclass Mode(AggregationPrimitive):\n \"\"\"Finds the most common element in a categorical feature.\"\"\"\n name = \"mode\"\n input_types = [Discrete]\n return_type = None\n\n def get_function(self):\n def pd_mode(s):\n return s.mode().get(0, np.nan)\n return pd_mode\n\n\nMin = make_agg_primitive(\n np.min,\n [Numeric],\n Numeric,\n name=\"Min\",\n stack_on_self=False,\n description=\"Finds the minimum non-null value of a numeric feature.\")\n\n\nclass Max(AggregationPrimitive):\n \"\"\"Finds the maximum non-null value of a numeric feature.\"\"\"\n name = \"max\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on_self = False\n\n def get_function(self):\n return np.max\n\n\nclass NUnique(AggregationPrimitive):\n \"\"\"Returns the number of unique categorical variables.\"\"\"\n name = \"num_unique\"\n input_types = [Discrete]\n return_type = Numeric\n stack_on_self = False\n\n def get_function(self):\n # note: returning pd.Series.nunique errors for python2,\n # so using this branching code path while we support python2\n from sys import version_info\n if version_info.major < 3:\n def nunique(x):\n return pd.Series(x).nunique()\n return nunique\n else:\n return pd.Series.nunique\n\n\nclass NumTrue(AggregationPrimitive):\n \"\"\"Finds the number of 'True' values in a boolean.\"\"\"\n name = \"num_true\"\n input_types = [Boolean]\n return_type = Numeric\n default_value = 0\n stack_on = []\n stack_on_exclude = []\n\n def get_function(self):\n return np.sum\n\n\nclass PercentTrue(AggregationPrimitive):\n \"\"\"Finds the percent of 'True' values in a boolean feature.\"\"\"\n name = \"percent_true\"\n input_types = [Boolean]\n 
return_type = Numeric\n stack_on = []\n stack_on_exclude = []\n default_value = 0\n\n def get_function(self):\n def percent_true(s):\n return s.fillna(0).mean()\n return percent_true\n\n\nclass NMostCommon(AggregationPrimitive):\n \"\"\"Finds the N most common elements in a categorical feature.\"\"\"\n name = \"n_most_common\"\n input_types = [Discrete]\n return_type = Discrete\n\n def __init__(self, n=3):\n self.number_output_features = n\n\n def get_function(self):\n def n_most_common(x, n=self.number_output_features):\n array = np.array(x.value_counts()[:n].index)\n if len(array) < n:\n filler = np.full(n - len(array), np.nan)\n array = np.append(array, filler)\n return array\n return n_most_common\n\n\nclass AvgTimeBetween(AggregationPrimitive):\n \"\"\"Computes the average time between consecutive events.\n\n Note: equivalent to Mean(Diff(time_index)), but more performant\n \"\"\"\n\n # Potentially unnecessary if we add an trans_feat that\n # calculates the difference between events. DFS\n # should then calculate the average of that trans_feat\n # which amounts to AvgTimeBetween\n name = \"avg_time_between\"\n input_types = [DatetimeTimeIndex]\n return_type = Numeric\n\n def get_function(self):\n def pd_avg_time_between(x):\n \"\"\"Assumes time scales are closer to order\n of seconds than to nanoseconds\n if times are much closer to nanoseconds\n we could get some floating point errors\n\n this can be fixed with another function\n that calculates the mean before converting\n to seconds\n \"\"\"\n x = x.dropna()\n if x.shape[0] < 2:\n return np.nan\n if isinstance(x.iloc[0], (pd.Timestamp, datetime)):\n x = x.astype('int64')\n # use len(x)-1 because we care about difference\n # between values, len(x)-1 = len(diff(x))\n\n avg = (x.max() - x.min()) / (len(x) - 1)\n avg = avg * 1e-9\n\n # long form:\n # diff_in_ns = x.diff().iloc[1:].astype('int64')\n # diff_in_seconds = diff_in_ns * 1e-9\n # avg = diff_in_seconds.mean()\n return avg\n return pd_avg_time_between\n\n\nclass Median(AggregationPrimitive):\n \"\"\"Finds the median value of any feature with well-ordered values.\"\"\"\n name = \"median\"\n input_types = [Numeric]\n return_type = Numeric\n\n def get_function(self):\n return lambda x: x.median()\n\n\nclass Skew(AggregationPrimitive):\n \"\"\"Computes the skewness of a data set.\n\n For normally distributed data, the skewness should be about 0. 
A skewness\n value > 0 means that there is more weight in the left tail of the\n distribution.\n \"\"\"\n name = \"skew\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on = []\n stack_on_self = False\n\n def get_function(self):\n return 'skew'\n\n\nclass Std(AggregationPrimitive):\n \"\"\"Finds the standard deviation of a numeric feature ignoring null values.\n \"\"\"\n name = \"std\"\n input_types = [Numeric]\n return_type = Numeric\n stack_on_self = False\n\n def get_function(self):\n return np.std\n\n\nclass Last(AggregationPrimitive):\n \"\"\"Returns the last value.\"\"\"\n name = \"last\"\n input_types = [Variable]\n return_type = None\n stack_on_self = False\n\n def get_function(self):\n def pd_last(x):\n return x.iloc[-1]\n return pd_last\n\n\nclass Any(AggregationPrimitive):\n \"\"\"Test if any value is 'True'.\"\"\"\n name = \"any\"\n input_types = [Boolean]\n return_type = Boolean\n stack_on_self = False\n\n def get_function(self):\n return np.any\n\n\nclass All(AggregationPrimitive):\n \"\"\"Test if all values are 'True'.\"\"\"\n name = \"all\"\n input_types = [Boolean]\n return_type = Boolean\n stack_on_self = False\n\n def get_function(self):\n return np.all\n\n\nclass TimeSinceLast(AggregationPrimitive):\n \"\"\"Time since last related instance.\"\"\"\n name = \"time_since_last\"\n input_types = [DatetimeTimeIndex]\n return_type = Numeric\n uses_calc_time = True\n\n def get_function(self):\n\n def time_since_last(values, time=None):\n time_since = time - values.iloc[-1]\n return time_since.total_seconds()\n\n return time_since_last\n\n\nclass TimeSinceFirst(AggregationPrimitive):\n \"\"\"Time since first related instance.\"\"\"\n name = \"time_since_first\"\n input_types = [DatetimeTimeIndex]\n return_type = Numeric\n uses_calc_time = True\n\n def get_function(self):\n\n def time_since_first(values, time=None):\n time_since = time - values.iloc[0]\n return time_since.total_seconds()\n\n return time_since_first\n\n\nclass Trend(AggregationPrimitive):\n \"\"\"Calculates the slope of the linear trend of variable overtime.\"\"\"\n name = \"trend\"\n input_types = [Numeric, DatetimeTimeIndex]\n return_type = Numeric\n\n def get_function(self):\n def pd_trend(y, x):\n df = pd.DataFrame({\"x\": x, \"y\": y}).dropna()\n if df.shape[0] <= 2:\n return np.nan\n if isinstance(df['x'].iloc[0], (datetime, pd.Timestamp)):\n x = convert_datetime_to_floats(df['x'])\n else:\n x = df['x'].values\n\n if isinstance(df['y'].iloc[0], (datetime, pd.Timestamp)):\n y = convert_datetime_to_floats(df['y'])\n elif isinstance(df['y'].iloc[0], (timedelta, pd.Timedelta)):\n y = convert_timedelta_to_floats(df['y'])\n else:\n y = df['y'].values\n\n x = x - x.mean()\n y = y - y.mean()\n\n # prevent divide by zero error\n if len(np.unique(x)) == 1:\n return 0\n\n # consider scipy.stats.linregress for large n cases\n coefficients = np.polyfit(x, y, 1)\n\n return coefficients[0]\n return pd_trend\n\n\ndef convert_datetime_to_floats(x):\n first = int(x.iloc[0].value * 1e-9)\n x = pd.to_numeric(x).astype(np.float64).values\n dividend = find_dividend_by_unit(first)\n x *= (1e-9 / dividend)\n return x\n\n\ndef convert_timedelta_to_floats(x):\n first = int(x.iloc[0].total_seconds())\n dividend = find_dividend_by_unit(first)\n x = pd.TimedeltaIndex(x).total_seconds().astype(np.float64) / dividend\n return x\n\n\ndef find_dividend_by_unit(time):\n \"\"\"Finds whether time best corresponds to a value in\n days, hours, minutes, or seconds.\n \"\"\"\n for dividend in [86400, 3600, 60]:\n div = time / dividend\n 
if round(div) == div:\n return dividend\n return 1\n" ]
[ [ "pandas.Series", "numpy.append", "pandas.to_numeric", "pandas.DataFrame", "pandas.TimedeltaIndex", "numpy.polyfit", "numpy.unique", "numpy.mean" ] ]
yardenas/meta-learning-tutorial
[ "c5154eae85f6255f58fe6028ab630e3499238b3a" ]
[ "omniglot_dataset.py" ]
[ "from typing import Iterator, List, Tuple\n\nimport os\nimport random\n\nimport numpy as np\n\nfrom tensorflow import data as tfd\nfrom tensorflow import image as tfi\nfrom tensorflow import io as tfio\nfrom tensorflow import dtypes\nimport tensorflow as tf\n\nfrom google_drive_downloader import GoogleDriveDownloader\n\n\nclass Omniglot:\n\n def __init__(self,\n meta_batch_size: int,\n num_classes: int,\n num_samples_per_class: int,\n seed: int = 666):\n self.meta_batch_size = meta_batch_size\n self.num_samples_per_class = num_samples_per_class\n self.num_classes = num_classes\n self.seed = seed\n if not os.path.isdir('./omniglot_resized'):\n GoogleDriveDownloader.download_file_from_google_drive(\n file_id='1iaSFXIYC3AB8q9K_M-oVMa4pmB7yKMtI',\n dest_path='./omniglot_resized.zip',\n unzip=True)\n\n data_folder = './omniglot_resized'\n self.img_size = 28, 28\n\n character_folders = [\n os.path.join(data_folder, family, character)\n for family in os.listdir(data_folder)\n if os.path.isdir(os.path.join(data_folder, family))\n for character in os.listdir(os.path.join(data_folder, family))\n if os.path.isdir(os.path.join(data_folder, family, character))\n ]\n\n random.seed(1)\n random.shuffle(character_folders)\n num_val = 100\n num_train = 1100\n self.metatrain = self._make_dataset(character_folders[:num_train])\n self.metaval = self._make_dataset(character_folders[num_train:num_train +\n num_val])\n self.metatest = self._make_dataset(character_folders[num_train + num_val:])\n\n @property\n def train_set(\n self\n ) -> Iterator[Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray,\n np.ndarray]]]:\n yield from self.metatrain.as_numpy_iterator()\n\n @property\n def eval_set(\n self\n ) -> Iterator[Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray,\n np.ndarray]]]:\n yield from self.metaval.as_numpy_iterator()\n\n @property\n def test_set(\n self\n ) -> Iterator[Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray,\n np.ndarray]]]:\n yield from self.metatest.as_numpy_iterator()\n\n def _make_dataset(self, folders: List[str]) -> tfd.Dataset:\n characters = tfd.Dataset.from_tensor_slices(folders).shuffle(\n 1100, seed=self.seed, reshuffle_each_iteration=True)\n\n def get_images_filenames(char):\n all_images = tfio.matching_files(char + '/*.png')\n return tfd.Dataset.from_tensor_slices(\n tf.random.shuffle(all_images,\n seed=self.seed)[:self.num_samples_per_class + 1])\n\n # Use interleave to read the relevant .png files as we iterate through the\n # 1100 different chars. Set block_length to num_samples_per_class so that\n # we can next batch images from same char together.\n image_filenames = characters.interleave(\n get_images_filenames,\n num_parallel_calls=tfd.AUTOTUNE,\n block_length=self.num_samples_per_class + 1).repeat()\n\n def load_image(image_filename):\n img = tfio.read_file(image_filename)\n img = tfio.decode_png(img, channels=1)\n img = tfi.resize(img, self.img_size)\n img = tf.cast(img, dtypes.float32) / 255.0\n img = 1.0 - img\n return img\n\n # Unbatch map and batch to allow tf to read images concurrently. 
Class\n # grouping is maintained.\n shots = image_filenames.map(\n load_image,\n num_parallel_calls=tfd.AUTOTUNE).batch(self.num_samples_per_class + 1)\n ways = shots.batch(self.num_classes)\n tasks = ways.batch(self.meta_batch_size)\n\n def to_support_and_query_sets(batch):\n support_x, query_x = tf.split(\n tf.transpose(batch, (0, 2, 1, 3, 4, 5)),\n (self.num_samples_per_class, 1),\n axis=1)\n support_y, query_y = tf.split(\n tf.eye(\n self.num_classes,\n batch_shape=(self.meta_batch_size,\n self.num_samples_per_class + 1)),\n (self.num_samples_per_class, 1),\n axis=1)\n ids = tf.range(0, self.num_classes, dtype=dtypes.int32)\n ids = tf.random.shuffle(ids, seed=self.seed)\n query_x = tf.gather(query_x, ids, axis=2)\n query_y = tf.gather(query_y, ids, axis=2)\n new_shape = lambda x: tf.concat([(self.meta_batch_size, -1),\n tf.shape(x)[3:]], 0)\n reshape = lambda x: tf.reshape(x, new_shape(x))\n return (reshape(support_x), reshape(support_y)), (reshape(query_x),\n reshape(query_y))\n\n return tasks.map(\n to_support_and_query_sets,\n num_parallel_calls=tfd.AUTOTUNE).prefetch(tfd.AUTOTUNE)\n" ]
[ [ "tensorflow.shape", "tensorflow.random.shuffle", "tensorflow.range", "tensorflow.image.resize", "tensorflow.io.decode_png", "tensorflow.eye", "tensorflow.cast", "tensorflow.io.read_file", "tensorflow.io.matching_files", "tensorflow.gather", "tensorflow.transpose", "tensorflow.data.Dataset.from_tensor_slices" ] ]
zyxwvu321/Classifer_SSL_Longtail
[ "e6c09414c49e695b0f4221a3c6245ae3929a1788" ]
[ "modeling/backbones/senet.py" ]
[ "\"\"\"\nResNet code gently borrowed from\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nfrom collections import OrderedDict\nimport math\nimport torch\nimport torch.nn as nn\nfrom torch.utils import model_zoo\n\n__all__ = ['SENet', 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152',\n 'se_resnext50_32x4d', 'se_resnext101_32x4d']\n\npretrained_settings = {\n 'senet154': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet50': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet101': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnet152': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnext50_32x4d': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n 'se_resnext101_32x4d': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 224, 224],\n 'input_range': [0, 1],\n 'mean': [0.485, 0.456, 0.406],\n 'std': [0.229, 0.224, 0.225],\n 'num_classes': 1000\n }\n },\n}\n\n\nclass SEModule(nn.Module):\n\n def __init__(self, channels, reduction):\n super(SEModule, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,\n padding=0)\n self.relu = nn.ReLU(inplace=True)\n self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,\n padding=0)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n module_input = x\n x = self.avg_pool(x)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = self.sigmoid(x)\n return module_input * x\n\n\nclass Bottleneck(nn.Module):\n \"\"\"\n Base class for bottlenecks that implements `forward()` method.\n \"\"\"\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out = self.se_module(out) + residual\n out = self.relu(out)\n\n return out\n\n\nclass SEBottleneck(Bottleneck):\n \"\"\"\n Bottleneck for SENet154.\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None):\n super(SEBottleneck, self).__init__()\n self.conv1 = 
nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes * 2)\n self.conv2 = nn.Conv2d(planes * 2, planes * 4, kernel_size=3,\n stride=stride, padding=1, groups=groups,\n bias=False)\n self.bn2 = nn.BatchNorm2d(planes * 4)\n self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1,\n bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNetBottleneck(Bottleneck):\n \"\"\"\n ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe\n implementation and uses `stride=stride` in `conv1` and not in `conv2`\n (the latter is used in the torchvision implementation of ResNet).\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None):\n super(SEResNetBottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False,\n stride=stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1,\n groups=groups, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SEResNeXtBottleneck(Bottleneck):\n \"\"\"\n ResNeXt bottleneck type C with a Squeeze-and-Excitation module.\n \"\"\"\n expansion = 4\n\n def __init__(self, inplanes, planes, groups, reduction, stride=1,\n downsample=None, base_width=4):\n super(SEResNeXtBottleneck, self).__init__()\n width = math.floor(planes * (base_width / 64)) * groups\n self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False,\n stride=1)\n self.bn1 = nn.BatchNorm2d(width)\n self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride,\n padding=1, groups=groups, bias=False)\n self.bn2 = nn.BatchNorm2d(width)\n self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.se_module = SEModule(planes * 4, reduction=reduction)\n self.downsample = downsample\n self.stride = stride\n\n\nclass SENet(nn.Module):\n\n def __init__(self, block, layers, groups, reduction, dropout_p=0.2,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, last_stride=2, last2_stride = 2):\n \"\"\"\n Parameters\n ----------\n block (nn.Module): Bottleneck class.\n - For SENet154: SEBottleneck\n - For SE-ResNet models: SEResNetBottleneck\n - For SE-ResNeXt models: SEResNeXtBottleneck\n layers (list of ints): Number of residual blocks for 4 layers of the\n network (layer1...layer4).\n groups (int): Number of groups for the 3x3 convolution in each\n bottleneck block.\n - For SENet154: 64\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 32\n reduction (int): Reduction ratio for Squeeze-and-Excitation modules.\n - For all models: 16\n dropout_p (float or None): Drop probability for the Dropout layer.\n If `None` the Dropout layer is not used.\n - For SENet154: 0.2\n - For SE-ResNet models: None\n - For SE-ResNeXt models: None\n inplanes (int): Number of input channels for layer1.\n - For SENet154: 128\n - For SE-ResNet models: 64\n - For SE-ResNeXt models: 64\n input_3x3 (bool): If `True`, use three 3x3 convolutions instead of\n a single 7x7 convolution in 
layer0.\n - For SENet154: True\n - For SE-ResNet models: False\n - For SE-ResNeXt models: False\n downsample_kernel_size (int): Kernel size for downsampling convolutions\n in layer2, layer3 and layer4.\n - For SENet154: 3\n - For SE-ResNet models: 1\n - For SE-ResNeXt models: 1\n downsample_padding (int): Padding for downsampling convolutions in\n layer2, layer3 and layer4.\n - For SENet154: 1\n - For SE-ResNet models: 0\n - For SE-ResNeXt models: 0\n num_classes (int): Number of outputs in `last_linear` layer.\n - For all models: 1000\n \"\"\"\n super(SENet, self).__init__()\n self.inplanes = inplanes\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(3, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n # To preserve compatibility with Caffe weights `ceil_mode=True`\n # is used instead of `padding=1`.\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2,\n ceil_mode=True)))\n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=last2_stride,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=last_stride,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout(dropout_p) if dropout_p is not None else None\n\n def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,\n downsample_kernel_size=1, downsample_padding=0):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=downsample_kernel_size, stride=stride,\n padding=downsample_padding, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, groups, reduction, stride,\n downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups, reduction))\n\n return nn.Sequential(*layers)\n \n def load_param(self, model_path):\n param_dict = torch.load(model_path)\n for i in param_dict:\n if 'last_linear' in i:\n continue\n self.state_dict()[i].copy_(param_dict[i])\n\n def forward(self, x):\n x = self.layer0(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.load", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.nn.Dropout" ] ]
emirhanai/Machine-Learning-Prediction-Software-Based-on-Classification-and-Regression-Based-on-Processor-CPU-
[ "051be998eb9195dccf28c2e7607ead0812c79cf1" ]
[ "Machine Learning Prediction Software Based on Classification and Regression Based on Processor [CPU] Specifications.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\nfrom sklearn.tree import *\r\nfrom sklearn.ensemble import *\r\nfrom sklearn.preprocessing import *\r\nfrom sklearn.model_selection import *\r\nfrom sklearn.metrics import *\r\n\r\n\r\ndata = pd.read_csv('data.csv')\r\n\r\nX = data.drop(['Company','Processor Name'],axis='columns')\r\ny = data.drop(['Turbo Speed (GHz)','Processor Name','Processor Cores','Processor Threads','Typical TDP (W)','Average CPU Mark'],axis='columns')\r\n\r\n#load of change function for columns changing.\r\ny_data = LabelEncoder()\r\n\r\n#print(y)\r\n\r\ny['Company_Change'] = y_data.fit_transform(y['Company'])\r\n\r\ny_update_data = y.drop(['Company'],axis='columns')\r\n\r\nfloat_y_update_data = np.float64(y_update_data)\r\n\r\n#print(float_y_update_data)\r\n\r\n#for i in np.arange(0,1,1):\r\n\r\n#X_train,X_test,y_train and y_test files of creating (with suitable parameters).\r\nX_train, X_test, y_train, y_test = train_test_split(X, y_update_data, test_size=0.2, random_state=15, shuffle=True,\r\n stratify=None)\r\n# model - processor classifier\r\nmodel_processor = ExtraTreeClassifier(criterion=\"gini\", splitter=\"random\")\r\n\r\n# model - processor regression\r\nmodel_processor_regression = ExtraTreesRegressor(n_estimators=1)\r\n\r\n# model - processor fit\r\nmodel_processor_regression.fit(X_train, y_train)\r\n\r\n# model - processor classifier fit\r\nmodel_processor.fit(X_train, y_train)\r\n\r\n# \"\"CLASSIFIER OF SCORE AND RESULT\"\"\r\n\r\n# model - processor classifier y_pred\r\n\r\ny_pred_of_model = model_processor.predict(X_test)\r\n\r\n# model classifier score of result\r\n# print(\"Select of X {} \".format(i))\r\nprint(\"Classifier Accuracy Score: {} \".format(accuracy_score(y_test,y_pred_of_model)))\r\nprint(\"Classifier Precision Score: {} \".format(precision_score(y_test,y_pred_of_model)))\r\nprint(\"Classifier Recall Score: {} \".format(recall_score(y_test,y_pred_of_model)))\r\nprint(\"Classifier F1 Score: {} \".format(f1_score(y_test,y_pred_of_model)))\r\na,b,_ = roc_curve(y_test,y_pred_of_model)\r\nprint(\"Classifier AUC Score: {} \".format(auc(a,b)))\r\nprint(\"Classifier Confision Matrix: {} \".format(confusion_matrix(y_test,y_pred_of_model)))\r\n\r\n# \"\"REGRESSION OF SCORE AND RESULT\"\"\r\n\r\ny_pred_of_regression_in_model = model_processor_regression.predict(X_test)\r\n\r\n# print(\"Select of X {} \".format(i))\r\nprint(\"Regression Accuracy Score: {} \".format(accuracy_score(y_test, y_pred_of_regression_in_model)))\r\nprint(\"Regression Precision Score: {} \".format(precision_score(y_test, y_pred_of_regression_in_model)))\r\nprint(\"Regression Recall Score: {} \".format(recall_score(y_test, y_pred_of_regression_in_model)))\r\nprint(\"Regression F1 Score: {} \".format(f1_score(y_test, y_pred_of_regression_in_model)))\r\na, b, _ = roc_curve(y_test, y_pred_of_regression_in_model)\r\nprint(\"Regression AUC Score: {} \".format(auc(a, b)))\r\nprint(\"Regression Confision Matrix: {} \".format(confusion_matrix(y_test, y_pred_of_regression_in_model)))\r\n\r\n# Enter you random value for Features :)\r\nProcessor_Cores = int(input(\"Enter, Processor Cores: \"))\r\nProcessor_Threads = int(input(\"Enter, Processor Threads: \"))\r\nTurbo_Speed_GHz = float(input(\"Enter, Turbo Speed (GHz): \"))\r\nTypical_TDP_W = int(input(\"Enter, Typical TDP (W): \"))\r\nAverage_CPU_Mark = int(input(\"Enter, Average CPU Mark: \"))\r\n\r\n# prediction, random value of Company!\r\nprediction_of_company_random_value = model_processor_regression.predict(\r\n [[Processor_Cores, 
Processor_Threads, Turbo_Speed_GHz, Typical_TDP_W, Average_CPU_Mark]])\r\n\r\n# Map the predicted label back to the company name :)\r\ndata_class = pd.read_csv('class.csv', index_col=None, na_values=None)\r\nclass_value_detect = data_class.columns.values[int(prediction_of_company_random_value)]\r\nprint('Prediction company: {} '.format(class_value_detect))\r\n\r\n# save the classifier model to a .dot file :)\r\nfrom graphviz import Source\r\ndotfile = open(\"emirhan_project.dot\",'w')\r\n\r\ngraph_of_data_dot = Source(export_graphviz(model_processor,\r\n filled=True,\r\n rounded=True,\r\n out_file=dotfile,\r\n feature_names=X.columns,\r\n class_names=['AMD = 0','INTEL = 1']))\r\ndotfile.close()\r\n\r\n#CLASSIFICATION RESULT\r\n\r\n#Classifier Accuracy Score: 1.0\r\n#Classifier Precision Score: 1.0\r\n#Classifier Recall Score: 1.0\r\n#Classifier F1 Score: 1.0\r\n#Classifier AUC Score: 1.0\r\n#Classifier Confusion Matrix: [[5 0]\r\n #[0 2]]\r\n\r\n#REGRESSION RESULT\r\n\r\n#Regression Accuracy Score: 1.0\r\n#Regression Precision Score: 1.0\r\n#Regression Recall Score: 1.0\r\n#Regression F1 Score: 1.0\r\n#Regression AUC Score: 1.0\r\n#Regression Confusion Matrix: [[5 0]\r\n #[0 2]]\r\n" ]
[ [ "pandas.read_csv", "numpy.float64" ] ]
ytorzuk-altran/openvino
[ "031e998a15ec738c64cc2379d7f30fb73087c272", "68d460a3bb578a738ba0e4d0e1f2e321afa73ab0" ]
[ "src/core/tests/frontend/paddlepaddle/test_models/gen_scripts/generate_yolo_box.py", "tools/mo/openvino/tools/mo/front/onnx/lstm_ext.py" ]
[ "#\n# pool2d paddle model generator\n#\nimport numpy as np\nfrom save_model import saveModel\nimport sys\n\ndef yolo_box(name : str, x, img_size, attrs : dict):\n import paddle as pdpd\n pdpd.enable_static()\n \n with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):\n node_x = pdpd.static.data(name='x', shape=x.shape, dtype=x.dtype)\n node_img_size = pdpd.static.data(name='img_size', shape=img_size.shape, dtype=img_size.dtype)\n boxes, scores = pdpd.vision.ops.yolo_box(node_x,\n node_img_size,\n anchors=attrs['anchors'],\n class_num=attrs['class_num'],\n conf_thresh=attrs['conf_thresh'],\n downsample_ratio=attrs['downsample_ratio'],\n clip_bbox=attrs['clip_bbox'],\n name=None, \n scale_x_y=attrs['scale_x_y'])\n\n cpu = pdpd.static.cpu_places(1)\n exe = pdpd.static.Executor(cpu[0])\n # startup program will call initializer to initialize the parameters.\n exe.run(pdpd.static.default_startup_program())\n\n outs = exe.run(\n feed={'x': x, 'img_size': img_size},\n fetch_list=[boxes, scores])\n \n # Save inputs in order of ngraph function, to facilite Fuzzy test, \n # which accepts inputs and outputs in this order as well. \n saveModel(name, exe, feedkeys=['x', 'img_size'], fetchlist=[boxes, scores],\n inputs=[x, img_size], outputs=outs, target_dir=sys.argv[1])\n\n return outs\n\n\ndef TEST1():\n # yolo_box\n pdpd_attrs = {\n 'name': \"yolo_box_default\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': False,\n 'scale_x_y': 1.0\n }\n\n pdpd_attrs_clip_box = {\n 'name': \"yolo_box_clip_box\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': True,\n 'scale_x_y': 1.0\n }\n\n pdpd_attrs_scale_xy = {\n 'name': \"yolo_box_scale_xy\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': True,\n 'scale_x_y': 1.2\n }\n\n pdpd_attrs_list = [pdpd_attrs, pdpd_attrs_clip_box, pdpd_attrs_scale_xy]\n \n N = 32\n num_anchors = int(len(pdpd_attrs['anchors'])//2)\n x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), 13, 13)\n imgsize_shape = (N, 2)\n\n data = np.random.random(x_shape).astype('float32')\n data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32') \n\n for item in pdpd_attrs_list:\n pred_pdpd = yolo_box(item['name'], data, data_ImSize, item)\n\n\ndef TEST2():\n # yolo_box uneven spatial width and height\n pdpd_attrs = {\n 'name': \"yolo_box_uneven_wh\",\n 'anchors': [10, 13, 16, 30, 33, 23],\n 'class_num': 2,\n 'conf_thresh': 0.5,\n 'downsample_ratio': 32,\n 'clip_bbox': False,\n 'scale_x_y': 1.0\n }\n\n N = 16\n SPATIAL_WIDTH = 13\n SPATIAL_HEIGHT = 9\n num_anchors = int(len(pdpd_attrs['anchors'])//2)\n x_shape = (N, num_anchors * (5 + pdpd_attrs['class_num']), SPATIAL_HEIGHT, SPATIAL_WIDTH)\n imgsize_shape = (N, 2)\n\n data = np.random.random(x_shape).astype('float32')\n data_ImSize = np.random.randint(10, 20, imgsize_shape).astype('int32')\n \n pred_pdpd = yolo_box(pdpd_attrs['name'], data, data_ImSize, pdpd_attrs)\n\nif __name__ == \"__main__\":\n TEST1()\n TEST2()", "# Copyright (C) 2018-2021 Intel Corporation\n# SPDX-License-Identifier: Apache-2.0\n\nimport numpy as np\n\nfrom openvino.tools.mo.ops.LSTM import LSTM\nfrom openvino.tools.mo.front.extractor import FrontExtractorOp\nfrom openvino.tools.mo.front.onnx.extractors.utils import onnx_attr\n\n\nclass LSTMFrontExtractor(FrontExtractorOp):\n op = 'LSTM'\n enabled = True\n\n @classmethod\n 
def extract(cls, node):\n activation_alpha = onnx_attr(node, 'activation_alpha', 'floats',\n default=None, dst_type=lambda x: np.array(x, dtype=np.float32))\n activation_beta = onnx_attr(node, 'activation_beta', 'floats',\n default=None, dst_type=lambda x: np.array(x, dtype=np.float32))\n activations = onnx_attr(node, 'activations', 'strings', default=None,\n dst_type=lambda x: list(map(lambda s: s.decode(encoding=\"utf-8\").lower(), list(x))))\n clip = onnx_attr(node, 'clip', 'f', default=None)\n input_forget = onnx_attr(node, 'input_forget', 'i', default=0)\n\n attrs = {\n 'batch_dim': 1,\n 'sequence_dim': 0,\n 'blobs_wrb': True,\n 'has_num_directions': True,\n 'num_layers': 1,\n 'format': 'onnx',\n 'multilayers': False,\n 'gate_order': [2, 0, 3, 1], # iofc --> fico\n\n # ONNX attrs\n 'activation_alpha': activation_alpha,\n 'activation_beta': activation_beta,\n 'activations': activations,\n 'clip': clip,\n 'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(),\n 'hidden_size': np.array(onnx_attr(node, 'hidden_size', 'i'), dtype=np.int64),\n 'input_forget': input_forget,\n }\n\n LSTM.update_node_stat(node, attrs)\n return cls.enabled\n" ]
[ [ "numpy.random.random", "numpy.random.randint" ], [ "numpy.array" ] ]
guidefloripa/kerasify
[ "cbb2ea6cae61ccd551b0f5327433d23e8e8050ee" ]
[ "make_tests.py" ]
[ "import numpy as np\nimport pprint\n\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, Dense, Flatten, Activation, MaxPooling2D, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.advanced_activations import ELU\nfrom keras.layers.embeddings import Embedding\n\nfrom kerasify import export_model\n\nnp.set_printoptions(precision=25, threshold=np.nan)\n\ndef c_array(a):\n s = pprint.pformat(a.flatten())\n s = s.replace('[', '{').replace(']', '}').replace('array(', '').replace(')', '').replace(', dtype=float32', '')\n\n shape = ''\n\n if a.shape == ():\n s = '{%s}' % s\n shape = '(1)'\n else:\n shape = repr(a.shape).replace(',)', ')')\n\n return shape, s\n\n\nTEST_CASE = '''\nbool test_%s(double* load_time, double* apply_time)\n{\n printf(\"TEST %s\\\\n\");\n\n KASSERT(load_time, \"Invalid double\");\n KASSERT(apply_time, \"Invalid double\");\n\n Tensor in%s;\n in.data_ = %s;\n\n Tensor out%s;\n out.data_ = %s;\n\n KerasTimer load_timer;\n load_timer.Start();\n\n KerasModel model;\n KASSERT(model.LoadModel(\"test_%s.model\"), \"Failed to load model\");\n\n *load_time = load_timer.Stop();\n\n KerasTimer apply_timer;\n apply_timer.Start();\n\n Tensor predict = out;\n KASSERT(model.Apply(&in, &out), \"Failed to apply\");\n\n *apply_time = apply_timer.Stop();\n\n for (int i = 0; i < out.dims_[0]; i++)\n {\n KASSERT_EQ(out(i), predict(i), %s);\n }\n\n return true;\n}\n'''\n\ndef output_testcase(model, test_x, test_y, name, eps):\n print(\"Processing %s\" % name)\n model.compile(loss='mean_squared_error', optimizer='adamax')\n model.fit(test_x, test_y, nb_epoch=1, verbose=False)\n predict_y = model.predict(test_x).astype('f')\n print(model.summary())\n\n export_model(model, 'test_%s.model' % name)\n\n with open('test_%s.h' % name, 'w') as f:\n x_shape, x_data = c_array(test_x[0])\n y_shape, y_data = c_array(predict_y[0])\n\n f.write(TEST_CASE % (name, name, x_shape, x_data, y_shape, y_data, name, eps))\n\n\n\n''' Dense 1x1 '''\ntest_x = np.arange(10)\ntest_y = test_x * 10 + 1\nmodel = Sequential()\nmodel.add(Dense(1, input_dim=1))\n\noutput_testcase(model, test_x, test_y, 'dense_1x1', '1e-6')\n\n''' Dense 10x1 '''\ntest_x = np.random.rand(10, 10).astype('f')\ntest_y = np.random.rand(10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(1, input_dim=10))\n\noutput_testcase(model, test_x, test_y, 'dense_10x1', '1e-6')\n\n''' Dense 2x2 '''\ntest_x = np.random.rand(10, 2).astype('f')\ntest_y = np.random.rand(10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(2, input_dim=2))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'dense_2x2', '1e-6')\n\n''' Dense 10x10 '''\ntest_x = np.random.rand(10, 10).astype('f')\ntest_y = np.random.rand(10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'dense_10x10', '1e-6')\n\n''' Dense 10x10x10 '''\ntest_x = np.random.rand(10, 10).astype('f')\ntest_y = np.random.rand(10, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(Dense(10))\n\noutput_testcase(model, test_x, test_y, 'dense_10x10x10', '1e-6')\n\n''' Conv 2x2 '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_2x2', '1e-6')\n\n''' Conv 3x3 '''\ntest_x = np.random.rand(10, 1, 3, 3).astype('f').astype('f')\ntest_y = 
np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 3, 3, input_shape=(1, 3, 3)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_3x3', '1e-6')\n\n''' Conv 3x3x3 '''\ntest_x = np.random.rand(10, 3, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(3, 3, 3, input_shape=(3, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_3x3x3', '1e-6')\n\n''' Activation ELU '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 1).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(ELU(alpha=0.5))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'elu_10', '1e-6')\n\n''' Activation relu '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10))\nmodel.add(Activation('relu'))\n\noutput_testcase(model, test_x, test_y, 'relu_10', '1e-6')\n\n''' Dense relu '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10, activation='relu'))\nmodel.add(Dense(10, input_dim=10, activation='relu'))\nmodel.add(Dense(10, input_dim=10, activation='relu'))\n\noutput_testcase(model, test_x, test_y, 'dense_relu_10', '1e-6')\n\n''' Dense tanh '''\ntest_x = np.random.rand(1, 10).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Dense(10, input_dim=10, activation='tanh'))\nmodel.add(Dense(10, input_dim=10, activation='tanh'))\nmodel.add(Dense(10, input_dim=10, activation='tanh'))\n\noutput_testcase(model, test_x, test_y, 'dense_tanh_10', '1e-6')\n\n''' Conv softplus '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='softplus'))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_softplus_2x2', '1e-6')\n\n\n''' Conv hardsigmoid '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='hard_sigmoid'))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_hard_sigmoid_2x2', '1e-6')\n\n''' Conv sigmoid '''\ntest_x = np.random.rand(10, 1, 2, 2).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='sigmoid'))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'conv_sigmoid_2x2', '1e-6')\n\n\n''' Maxpooling2D 1x1'''\ntest_x = np.random.rand(10, 1, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(1, 1), input_shape=(1, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_1x1', '1e-6')\n\n''' Maxpooling2D 2x2'''\ntest_x = np.random.rand(10, 1, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(2, 2), input_shape=(1, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_2x2', '1e-6')\n\n''' Maxpooling2D 3x2x2'''\ntest_x = np.random.rand(10, 3, 10, 10).astype('f')\ntest_y = 
np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(2, 2), input_shape=(3, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_3x2x2', '1e-6')\n\n''' Maxpooling2D 3x3x3'''\ntest_x = np.random.rand(10, 3, 10, 10).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(MaxPooling2D(pool_size=(3, 3), input_shape=(3, 10, 10)))\nmodel.add(Flatten())\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'maxpool2d_3x3x3', '1e-6')\n\n''' LSTM simple 7x20 '''\ntest_x = np.random.rand(10, 7, 20).astype('f')\ntest_y = np.random.rand(10, 3).astype('f')\nmodel = Sequential()\nmodel.add(LSTM(3, return_sequences=False, input_shape=(7, 20)))\n\noutput_testcase(model, test_x, test_y, 'lstm_simple_7x20', '1e-6')\n\n\n''' LSTM simple stacked 20x9 '''\ntest_x = np.random.rand(10, 20, 9).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(LSTM(32, return_sequences=False, input_shape=(20, 9)))\nmodel.add(Dense(3, input_dim=32, activation='tanh'))\nmodel.add(Dense(1))\n\noutput_testcase(model, test_x, test_y, 'lstm_simple_stacked20x9', '1e-6')\n\n''' LSTM stacked 150x83 '''\ntest_x = np.random.rand(10, 150, 83).astype('f')\ntest_y = np.random.rand(10, 1).astype('f')\nmodel = Sequential()\nmodel.add(LSTM(32, return_sequences=True, input_shape=(150, 83)))\nmodel.add(LSTM(32, return_sequences=False))\nmodel.add(Dense(1, activation='sigmoid'))\n\noutput_testcase(model, test_x, test_y, 'lstm_stacked150x83', '1e-6')\n\n\n''' Embedding 64 '''\nnp.random.seed(10)\ntest_x = np.random.randint(100, size=(32, 10)).astype('f')\ntest_y = np.random.rand(32, 20).astype('f')\nmodel = Sequential()\nmodel.add(Embedding(100, 64, input_length=10))\nmodel.add(Flatten())\n#model.add(Dropout(0.5))\nmodel.add(Dense(20, activation='sigmoid'))\n\noutput_testcase(model, test_x, test_y, 'embedding64', '1e-6')\n\n\n''' Benchmark '''\ntest_x = np.random.rand(1, 3, 128, 128).astype('f')\ntest_y = np.random.rand(1, 10).astype('f')\nmodel = Sequential()\nmodel.add(Convolution2D(16, 7, 7, input_shape=(3, 128, 128), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(3, 3)))\nmodel.add(ELU())\nmodel.add(Convolution2D(8, 3, 3))\nmodel.add(Flatten())\nmodel.add(Dense(1000, activation='relu'))\nmodel.add(Dense(10))\n\noutput_testcase(model, test_x, test_y, 'benchmark', '1e-3')\n\n\n" ]
[ [ "numpy.random.seed", "numpy.set_printoptions", "numpy.arange", "numpy.random.rand", "numpy.random.randint" ] ]
gjkennedy/OpenMDAO
[ "06897b584403cce34bc106dd2840aa07eea69e96" ]
[ "openmdao/surrogate_models/tests/test_map.py" ]
[ "from openmdao.api import Group, Problem, MetaModelUnStructuredComp, NearestNeighbor\nfrom openmdao.utils.assert_utils import assert_near_equal\n\nimport numpy as np\nimport unittest\n\n\nclass CompressorMap(MetaModelUnStructuredComp):\n\n def __init__(self):\n super(CompressorMap, self).__init__()\n\n self.add_input('Nc', val=1.0)\n self.add_input('Rline', val=2.0)\n self.add_input('alpha', val=0.0)\n\n self.add_output('PR', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))\n self.add_output('eff', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))\n self.add_output('Wc', val=1.0, surrogate=NearestNeighbor(interpolant_type='linear'))\n\n\nclass TestMap(unittest.TestCase):\n\n def test_comp_map(self):\n # create compressor map and save reference to options (for training data)\n c = CompressorMap()\n m = c.options\n\n # add compressor map to problem\n p = Problem()\n p.model.add_subsystem('compmap', c)\n p.setup()\n\n # train metamodel\n Nc = np.array([0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1])\n Rline = np.array([1.0, 1.2, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8, 3.0])\n alpha = np.array([0.0, 1.0])\n Nc_mat, Rline_mat, alpha_mat = np.meshgrid(Nc, Rline, alpha, sparse=False)\n\n m['train:Nc'] = Nc_mat.flatten()\n m['train:Rline'] = Rline_mat.flatten()\n m['train:alpha'] = alpha_mat.flatten()\n\n m['train:PR'] = m['train:Nc']*m['train:Rline']+m['train:alpha']\n m['train:eff'] = m['train:Nc']*m['train:Rline']**2+m['train:alpha']\n m['train:Wc'] = m['train:Nc']**2*m['train:Rline']**2+m['train:alpha']\n\n # check predicted values\n p['compmap.Nc'] = 0.9\n p['compmap.Rline'] = 2.0\n p['compmap.alpha'] = 0.0\n p.run_model()\n\n tol = 1e-1\n assert_near_equal(p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n\n p['compmap.Nc'] = 0.95\n p['compmap.Rline'] = 2.1\n p['compmap.alpha'] = 0.0\n p.run_model()\n\n assert_near_equal(p['compmap.PR'], p['compmap.Nc']*p['compmap.Rline']+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.eff'], p['compmap.Nc']*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n assert_near_equal(p['compmap.Wc'], p['compmap.Nc']**2*p['compmap.Rline']**2+p['compmap.alpha'], tol)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.meshgrid" ] ]
pyensemble/wildwood
[ "b261cbd7d0b425b50647f719ab99c1d89f477d5c" ]
[ "plot_signals_weighted_depth.py" ]
[ "\nimport logging\nfrom matplotlib.cm import get_cmap\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport pandas as pd\n\nfrom wildwood.datasets import get_signal, make_regression\nfrom wildwood.forest import ForestRegressor\n\nfrom wildwood._binning import Binner\n\npd.set_option(\"display.max_columns\", 20)\npd.set_option(\"display.precision\", 2)\n\nlogging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n)\n\ncolormap = get_cmap(\"tab20\")\n\nn_samples_train = 5000\nn_samples_test = 1000\nrandom_state = 42\n\n\nnoise = 0.03\naggregation = True\nn_estimators = 100\n\nstep = 1 / noise ** 2\n\nsignal = \"heavisine\"\n\nX_train, y_train = make_regression(\n n_samples=n_samples_train, signal=signal, noise=noise, random_state=random_state\n)\nX_test = np.linspace(0, 1, num=n_samples_test)\n\n#\n# reg = ForestRegressor(\n# random_state=random_state,\n# aggregation=aggregation,\n# max_features=1,\n# n_estimators=n_estimators,\n# step=step,\n# )\n#\n# reg.fit(X_train.reshape(n_samples_train, 1), y_train)\n# y_pred = reg.predict(X_test.reshape(n_samples_test, 1))\n#\n# df = reg.get_nodes(0)\n\n# print(df)\n\n# exit(0)\n\nsignals = [\"heavisine\", \"bumps\", \"blocks\", \"doppler\"]\n\n\ndef plot_weighted_depth(signal):\n\n X_train, y_train = make_regression(\n n_samples=n_samples_train, signal=signal, noise=noise, random_state=random_state\n )\n X_train = X_train.reshape(-1, 1)\n X_test = np.linspace(0, 1, num=n_samples_test).reshape(-1, 1)\n\n binner = Binner().fit(X_train)\n X_test_binned = binner.transform(X_test)\n\n reg = ForestRegressor(\n random_state=random_state,\n aggregation=aggregation,\n n_estimators=n_estimators,\n step=step,\n )\n\n reg.fit(X_train, y_train)\n y_pred = reg.predict(X_test)\n weighted_depths = reg._weighted_depth(X_test.reshape(n_samples_test, 1))\n\n # print(\"weighted_depths.shape:\", weighted_depths.shape)\n\n # avg_weighted_depth = weighted_depths.mean(axis=0)\n\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(6, 5))\n\n plot_samples = ax1.plot(\n X_train, y_train, color=colormap.colors[1], lw=2, label=\"Samples\"\n )[0]\n plot_signal = ax1.plot(\n X_test_binned / 255,\n get_signal(X_test_binned / 255, signal),\n lw=2,\n color=colormap.colors[0],\n label=\"Signal\",\n )[0]\n plot_prediction = ax2.plot(\n X_test.ravel(), y_pred, lw=2, color=colormap.colors[2], label=\"Prediction\"\n )[0]\n # ax3.plot(\n # X_test,\n # weighted_depths[:, 1:],\n # lw=1,\n # color=colormap.colors[5],\n # alpha=0.2,\n # label=\"Weighted depths\",\n # )\n plot_weighted_depths = ax3.plot(\n X_test, weighted_depths.T, lw=1, color=colormap.colors[5], alpha=0.2\n )[0]\n\n plot_mean_weighted_depths = ax3.plot(\n X_test,\n weighted_depths.mean(axis=0),\n lw=2,\n color=colormap.colors[4],\n label=\"Mean weighted depth\",\n )[0]\n filename = \"weighted_depths_%s.pdf\" % signal\n fig.subplots_adjust(hspace=0.1)\n fig.legend(\n (\n plot_signal,\n plot_samples,\n plot_mean_weighted_depths,\n plot_weighted_depths,\n plot_prediction,\n ),\n (\n \"Signal\",\n \"Samples\",\n \"Average weighted depths\",\n \"Weighted depths\",\n \"Prediction\",\n ),\n fontsize=12,\n loc=\"upper center\",\n bbox_to_anchor=(0.5, 1.0),\n ncol=3,\n )\n # plt.savefig(filename)\n logging.info(\"Saved the decision functions in '%s'\" % filename)\n\n\nfor signal in signals:\n plot_weighted_depth(signal)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.subplots", "pandas.set_option", "matplotlib.cm.get_cmap", "matplotlib.pyplot.show", "numpy.linspace" ] ]
obkyrush/jax
[ "8662c5f660678b6320a1a8fc46e917e97c399b57" ]
[ "jax/_src/random.py" ]
[ "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom functools import partial\nfrom typing import Any, Optional, Sequence, Union\nimport warnings\n\nimport numpy as np\n\nfrom jax import lax\nfrom jax import core\nfrom jax import numpy as jnp\nfrom jax._src import dtypes\nfrom jax.core import NamedShape\nfrom jax._src.api import jit, vmap\nfrom jax._src.numpy.lax_numpy import _constant_like, _convert_and_clip_integer, _check_arraylike\nfrom jax.lib import xla_bridge\nfrom jax.lib import xla_client\nfrom jax.lib import cuda_prng\nfrom jax.numpy.linalg import cholesky, svd, eigh\nfrom jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.interpreters import xla\nfrom jax._src.util import prod\n\n\nArray = Any\nRealArray = Array\nIntegerArray = Array\n# TODO: Import or define these to match\n# https://github.com/numpy/numpy/blob/main/numpy/typing/_dtype_like.py.\nDTypeLikeInt = Any\nDTypeLikeFloat = Any\n\n\n_UINT_DTYPES = {8: jnp.uint8, 16: jnp.uint16, 32: jnp.uint32, 64: jnp.uint64}\n\n\ndef PRNGKey(seed: int) -> jnp.ndarray:\n \"\"\"Create a pseudo-random number generator (PRNG) key given an integer seed.\n\n Args:\n seed: a 64- or 32-bit integer used as the value of the key.\n\n Returns:\n A PRNG key, which is modeled as an array of shape (2,) and dtype uint32. 
The\n key is constructed from a 64-bit seed by effectively bit-casting to a pair\n of uint32 values (or from a 32-bit seed by first padding out with zeros).\n \"\"\"\n # Avoid overflowerror in X32 mode by first converting ints to int64.\n # This breaks JIT invariance of PRNGKey for large ints, but supports the\n # common use-case of instantiating PRNGKey with Python hashes in X32 mode.\n if isinstance(seed, int):\n seed_arr = jnp.asarray(np.int64(seed))\n else:\n seed_arr = jnp.asarray(seed)\n if seed_arr.shape:\n raise TypeError(f\"PRNGKey seed must be a scalar; got {seed!r}.\")\n if not np.issubdtype(seed_arr.dtype, np.integer):\n raise TypeError(f\"PRNGKey seed must be an integer; got {seed!r}\")\n\n convert = lambda k: lax.reshape(lax.convert_element_type(k, np.uint32), [1])\n k1 = convert(lax.shift_right_logical(seed_arr, lax._const(seed_arr, 32)))\n k2 = convert(jnp.bitwise_and(seed_arr, np.uint32(0xFFFFFFFF)))\n return lax.concatenate([k1, k2], 0)\n\ndef _is_prng_key(key: jnp.ndarray) -> bool:\n try:\n return key.shape == (2,) and key.dtype == np.uint32\n except AttributeError:\n return False\n\n\n### utilities\n\n\ndef _make_rotate_left(dtype):\n if not jnp.issubdtype(dtype, np.integer):\n raise TypeError(\"_rotate_left only accepts integer dtypes.\")\n nbits = np.array(jnp.iinfo(dtype).bits, dtype)\n\n def _rotate_left(x, d):\n if lax.dtype(d) != dtype:\n d = lax.convert_element_type(d, dtype)\n if lax.dtype(x) != dtype:\n x = lax.convert_element_type(x, dtype)\n return lax.shift_left(x, d) | lax.shift_right_logical(x, nbits - d)\n return _rotate_left\n\n\ndef _bit_stats(bits):\n \"\"\"This is a debugging function to compute the statistics of bit fields.\"\"\"\n return np.array([list(map(int, np.binary_repr(x, 64))) for x in bits]).mean(0)\n\n\n### hash function and split\n\ndef _threefry2x32_abstract_eval(*args):\n if any(a.dtype != jnp.uint32 for a in args):\n raise TypeError(\"Arguments to threefry2x32 must have uint32 type, got {}\"\n .format(args))\n if all(isinstance(arg, core.ShapedArray) for arg in args):\n shape = lax._broadcasting_shape_rule(*args)\n named_shape = core.join_named_shapes(*(a.named_shape for a in args))\n aval = core.ShapedArray(shape, jnp.dtype(jnp.uint32), named_shape=named_shape)\n else:\n aval = core.UnshapedArray(jnp.dtype(jnp.uint32))\n return (aval,) * 2\n\nrotate_left = _make_rotate_left(np.uint32)\n\ndef apply_round(v, rot):\n v = v[:]\n v[0] = v[0] + v[1]\n v[1] = rotate_left(v[1], rot)\n v[1] = v[0] ^ v[1]\n return v\n\ndef rotate_list(xs):\n return xs[1:] + xs[:1]\n\ndef rolled_loop_step(i, state):\n x, ks, rotations = state\n for r in rotations[0]:\n x = apply_round(x, r)\n new_x = [x[0] + ks[0], x[1] + ks[1] + jnp.asarray(i + 1, dtype=np.uint32)]\n return new_x, rotate_list(ks), rotate_list(rotations)\n\ndef _threefry2x32_lowering(key1, key2, x1, x2, use_rolled_loops=True):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n x = [x1, x2]\n\n rotations = [np.array([13, 15, 26, 6], dtype=np.uint32),\n np.array([17, 29, 16, 24], dtype=np.uint32)]\n ks = [key1, key2, key1 ^ key2 ^ np.uint32(0x1BD11BDA)]\n\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1]\n\n if use_rolled_loops:\n x, _, _ = lax.fori_loop(0, 5, rolled_loop_step, (x, rotate_list(ks), rotations))\n\n else:\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + 
ks[2] + np.uint32(1)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + np.uint32(2)\n\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[0]\n x[1] = x[1] + ks[1] + np.uint32(3)\n\n for r in rotations[1]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[1]\n x[1] = x[1] + ks[2] + np.uint32(4)\n\n for r in rotations[0]:\n x = apply_round(x, r)\n x[0] = x[0] + ks[2]\n x[1] = x[1] + ks[0] + np.uint32(5)\n\n return tuple(x)\n\n\ndef _threefry2x32_gpu_translation_rule(c, k1, k2, x1, x2):\n shape = lax.broadcast_shapes(\n c.get_shape(k1).dimensions(), c.get_shape(k2).dimensions(),\n c.get_shape(x1).dimensions(), c.get_shape(x2).dimensions())\n rank = len(shape)\n if 0 in shape:\n zeros = xla_client.ops.Broadcast(\n xla_bridge.constant(c, np.array(0, np.uint32)), shape)\n return xla_client.ops.Tuple(c, [zeros, zeros])\n def _broadcast(x):\n ndims = c.get_shape(x).rank()\n return xla_client.ops.BroadcastInDim(x, shape,\n tuple(range(rank - ndims, rank)))\n return cuda_prng.threefry2x32(\n c, (_broadcast(k1), _broadcast(k2)), (_broadcast(x1), _broadcast(x2)))\n\nthreefry2x32_p = core.Primitive(\"threefry2x32\")\nthreefry2x32_p.multiple_results = True\nthreefry2x32_p.def_impl(partial(xla.apply_primitive, threefry2x32_p))\nthreefry2x32_p.def_abstract_eval(_threefry2x32_abstract_eval)\nbatching.defbroadcasting(threefry2x32_p)\nxla.translations_with_avals[threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=False),\n multiple_results=True, with_avals=True)\nxla.backend_specific_translations['cpu'][threefry2x32_p] = xla.lower_fun(\n partial(_threefry2x32_lowering, use_rolled_loops=True),\n multiple_results=True)\nif cuda_prng:\n xla.backend_specific_translations['gpu'][threefry2x32_p] = \\\n _threefry2x32_gpu_translation_rule\n\n@jit\ndef threefry_2x32(keypair, count):\n \"\"\"Apply the Threefry 2x32 hash.\n\n Args:\n keypair: a pair of 32bit unsigned integers used for the key.\n count: an array of dtype uint32 used for the counts.\n\n Returns:\n An array of dtype uint32 with the same shape as `count`.\n \"\"\"\n key1, key2 = keypair\n if not lax.dtype(key1) == lax.dtype(key2) == lax.dtype(count) == np.uint32:\n msg = \"threefry_2x32 requires uint32 arguments, got {}\"\n raise TypeError(msg.format([lax.dtype(x) for x in [key1, key2, count]]))\n\n odd_size = count.size % 2\n if odd_size:\n x = list(jnp.split(jnp.concatenate([count.ravel(), np.uint32([0])]), 2))\n else:\n x = list(jnp.split(count.ravel(), 2))\n\n x = threefry2x32_p.bind(key1, key2, x[0], x[1])\n out = jnp.concatenate(x)\n assert out.dtype == np.uint32\n return lax.reshape(out[:-1] if odd_size else out, count.shape)\n\n\ndef split(key: jnp.ndarray, num: int = 2) -> jnp.ndarray:\n \"\"\"Splits a PRNG key into `num` new keys by adding a leading axis.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n num: optional, a positive integer indicating the number of keys to produce\n (default 2).\n\n Returns:\n An array with shape (num, 2) and dtype uint32 representing `num` new keys.\n \"\"\"\n return _split(key, int(num)) # type: ignore\n\n@partial(jit, static_argnums=(1,))\ndef _split(key, num) -> jnp.ndarray:\n counts = lax.iota(np.uint32, num * 2)\n return lax.reshape(threefry_2x32(key, counts), (num, 2))\n\n\ndef fold_in(key: jnp.ndarray, data: int) -> jnp.ndarray:\n \"\"\"Folds in data to a PRNG key to form a new PRNG key.\n\n Args:\n key: a PRNGKey (an array with shape (2,) and dtype uint32).\n data: a 32bit integer representing 
data to be folded in to the key.\n\n Returns:\n A new PRNGKey that is a deterministic function of the inputs and is\n statistically safe for producing a stream of new pseudo-random values.\n \"\"\"\n return _fold_in(key, jnp.uint32(data))\n\n@jit\ndef _fold_in(key, data):\n return threefry_2x32(key, PRNGKey(data))\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _random_bits(key, bit_width, shape):\n \"\"\"Sample uniform random bits of given width and shape using PRNG key.\"\"\"\n if not _is_prng_key(key):\n raise TypeError(\"_random_bits got invalid prng key.\")\n if bit_width not in (8, 16, 32, 64):\n raise TypeError(\"requires 8-, 16-, 32- or 64-bit field width.\")\n shape = core.as_named_shape(shape)\n for name, size in shape.named_items:\n real_size = lax.psum(1, name)\n if real_size != size:\n raise ValueError(f\"The shape of axis {name} was specified as {size}, \"\n f\"but it really is {real_size}\")\n axis_index = lax.axis_index(name)\n key = fold_in(key, axis_index)\n size = prod(shape.positional)\n max_count = int(np.ceil(bit_width * size / 32))\n\n nblocks, rem = divmod(max_count, jnp.iinfo(np.uint32).max)\n\n if not nblocks:\n bits = threefry_2x32(key, lax.iota(np.uint32, rem))\n else:\n keys = split(key, nblocks + 1)\n subkeys, last_key = keys[:-1], keys[-1]\n blocks = vmap(threefry_2x32, in_axes=(0, None))(subkeys, lax.iota(np.uint32, jnp.iinfo(np.uint32).max))\n last = threefry_2x32(last_key, lax.iota(np.uint32, rem))\n bits = lax.concatenate([blocks.ravel(), last], 0)\n\n dtype = _UINT_DTYPES[bit_width]\n if bit_width == 64:\n bits = [lax.convert_element_type(x, dtype) for x in jnp.split(bits, 2)]\n bits = lax.shift_left(bits[0], dtype(32)) | bits[1]\n elif bit_width in [8, 16]:\n # this is essentially bits.view(dtype)[:size]\n bits = lax.bitwise_and(\n np.uint32(np.iinfo(dtype).max),\n lax.shift_right_logical(\n lax.broadcast(bits, (1,)),\n lax.mul(\n np.uint32(bit_width),\n lax.broadcasted_iota(np.uint32, (32 // bit_width, 1), 0)\n )\n )\n )\n bits = lax.reshape(bits, (np.uint32(max_count * 32 // bit_width),), (1, 0))\n bits = lax.convert_element_type(bits, dtype)[:size]\n return lax.reshape(bits, shape)\n\n\n### random samplers\n\n\ndef _check_shape(name, shape: Union[Sequence[int], NamedShape], *param_shapes):\n shape = core.as_named_shape(shape)\n\n if param_shapes:\n shape_ = lax.broadcast_shapes(shape.positional, *param_shapes)\n if shape.positional != shape_:\n msg = (\"{} parameter shapes must be broadcast-compatible with shape \"\n \"argument, and the result of broadcasting the shapes must equal \"\n \"the shape argument, but got result {} for shape argument {}.\")\n raise ValueError(msg.format(name, shape_, shape))\n\n\ndef uniform(key: jnp.ndarray,\n shape: Union[Sequence[int], NamedShape] = (),\n dtype: DTypeLikeFloat = dtypes.float_,\n minval: RealArray = 0.,\n maxval: RealArray = 1.) -> jnp.ndarray:\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n minval: optional, a minimum (inclusive) value broadcast-compatible with shape for the range (default 0).\n maxval: optional, a maximum (exclusive) value broadcast-compatible with shape for the range (default 1).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `uniform` must be a float dtype, \"\n f\"got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.as_named_shape(shape)\n return _uniform(key, shape, dtype, minval, maxval) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _uniform(key, shape, dtype, minval, maxval) -> jnp.ndarray:\n _check_shape(\"uniform\", shape)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"uniform only accepts floating point dtypes.\")\n\n minval = lax.convert_element_type(minval, dtype)\n maxval = lax.convert_element_type(maxval, dtype)\n minval = lax.broadcast_to_rank(minval, shape.positional_rank)\n maxval = lax.broadcast_to_rank(maxval, shape.positional_rank)\n\n finfo = jnp.finfo(dtype)\n nbits, nmant = finfo.bits, finfo.nmant\n\n if nbits not in (16, 32, 64):\n raise TypeError(\"uniform only accepts 16-, 32- or 64-bit dtypes.\")\n\n bits = _random_bits(key, nbits, shape)\n\n # The strategy here is to randomize only the mantissa bits with an exponent of\n # 1 (after applying the bias), then shift and scale to the desired range. The\n # bit-level transformation we use relies on Numpy and XLA having bit-for-bit\n # equivalent float representations, which might not be true on all platforms.\n float_bits = lax.bitwise_or(\n lax.shift_right_logical(bits, np.array(nbits - nmant, lax.dtype(bits))),\n np.array(1., dtype).view(_UINT_DTYPES[nbits]))\n floats = lax.bitcast_convert_type(float_bits, dtype) - np.array(1., dtype)\n return lax.max(\n minval,\n lax.reshape(floats * (maxval - minval) + minval, shape.positional))\n\n\ndef randint(key: jnp.ndarray,\n shape: Sequence[int],\n minval: IntegerArray,\n maxval: IntegerArray,\n dtype: DTypeLikeInt = dtypes.int_):\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: a tuple of nonnegative integers representing the shape.\n minval: int or array of ints broadcast-compatible with ``shape``, a minimum\n (inclusive) value for the range.\n maxval: int or array of ints broadcast-compatible with ``shape``, a maximum\n (exclusive) value for the range.\n dtype: optional, an int dtype for the returned values (default int64 if\n jax_enable_x64 is true, otherwise int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _randint(key, shape, minval, maxval, dtype)\n\n@partial(jit, static_argnums=(1, 4))\ndef _randint(key, shape, minval, maxval, dtype):\n _check_shape(\"randint\", shape, np.shape(minval), np.shape(maxval))\n if not jnp.issubdtype(dtype, np.integer):\n raise TypeError(f\"randint only accepts integer dtypes, got {dtype}\")\n\n _check_arraylike(\"randint\", minval, maxval)\n minval = jnp.asarray(minval)\n maxval = jnp.asarray(maxval)\n if not jnp.issubdtype(minval.dtype, np.integer):\n minval = minval.astype(int)\n if not jnp.issubdtype(maxval.dtype, np.integer):\n maxval = maxval.astype(int)\n\n # Flag where maxval is greater than 
the maximum value of dtype\n # in order to handle cases like randint(key, shape, 0, 256, 'uint8')\n maxval_out_of_range = lax.gt(\n maxval, _convert_and_clip_integer(jnp.array(jnp.iinfo(dtype).max, dtype), maxval.dtype))\n\n minval = _convert_and_clip_integer(minval, dtype)\n maxval = _convert_and_clip_integer(maxval, dtype)\n minval = lax.broadcast_to_rank(minval, len(shape))\n maxval = lax.broadcast_to_rank(maxval, len(shape))\n nbits = jnp.iinfo(dtype).bits\n\n if nbits not in (8, 16, 32, 64):\n raise TypeError(f\"randint only accepts 8-, 16-, 32-, or 64-bit dtypes, got {dtype}\")\n\n # This algorithm is biased whenever (maxval - minval) is not a power of 2.\n # We generate double the number of random bits required by the dtype so as to\n # reduce that bias.\n k1, k2 = split(key)\n rbits = lambda key: _random_bits(key, nbits, shape)\n higher_bits, lower_bits = rbits(k1), rbits(k2)\n\n unsigned_dtype = _UINT_DTYPES[nbits]\n span = lax.convert_element_type(maxval - minval, unsigned_dtype)\n\n # Ensure that span=1 when maxval <= minval, so minval is always returned;\n # https://github.com/google/jax/issues/222\n span = lax.select(maxval <= minval, lax.full_like(span, 1), span)\n\n # When maxval is out of range, the span has to be one larger.\n # If span is already the maximum representable value, this will wrap to zero,\n # causing remainders below to have no effect, which is the correct semantics.\n span = lax.select(\n maxval_out_of_range & (maxval > minval),\n lax.add(span, lax._const(span, 1)),\n span)\n\n # To compute a remainder operation on an integer that might have twice as many\n # bits as we can represent in the native unsigned dtype, we compute a\n # multiplier equal to 2**nbits % span. To avoid overflow, we use the identity:\n # (a * b) % N = [(a % N) * (b % N)] % N\n multiplier = lax.rem(lax._const(span, 2 ** (nbits // 2)), span)\n multiplier = lax.rem(lax.mul(multiplier, multiplier), span)\n\n random_offset = lax.add(lax.mul(lax.rem(higher_bits, span), multiplier),\n lax.rem(lower_bits, span))\n random_offset = lax.rem(random_offset, span)\n return lax.add(minval, lax.convert_element_type(random_offset, dtype))\n\n\ndef shuffle(key: jnp.ndarray, x: Array, axis: int = 0) -> jnp.ndarray:\n \"\"\"Shuffle the elements of an array uniformly at random along an axis.\n\n Args:\n key: a PRNGKey used as the random key.\n x: the array to be shuffled.\n axis: optional, an int axis along which to shuffle (default 0).\n\n Returns:\n A shuffled version of x.\n \"\"\"\n msg = (\"jax.random.shuffle is deprecated and will be removed in a future release. 
\"\n \"Use jax.random.permutation\")\n warnings.warn(msg, FutureWarning)\n return _shuffle(key, x, axis) # type: ignore\n\n\ndef permutation(key: jnp.ndarray, x: Array) -> jnp.ndarray:\n \"\"\"\n Permute elements of an array along its first axis or return a permuted range.\n\n If `x` is a multi-dimensional array, it is only shuffled along its\n first index.\n\n Args:n\n key: a PRNGKey used as the random key.\n x: the array or integer range to be shuffled.\n\n Returns:\n A shuffled version of x or array range\n \"\"\"\n if not np.ndim(x):\n # scalar case, must be a concrete integer\n if not np.issubdtype(lax.dtype(x), np.integer):\n raise TypeError(\"x must be an integer or at least 1-dimensional\")\n x = int(x) # type: ignore[assignment]\n return _shuffle(key, jnp.arange(x), 0)\n elif np.ndim(x) == 1:\n return _shuffle(key, x, 0)\n else:\n assert isinstance(x, jnp.ndarray)\n ind = _shuffle(key, jnp.arange(x.shape[0]), 0) # type: ignore[attribute-error]\n return x[ind]\n\n\n@partial(jit, static_argnums=(2,))\ndef _shuffle(key, x, axis) -> jnp.ndarray:\n # On parallel architectures, Fisher-Yates is more expensive than doing\n # multiple sorts. This algorithm is based on one developed and analyzed by\n # tjablin@. We sort according to randomly-generated 32bit keys, but those keys\n # may have collisions. If we repeat the process, using fresh 32bit keys for\n # each sort, then whenever all pairs of elements have been assigned distinct\n # keys at some iteration (or equivalently when the strings formed by\n # concatenating the successive keys for each element are all distinct) then we\n # are guaranteed to have a perfect sample (assuming that either the sort is\n # stable or that any bias is not value-dependent). Since checking uniqueness\n # at runtime may be expensive, we use a heuristic static stop criterion\n # developed by tjablin@. See tensorflow/compiler/tf2xla/random_ops.cc for more\n # info, and for the original implementation of this algorithm. See also\n # Section 2 of http://people.csail.mit.edu/costis/6896sp11/lec5s.pdf for\n # another analysis (where the keys are generated one bit at a time).\n exponent = 3 # see tjablin@'s analysis for explanation of this parameter\n uint32max = jnp.iinfo(np.uint32).max\n num_rounds = int(np.ceil(exponent * np.log(max(1, x.size)) / np.log(uint32max)))\n\n for _ in range(num_rounds):\n key, subkey = split(key)\n sort_keys = _random_bits(subkey, 32, x.shape)\n _, x = lax.sort_key_val(sort_keys, x, axis)\n\n return x\n\n\ndef choice(key: jnp.ndarray,\n a: IntegerArray,\n shape: Sequence[int] = (),\n replace: bool = True,\n p=None) -> jnp.ndarray:\n \"\"\"Generates a random sample from a given 1-D array.\n\n Args:\n key: a PRNGKey used as the random key.\n a : 1D array or int. If an ndarray, a random sample is generated from\n its elements. If an int, the random sample is generated as if a were\n arange(a).\n shape : tuple of ints, optional. Output shape. If the given shape is,\n e.g., ``(m, n)``, then ``m * n`` samples are drawn. Default is (),\n in which case a single value is returned.\n replace : boolean. 
Whether the sample is with or without replacement.\n default is True.\n p : 1-D array-like, The probabilities associated with each entry in a.\n If not given the sample assumes a uniform distribution over all\n entries in a.\n\n Returns:\n An array of shape `shape` containing samples from `a`.\n \"\"\"\n if not isinstance(shape, Sequence):\n raise TypeError(\"shape argument of jax.random.choice must be a sequence, \"\n f\"got {shape}\")\n if np.ndim(a) not in [0, 1]:\n raise ValueError(\"a must be an integer or 1-dimensional\")\n _check_arraylike(\"choice\", a)\n if np.ndim(a) == 0:\n a = core.concrete_or_error(int, a, \"The error occurred in jax.random.choice()\")\n else:\n a = jnp.asarray(a)\n n_inputs = int(a) if np.ndim(a) == 0 else len(a) # type: ignore[arg-type]\n n_draws = prod(shape)\n if n_draws == 0:\n return jnp.zeros(shape, dtype=lax.dtype(a))\n if n_inputs <= 0:\n raise ValueError(\"a must be greater than 0 unless no samples are taken\")\n if not replace and n_draws > n_inputs:\n raise ValueError(\"Cannot take a larger sample than population when 'replace=False'\")\n\n if p is None:\n if replace:\n ind = randint(key, shape, 0, n_inputs)\n result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]\n else:\n result = permutation(key, a)[:n_draws]\n else:\n if p.shape != (n_inputs,):\n raise ValueError(\"p must be None or match the shape of a\")\n if replace:\n p_cuml = jnp.cumsum(p)\n r = p_cuml[-1] * (1 - uniform(key, shape))\n ind = jnp.searchsorted(p_cuml, r)\n result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]\n else:\n # Gumbel top-k trick: https://timvieira.github.io/blog/post/2019/09/16/algorithms-for-sampling-without-replacement/\n g = -gumbel(key, (n_inputs,)) - jnp.log(p)\n ind = jnp.argsort(g)[:n_draws]\n result = ind if np.ndim(a) == 0 else a[ind] # type: ignore[index]\n return result.reshape(shape)\n\n\ndef normal(key: jnp.ndarray,\n shape: Union[Sequence[int], NamedShape] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample standard normal random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.inexact):\n raise ValueError(f\"dtype argument to `normal` must be a float or complex dtype, \"\n f\"got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.as_named_shape(shape)\n return _normal(key, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _normal(key, shape, dtype) -> jnp.ndarray:\n if dtypes.issubdtype(dtype, np.complexfloating):\n sqrt2 = np.array(np.sqrt(2), dtype)\n\n key_re, key_im = split(key)\n real_dtype = np.array(0, dtype).real.dtype\n _re = _normal_real(key_re, shape, real_dtype)\n _im = _normal_real(key_im, shape, real_dtype)\n return (_re + 1j * _im) / sqrt2\n else:\n return _normal_real(key, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(1, 2))\ndef _normal_real(key, shape, dtype) -> jnp.ndarray:\n _check_shape(\"normal\", shape)\n lo = np.nextafter(np.array(-1., dtype), np.array(0., dtype), dtype=dtype)\n hi = np.array(1., dtype)\n u = uniform(key, shape, dtype, lo, hi) # type: ignore[arg-type]\n return np.array(np.sqrt(2), dtype) * lax.erf_inv(u)\n\n\ndef multivariate_normal(key: jnp.ndarray,\n mean: RealArray,\n cov: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_,\n method: str = 'cholesky') -> jnp.ndarray:\n \"\"\"Sample multivariate normal random values with given mean and covariance.\n\n Args:\n key: a PRNGKey used as the random key.\n mean: a mean vector of shape ``(..., n)``.\n cov: a positive definite covariance matrix of shape ``(..., n, n)``. The\n batch shape ``...`` must be broadcast-compatible with that of ``mean``.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n axis. Must be broadcast-compatible with ``mean.shape[:-1]`` and\n ``cov.shape[:-2]``. The default (None) produces a result batch shape by\n broadcasting together the batch shapes of ``mean`` and ``cov``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n method: optional, a method to compute the factor of ``cov``.\n Must be one of 'svd', eigh, and 'cholesky'. 
Default 'cholesky'.\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + mean.shape[-1:]`` if ``shape`` is not None, or else\n ``broadcast_shapes(mean.shape[:-1], cov.shape[:-2]) + mean.shape[-1:]``.\n \"\"\"\n if method not in {'svd', 'eigh', 'cholesky'}:\n raise ValueError(\"method must be one of {'svd', 'eigh', 'cholesky'}\")\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `multivariate_normal` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _multivariate_normal(key, mean, cov, shape, dtype, method) # type: ignore\n\n@partial(jit, static_argnums=(3, 4, 5))\ndef _multivariate_normal(key, mean, cov, shape, dtype, method) -> jnp.ndarray:\n if not np.ndim(mean) >= 1:\n msg = \"multivariate_normal requires mean.ndim >= 1, got mean.ndim == {}\"\n raise ValueError(msg.format(np.ndim(mean)))\n if not np.ndim(cov) >= 2:\n msg = \"multivariate_normal requires cov.ndim >= 2, got cov.ndim == {}\"\n raise ValueError(msg.format(np.ndim(cov)))\n n = mean.shape[-1]\n if np.shape(cov)[-2:] != (n, n):\n msg = (\"multivariate_normal requires cov.shape == (..., n, n) for n={n}, \"\n \"but got cov.shape == {shape}.\")\n raise ValueError(msg.format(n=n, shape=np.shape(cov)))\n\n if shape is None:\n shape = lax.broadcast_shapes(mean.shape[:-1], cov.shape[:-2])\n else:\n _check_shape(\"normal\", shape, mean.shape[:-1], cov.shape[:-2])\n\n if method == 'svd':\n (u, s, _) = svd(cov)\n factor = u * jnp.sqrt(s)\n elif method == 'eigh':\n (w, v) = eigh(cov)\n factor = v * jnp.sqrt(w)\n else: # 'cholesky'\n factor = cholesky(cov)\n normal_samples = normal(key, shape + mean.shape[-1:], dtype)\n return mean + jnp.einsum('...ij,...j->...i', factor, normal_samples)\n\n\ndef truncated_normal(key: jnp.ndarray,\n lower: RealArray,\n upper: RealArray,\n shape: Optional[Union[Sequence[int], NamedShape]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample truncated standard normal random values with given shape and dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n lower: a float or array of floats representing the lower bound for\n truncation. Must be broadcast-compatible with ``upper``.\n upper: a float or array of floats representing the upper bound for\n truncation. Must be broadcast-compatible with ``lower``.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``lower`` and ``upper``. 
The\n default (None) produces a result shape by broadcasting ``lower`` and\n ``upper``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``lower`` and ``upper``.\n Returns values in the open interval ``(lower, upper)``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `truncated_normal` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.as_named_shape(shape)\n return _truncated_normal(key, lower, upper, shape, dtype) # type: ignore\n\n@partial(jit, static_argnums=(3, 4))\ndef _truncated_normal(key, lower, upper, shape, dtype) -> jnp.ndarray:\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(lower), np.shape(upper))\n else:\n _check_shape(\"truncated_normal\", shape, np.shape(lower), np.shape(upper))\n\n sqrt2 = np.array(np.sqrt(2), dtype)\n lower = lax.convert_element_type(lower, dtype)\n upper = lax.convert_element_type(upper, dtype)\n a = lax.erf(lower / sqrt2)\n b = lax.erf(upper / sqrt2)\n if not jnp.issubdtype(dtype, np.floating):\n raise TypeError(\"truncated_normal only accepts floating point dtypes.\")\n u = uniform(key, shape, dtype, minval=a, maxval=b)\n out = sqrt2 * lax.erf_inv(u)\n # Clamp the value to the open interval (lower, upper) to make sure that\n # rounding (or if we chose `a` for `u`) doesn't push us outside of the range.\n return jnp.clip(\n out,\n lax.nextafter(lax.stop_gradient(lower), np.array(np.inf, dtype=dtype)),\n lax.nextafter(lax.stop_gradient(upper), np.array(-np.inf, dtype=dtype)))\n\n\ndef bernoulli(key: jnp.ndarray,\n p: RealArray = np.float32(0.5),\n shape: Optional[Union[Sequence[int], NamedShape]] = None) -> jnp.ndarray:\n \"\"\"Sample Bernoulli random values with given shape and mean.\n\n Args:\n key: a PRNGKey used as the random key.\n p: optional, a float or array of floats for the mean of the random\n variables. Must be broadcast-compatible with ``shape``. Default 0.5.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Must be broadcast-compatible with ``p.shape``. 
The default (None)\n produces a result shape equal to ``p.shape``.\n\n Returns:\n A random array with boolean dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``p.shape``.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(lax.dtype(p))\n if shape is not None:\n shape = core.as_named_shape(shape)\n if not jnp.issubdtype(dtype, np.floating):\n msg = \"bernoulli probability `p` must have a floating dtype, got {}.\"\n raise TypeError(msg.format(dtype))\n p = lax.convert_element_type(p, dtype)\n return _bernoulli(key, p, shape) # type: ignore\n\n@partial(jit, static_argnums=(2,))\ndef _bernoulli(key, p, shape) -> jnp.ndarray:\n if shape is None:\n # TODO: Use the named part of `p` as well\n shape = np.shape(p)\n else:\n _check_shape(\"bernoulli\", shape, np.shape(p))\n\n return uniform(key, shape, lax.dtype(p)) < p\n\n\ndef beta(key: jnp.ndarray,\n a: RealArray,\n b: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Beta random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the first parameter \"alpha\".\n b: a float or array of floats broadcast-compatible with ``shape``\n representing the second parameter \"beta\".\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a`` and ``b``. The default\n (None) produces a result shape by broadcasting ``a`` and ``b``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by ``shape`` if\n ``shape`` is not None, or else by broadcasting ``a`` and ``b``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `beta` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _beta(key, a, b, shape, dtype)\n\ndef _beta(key, a, b, shape, dtype):\n if shape is None:\n shape = lax.broadcast_shapes(np.shape(a), np.shape(b))\n else:\n _check_shape(\"beta\", shape, np.shape(a), np.shape(b))\n\n a = lax.convert_element_type(a, dtype)\n b = lax.convert_element_type(b, dtype)\n key_a, key_b = split(key)\n a = jnp.broadcast_to(a, shape)\n b = jnp.broadcast_to(b, shape)\n gamma_a = gamma(key_a, a, shape, dtype)\n gamma_b = gamma(key_b, b, shape, dtype)\n return gamma_a / (gamma_a + gamma_b)\n\n\ndef cauchy(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Cauchy random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `cauchy` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _cauchy(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _cauchy(key, shape, dtype):\n _check_shape(\"cauchy\", shape)\n u = uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)\n pi = _constant_like(u, np.pi)\n return lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5))))\n\n\ndef dirichlet(key: jnp.ndarray,\n alpha: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Dirichlet random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n alpha: an array of shape ``(..., n)`` used as the concentration\n parameter of the random variables.\n shape: optional, a tuple of nonnegative integers specifying the result\n batch shape; that is, the prefix of the result shape excluding the last\n element of value ``n``. Must be broadcast-compatible with\n ``alpha.shape[:-1]``. The default (None) produces a result shape equal to\n ``alpha.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and shape given by\n ``shape + (alpha.shape[-1],)`` if ``shape`` is not None, or else\n ``alpha.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `dirichlet` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _dirichlet(key, alpha, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _dirichlet(key, alpha, shape, dtype):\n if not np.ndim(alpha) >= 1:\n msg = \"dirichlet requires alpha.ndim >= 1, got alpha.ndim == {}\"\n raise ValueError(msg.format(np.ndim(alpha)))\n\n if shape is None:\n shape = np.shape(alpha)[:-1]\n else:\n _check_shape(\"dirichlet\", shape, np.shape(alpha)[:-1])\n\n alpha = lax.convert_element_type(alpha, dtype)\n gamma_samples = gamma(key, alpha, shape + np.shape(alpha)[-1:], dtype)\n return gamma_samples / jnp.sum(gamma_samples, axis=-1, keepdims=True)\n\n\ndef exponential(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Exponential random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `exponential` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _exponential(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _exponential(key, shape, dtype):\n _check_shape(\"exponential\", shape)\n u = uniform(key, shape, dtype)\n # taking 1 - u to move the domain of log to (0, 1] instead of [0, 1)\n return lax.neg(lax.log1p(lax.neg(u)))\n\n\ndef _gamma_one(key, alpha):\n # Ref: A simple method for generating gamma variables, George Marsaglia and Wai Wan Tsang\n # The algorithm can also be found in:\n # https://en.wikipedia.org/wiki/Gamma_distribution#Generating_gamma-distributed_random_variables\n zero = _constant_like(alpha, 0)\n one = _constant_like(alpha, 1)\n minus_one = _constant_like(alpha, -1)\n one_over_two = _constant_like(alpha, 0.5)\n one_over_three = _constant_like(alpha, 1. / 3.)\n squeeze_const = _constant_like(alpha, 0.0331)\n dtype = lax.dtype(alpha)\n\n key, subkey = split(key)\n # for alpha < 1, we boost alpha to alpha + 1 and get a sample according to\n # Gamma(alpha) ~ Gamma(alpha+1) * Uniform()^(1 / alpha)\n boost = lax.select(lax.ge(alpha, one),\n one,\n lax.pow(uniform(subkey, (), dtype=dtype), lax.div(one, alpha)))\n alpha = lax.select(lax.ge(alpha, one), alpha, lax.add(alpha, one))\n\n d = lax.sub(alpha, one_over_three)\n c = lax.div(one_over_three, lax.sqrt(d))\n\n def _cond_fn(kXVU):\n _, X, V, U = kXVU\n # TODO: use lax.cond when its batching rule is supported\n # The reason is to avoid evaluating the second condition, which involves log+log,\n # if the first condition is satisfied\n cond = lax.bitwise_and(lax.ge(U, lax.sub(one, lax.mul(squeeze_const, lax.mul(X, X)))),\n lax.ge(lax.log(U), lax.add(lax.mul(X, one_over_two),\n lax.mul(d, lax.add(lax.sub(one, V),\n lax.log(V))))))\n return cond\n\n def _body_fn(kXVU):\n def _next_kxv(kxv):\n key = kxv[0]\n key, subkey = split(key)\n x = normal(subkey, (), dtype=dtype)\n v = lax.add(one, lax.mul(x, c))\n return key, x, v\n\n key = kXVU[0]\n key, x_key, U_key = split(key, 3)\n _, x, v = lax.while_loop(lambda kxv: lax.le(kxv[2], zero), _next_kxv, (x_key, zero, minus_one))\n X = lax.mul(x, x)\n V = lax.mul(lax.mul(v, v), v)\n U = uniform(U_key, (), dtype=dtype)\n return key, X, V, U\n\n # initial state is chosen such that _cond_fn will return True\n _, _, V, _ = lax.while_loop(_cond_fn, _body_fn, (key, zero, one, _constant_like(alpha, 2)))\n z = lax.mul(lax.mul(d, V), boost)\n return lax.select(lax.eq(z, zero), jnp.finfo(z.dtype).tiny, z)\n\n\ndef _gamma_grad(sample, a):\n samples = jnp.reshape(sample, -1)\n alphas = jnp.reshape(a, -1)\n if xla_bridge.get_backend().platform == 'cpu':\n grads = lax.map(lambda args: lax.random_gamma_grad(*args), (alphas, samples))\n else:\n grads = vmap(lax.random_gamma_grad)(alphas, samples)\n return grads.reshape(np.shape(a))\n\ndef _gamma_impl(key, a, use_vmap=False):\n a_shape = jnp.shape(a)\n # split key to match the shape of a\n key_ndim = jnp.ndim(key) - 1\n key = jnp.reshape(key, (-1, 2))\n key = vmap(split, in_axes=(0, None))(key, prod(a_shape[key_ndim:]))\n keys = jnp.reshape(key, (-1, 2))\n alphas = jnp.reshape(a, -1)\n if use_vmap:\n samples = vmap(_gamma_one)(keys, alphas)\n 
else:\n samples = lax.map(lambda args: _gamma_one(*args), (keys, alphas))\n\n return jnp.reshape(samples, a_shape)\n\ndef _gamma_batching_rule(batched_args, batch_dims):\n k, a = batched_args\n bk, ba = batch_dims\n size = next(t.shape[i] for t, i in zip(batched_args, batch_dims) if i is not None)\n k = batching.bdim_at_front(k, bk, size)\n a = batching.bdim_at_front(a, ba, size)\n return random_gamma_p.bind(k, a), 0\n\nrandom_gamma_p = core.Primitive('random_gamma')\nrandom_gamma_p.def_impl(_gamma_impl)\nrandom_gamma_p.def_abstract_eval(lambda key, a: core.raise_to_shaped(a))\nad.defjvp2(random_gamma_p, None, lambda tangent, ans, key, a: tangent * _gamma_grad(ans, a))\nxla.translations_with_avals[random_gamma_p] = xla.lower_fun(\n partial(_gamma_impl, use_vmap=True),\n multiple_results=False, with_avals=True)\nxla.backend_specific_translations['cpu'][random_gamma_p] = xla.lower_fun(\n partial(_gamma_impl, use_vmap=False),\n multiple_results=False)\nbatching.primitive_batchers[random_gamma_p] = _gamma_batching_rule\n\ndef gamma(key: jnp.ndarray,\n a: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Gamma random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n a: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``a``. The default (None)\n produces a result shape equal to ``a.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``a.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `gamma` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _gamma(key, a, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _gamma(key, a, shape, dtype):\n if shape is None:\n shape = np.shape(a)\n else:\n _check_shape(\"gamma\", shape, np.shape(a))\n\n a = lax.convert_element_type(a, dtype)\n if np.shape(a) != shape:\n a = jnp.broadcast_to(a, shape)\n return random_gamma_p.bind(key, a)\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _poisson_knuth(key, lam, shape, dtype, max_iters):\n # Knuth's algorithm for generating Poisson random variates.\n # Reference:\n # https://en.wikipedia.org/wiki/Poisson_distribution#Generating_Poisson-distributed_random_variables\n\n def body_fn(carry):\n i, k, rng, log_prod = carry\n rng, subkey = split(rng)\n k = lax.select(log_prod > -lam, k + 1, k)\n u = uniform(subkey, shape, np.float32)\n return i + 1, k, rng, log_prod + jnp.log(u)\n\n def cond_fn(carry):\n i, log_prod = carry[0], carry[3]\n return (log_prod > -lam).any() & (i < max_iters)\n\n k_init = lax.full_like(lam, 0, dtype, shape)\n log_rate_init = lax.full_like(lam, 0, np.float32, shape)\n k = lax.while_loop(cond_fn, body_fn, (0, k_init, key, log_rate_init))[1]\n return (k - 1).astype(dtype)\n\n\n@partial(jit, static_argnums=(2, 3, 4))\ndef _poisson_rejection(key, lam, shape, dtype, max_iters):\n # Transformed rejection due to Hormann.\n # Reference:\n # 
http://citeseer.ist.psu.edu/viewdoc/citations;jsessionid=1BEB35946CC807879F55D42512E5490C?doi=10.1.1.48.3054.\n log_lam = lax.log(lam)\n b = 0.931 + 2.53 * lax.sqrt(lam)\n a = -0.059 + 0.02483 * b\n inv_alpha = 1.1239 + 1.1328 / (b - 3.4)\n v_r = 0.9277 - 3.6224 / (b - 2)\n\n def body_fn(carry):\n i, k_out, accepted, key = carry\n key, subkey_0, subkey_1 = split(key, 3)\n\n u = uniform(subkey_0, shape, lam.dtype) - 0.5\n v = uniform(subkey_1, shape, lam.dtype)\n u_shifted = 0.5 - abs(u)\n\n k = lax.floor((2 * a / u_shifted + b) * u + lam + 0.43)\n s = lax.log(v * inv_alpha / (a / (u_shifted * u_shifted) + b))\n t = -lam + k * log_lam - lax.lgamma(k + 1)\n\n accept1 = (u_shifted >= 0.07) & (v <= v_r)\n reject = (k < 0) | ((u_shifted < 0.013) & (v > u_shifted))\n accept2 = s <= t\n accept = accept1 | (~reject & accept2)\n\n k_out = lax.select(accept, k, k_out)\n accepted |= accept\n\n return i + 1, k_out, accepted, key\n\n def cond_fn(carry):\n i, k_out, accepted, key = carry\n return (~accepted).any() & (i < max_iters)\n\n k_init = lax.full_like(lam, -1, lam.dtype, shape)\n accepted = lax.full_like(lam, False, jnp.bool_, shape)\n k = lax.while_loop(cond_fn, body_fn, (0, k_init, accepted, key))[1]\n return k.astype(dtype)\n\n\n@partial(jit, static_argnums=(2, 3))\ndef _poisson(key, lam, shape, dtype):\n # The implementation matches TensorFlow and NumPy:\n # https://github.com/tensorflow/tensorflow/blob/v2.2.0-rc3/tensorflow/core/kernels/random_poisson_op.cc\n # https://github.com/numpy/numpy/blob/v1.18.3/numpy/random/src/distributions/distributions.c#L574\n # For lambda < 10, we use the Knuth algorithm; otherwise, we use transformed\n # rejection sampling.\n use_knuth = lam < 10\n lam_knuth = lax.select(use_knuth, lam, lax.full_like(lam, 0.0))\n # The acceptance probability for rejection sampling maxes out at 89% as\n # λ -> ∞, so pick some arbitrary large value.\n lam_rejection = lax.select(use_knuth, lax.full_like(lam, 1e5), lam)\n max_iters = dtype.type(jnp.iinfo(dtype).max) # insanely conservative\n result = lax.select(\n use_knuth,\n _poisson_knuth(key, lam_knuth, shape, dtype, max_iters),\n _poisson_rejection(key, lam_rejection, shape, dtype, max_iters),\n )\n return lax.select(lam == 0, jnp.zeros_like(result), result)\n\n\ndef poisson(key: jnp.ndarray,\n lam: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeInt = dtypes.int_) -> jnp.ndarray:\n \"\"\"Sample Poisson random values with given shape and integer dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n lam: rate parameter (mean of the distribution), must be >= 0.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, an integer dtype for the returned values (default int64 if\n jax_enable_x64 is true, otherwise int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n if np.shape(lam) != shape:\n lam = jnp.broadcast_to(lam, shape)\n lam = lax.convert_element_type(lam, np.float32)\n return _poisson(key, lam, shape, dtype)\n\n\ndef gumbel(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Gumbel random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `gumbel` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _gumbel(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _gumbel(key, shape, dtype):\n _check_shape(\"gumbel\", shape)\n return -jnp.log(-jnp.log(\n uniform(key, shape, dtype, minval=jnp.finfo(dtype).tiny, maxval=1.)))\n\n\ndef categorical(key: jnp.ndarray,\n logits: RealArray,\n axis: int = -1,\n shape: Optional[Sequence[int]] = None) -> jnp.ndarray:\n \"\"\"Sample random values from categorical distributions.\n\n Args:\n key: a PRNGKey used as the random key.\n logits: Unnormalized log probabilities of the categorical distribution(s) to sample from,\n so that `softmax(logits, axis)` gives the corresponding probabilities.\n axis: Axis along which logits belong to the same categorical distribution.\n shape: Optional, a tuple of nonnegative integers representing the result shape.\n Must be broadcast-compatible with ``np.delete(logits.shape, axis)``.\n The default (None) produces a result shape equal to ``np.delete(logits.shape, axis)``.\n\n Returns:\n A random array with int dtype and shape given by ``shape`` if ``shape``\n is not None, or else ``np.delete(logits.shape, axis)``.\n \"\"\"\n\n if axis >= 0:\n axis -= len(logits.shape)\n\n batch_shape = tuple(np.delete(logits.shape, axis))\n if shape is None:\n shape = batch_shape\n else:\n shape = tuple(shape)\n _check_shape(\"categorical\", shape, batch_shape)\n\n sample_shape = shape[:len(shape)-len(batch_shape)]\n return jnp.argmax(\n gumbel(key, sample_shape + logits.shape, logits.dtype) +\n lax.expand_dims(logits, tuple(range(len(sample_shape)))),\n axis=axis)\n\n\ndef laplace(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Laplace random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `laplace` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _laplace(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _laplace(key, shape, dtype):\n _check_shape(\"laplace\", shape)\n u = uniform(\n key, shape, dtype, minval=-1. + jnp.finfo(dtype).epsneg, maxval=1.)\n return lax.mul(lax.sign(u), lax.log1p(lax.neg(lax.abs(u))))\n\n\ndef logistic(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample logistic random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: optional, a tuple of nonnegative integers representing the result\n shape. 
Default ().\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `logistic` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _logistic(key, shape, dtype)\n\n@partial(jit, static_argnums=(1, 2))\ndef _logistic(key, shape, dtype):\n _check_shape(\"logistic\", shape)\n x = uniform(key, shape, dtype, minval=jnp.finfo(dtype).eps, maxval=1.)\n return lax.log(lax.div(x, lax.sub(lax._const(x, 1), x)))\n\n\ndef pareto(key: jnp.ndarray,\n b: RealArray,\n shape: Optional[Sequence[int]] = None,\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Pareto random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n b: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``b``. The default (None)\n produces a result shape equal to ``b.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``b.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `pareto` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n if shape is not None:\n shape = core.canonicalize_shape(shape)\n return _pareto(key, b, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _pareto(key, b, shape, dtype):\n if shape is None:\n shape = np.shape(b)\n else:\n _check_shape(\"pareto\", shape)\n\n b = lax.convert_element_type(b, dtype)\n e = exponential(key, shape, dtype)\n return lax.exp(e / b)\n\n\ndef t(key: jnp.ndarray,\n df: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample Student's t random values with given shape and float dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n df: a float or array of floats broadcast-compatible with ``shape``\n representing the parameter of the distribution.\n shape: optional, a tuple of nonnegative integers specifying the result\n shape. Must be broadcast-compatible with ``df``. 
The default (None)\n produces a result shape equal to ``df.shape``.\n dtype: optional, a float dtype for the returned values (default float64 if\n jax_enable_x64 is true, otherwise float32).\n\n Returns:\n A random array with the specified dtype and with shape given by ``shape`` if\n ``shape`` is not None, or else by ``df.shape``.\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `t` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _t(key, df, shape, dtype)\n\n@partial(jit, static_argnums=(2, 3))\ndef _t(key, df, shape, dtype):\n if shape is None:\n shape = np.shape(df)\n else:\n _check_shape(\"t\", shape, np.shape(df))\n\n df = lax.convert_element_type(df, dtype)\n key_n, key_g = split(key)\n n = normal(key_n, shape, dtype)\n two = _constant_like(n, 2)\n half_df = lax.div(df, two)\n g = gamma(key_g, half_df, shape, dtype)\n return n * jnp.sqrt(half_df / g)\n\n\ndef rademacher(key: jnp.ndarray,\n shape: Sequence[int],\n dtype: DTypeLikeInt = dtypes.int_) -> jnp.ndarray:\n \"\"\"Sample from a Rademacher distribution.\n\n Args:\n key: a PRNGKey key.\n shape: The shape of the returned samples.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples, of shape `shape`. Each element in the output has\n a 50% chance of being 1 or -1.\n\n \"\"\"\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _rademacher(key, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _rademacher(key, shape, dtype):\n bernoulli_samples = bernoulli(key=key, p=0.5, shape=shape)\n return (2 * bernoulli_samples - 1).astype(dtype)\n\n\ndef maxwell(key: jnp.ndarray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample from a one sided Maxwell distribution.\n\n The scipy counterpart is `scipy.stats.maxwell`.\n\n Args:\n key: a PRNGKey key.\n shape: The shape of the returned samples.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples, of shape `shape`.\n\n \"\"\"\n # Generate samples using:\n # sqrt(X^2 + Y^2 + Z^2), X,Y,Z ~N(0,1)\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `maxwell` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _maxwell(key, shape, dtype)\n\n\n@partial(jit, static_argnums=(1, 2))\ndef _maxwell(key, shape, dtype):\n shape = shape + (3,)\n norm_rvs = normal(key=key, shape=shape, dtype=dtype)\n return jnp.linalg.norm(norm_rvs, axis=-1)\n\n\ndef double_sided_maxwell(key: jnp.ndarray,\n loc: RealArray,\n scale: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample from a double sided Maxwell distribution.\n\n Samples using:\n loc + scale * sgn(U - 0.5) * one_sided_maxwell, U ~ Unif[0, 1];\n\n Args:\n key: a PRNGKey key.\n loc: The location parameter of the distribution.\n scale: The scale parameter of the distribution.\n shape: The result shape, prepended to the broadcast shape of loc and scale.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples.\n\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `double_sided_maxwell` must be a float\"\n f\" dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _double_sided_maxwell(key, loc, scale, 
shape, dtype)\n\n\n@partial(jit, static_argnums=(3, 4))\ndef _double_sided_maxwell(key, loc, scale, shape, dtype):\n params_shapes = lax.broadcast_shapes(np.shape(loc), np.shape(scale))\n if not shape:\n shape = params_shapes\n\n shape = shape + params_shapes\n maxwell_key, rademacher_key = split(key)\n maxwell_rvs = maxwell(maxwell_key, shape=shape, dtype=dtype)\n # Generate random signs for the symmetric variates.\n random_sign = rademacher(rademacher_key, shape=shape, dtype=dtype)\n assert random_sign.shape == maxwell_rvs.shape\n\n return random_sign * maxwell_rvs * scale + loc\n\n\ndef weibull_min(key: jnp.ndarray,\n scale: RealArray,\n concentration: RealArray,\n shape: Sequence[int] = (),\n dtype: DTypeLikeFloat = dtypes.float_) -> jnp.ndarray:\n \"\"\"Sample from a Weibull distribution.\n\n The scipy counterpart is `scipy.stats.weibull_min`.\n\n Args:\n key: a PRNGKey key.\n scale: The scale parameter of the distribution.\n concentration: The concentration parameter of the distribution.\n shape: The result shape, prepended to the broadcast shape of scale and concentration.\n dtype: The type used for samples.\n\n Returns:\n A jnp.array of samples.\n\n \"\"\"\n if not dtypes.issubdtype(dtype, np.floating):\n raise ValueError(f\"dtype argument to `weibull_min` must be a float \"\n f\"dtype, got {dtype}\")\n dtype = dtypes.canonicalize_dtype(dtype)\n shape = core.canonicalize_shape(shape)\n return _weibull_min(key, scale, concentration, shape, dtype)\n\n\n@partial(jit, static_argnums=(3, 4))\ndef _weibull_min(key, scale, concentration, shape, dtype):\n random_uniform = uniform(\n key=key, shape=shape, minval=0, maxval=1, dtype=dtype)\n\n # Inverse weibull CDF.\n return jnp.power(-jnp.log1p(-random_uniform), 1.0/concentration) * scale\n" ]
[ [ "numpy.sqrt", "numpy.ceil", "numpy.issubdtype", "numpy.float32", "numpy.int64", "numpy.uint32", "numpy.iinfo", "numpy.log", "numpy.shape", "numpy.delete", "numpy.ndim", "numpy.array", "numpy.binary_repr" ] ]
omarsou/kernel_method_kaggle_challenge
[ "0f2e85166112b231699d9c9f7e3ae894e5ff7766" ]
[ "kernel/base_kernel.py" ]
[ "import numpy as np\nimport pickle\n\n\nclass Kernel:\n def __init__(self):\n self.train_phi = None\n self.K_matrix = None\n self.test_phi = None\n self.X_train = None\n pass\n\n def build_gram_matrix(self, X):\n raise NotImplementedError(\"Method build_gram_matrix not implemented.\")\n\n def test(self, x):\n raise NotImplementedError(\"Method test not implemented.\")\n\n def save_kernel(self, path):\n with open(path, \"wb\") as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load_kernel(path):\n with open(path, \"rb\") as f:\n kernel_class = pickle.load(f)\n return kernel_class\n\n\nclass KernelIPExplicit(Kernel):\n def __init__(self):\n super().__init__()\n\n def build_gram_matrix(self, X):\n n = X.shape[0]\n output = np.zeros((n, n))\n self.train_phi = list()\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n self.train_phi.append(self.make_phi(item))\n\n for i in range(n):\n for j in range(i, n):\n value = self.inner_product_phi(self.train_phi[i], self.train_phi[j])\n output[i, j] = output[j, i] = value\n\n self.K_matrix = output\n\n def test(self, indice_x):\n n = len(self.train_phi)\n output = np.zeros(n)\n for i in range(n):\n output[i] = self.inner_product_phi(self.train_phi[i], self.test_phi[indice_x])\n return output\n\n def make_test_phi(self, X):\n n = X.shape[0]\n self.test_phi = []\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n self.test_phi.append(self.make_phi(item, train=False))\n return\n\n def make_phi(self, item, train=True):\n raise NotImplementedError(\"Method make_phi not implemented.\")\n\n def inner_product_phi(self, phi1, phi2):\n raise NotImplementedError(\"Method inner_product_phi not implemented.\")\n\n\nclass KernelIPImplicit(Kernel):\n def __init__(self):\n super().__init__()\n\n def build_gram_matrix(self, X):\n n = X.shape[0]\n self.X_train = X\n output = np.zeros((n, n))\n for i in range(n):\n for j in range(i, n):\n value1, value2 = X.loc[i, X.columns[1]], X.loc[j, X.columns[1]]\n output[i, j] = output[j, i] = self.K(value1, value2)\n self.K_matrix = output\n\n def test(self, x):\n X = self.X_train\n n = X.shape[0]\n output = np.zeros(n)\n for i in range(n):\n output[i] = self.K(X.loc[i, X.columns[1]], x)\n\n def K(self, item1, item2):\n raise NotImplementedError(\"Method K not implemented\")\n\n\nclass SumKernel:\n def __init__(self):\n self.train_phi = list()\n self.K_matrix = None\n self.test_phi = None\n self.X_train = None\n pass\n\n def build_gram_matrix(self, X):\n raise NotImplementedError(\"Method build_gram_matrix_sum not implemented.\")\n\n def build_gram_matrix_one(self, X, param):\n raise NotImplementedError(\"Method build_gram_matrix not implemented.\")\n\n def test(self, x):\n raise NotImplementedError(\"Method test not implemented.\")\n\n def save_kernel(self, path):\n with open(path, \"wb\") as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load_kernel(path):\n with open(path, \"rb\") as f:\n kernel_class = pickle.load(f)\n return kernel_class\n\n\nclass SumKernelIPExplicitError(BaseException):\n pass\n\n\nclass SumKernelIPExplicit(SumKernel):\n def __init__(self, lst_params):\n super().__init__()\n if not isinstance(lst_params, list):\n raise SumKernelIPExplicitError(\"If you want to use only one param, you should use the individual param \"\n \"class method.\")\n self.lst_params = lst_params\n\n def build_gram_matrix(self, X):\n n = X.shape[0]\n output = np.zeros((n, n))\n for params in self.lst_params:\n intermediate_output, train_phi = self.build_gram_matrix_one(X, params)\n self.train_phi.append(train_phi)\n output 
+= intermediate_output\n self.K_matrix = output\n\n def build_gram_matrix_one(self, X, params):\n n = X.shape[0]\n output = np.zeros((n, n))\n train_phi = list()\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n train_phi.append(self.make_phi(item, True, params))\n\n for i in range(n):\n for j in range(i, n):\n value = self.inner_product_phi(train_phi[i], train_phi[j])\n output[i, j] = output[j, i] = value\n\n return output, train_phi\n\n def test(self, indice_x):\n n = len(self.train_phi[0])\n output = np.zeros(n)\n for idx, params in enumerate(self.lst_params):\n current_output = 0\n for i in range(n):\n current_output += self.inner_product_phi(self.train_phi[idx][i], self.test_phi[idx][indice_x])\n return output\n\n def make_test_phi(self, X):\n n = X.shape[0]\n self.test_phi = []\n for params in self.lst_params:\n current_test_phi = list()\n for i in range(n):\n item = X.loc[i, X.columns[1]]\n current_test_phi.append(self.make_phi(item, train=False, params=params))\n self.test_phi.append(current_test_phi)\n return\n\n def make_phi(self, item, train=True, params=None):\n raise NotImplementedError(\"Method make_phi not implemented.\")\n\n def inner_product_phi(self, phi1, phi2):\n raise NotImplementedError(\"Method inner_product_phi not implemented.\")\n" ]
[ [ "numpy.zeros" ] ]
Nebula4869/real-time-object-detection-YOLOv4
[ "a7b692999210747fd49cec2c35f2b7d8d5b7eecc" ]
[ "data_voc.py" ]
[ "import numpy as np\nimport random\nimport xml\nimport cv2\nimport os\n\n\ndef read_file(file_name):\n \"\"\"\n 读取 file_name 文件全部内容\n return:文件内容list\n \"\"\"\n if not os.path.isfile(file_name):\n return None\n result = []\n with open(file_name, 'r') as f:\n for line in f.readlines():\n # 去掉换行符和空格\n line = line.strip('\\n').strip()\n if len(line) == 0:\n continue\n result.append(line)\n return result\n\n\ndef word2id(names_file):\n \"\"\"\n 得到 名字 到 id 的转换字典\n return {}\n \"\"\"\n id_dict = {}\n contents = read_file(names_file)\n for i in range(len(contents)):\n id_dict[str(contents[i])] = i\n return id_dict\n\n\ndef parse_voc_xml(file_name, names_dict):\n \"\"\"\n 解析voc数据集的 xml 文件,每一个列表表示一个图片中的全部标签\n return [ [id1, x1, y1, w1, h1], [id2, x2, y2, w2, h2], ... ]\n \"\"\"\n # print(file_name)\n # print(names_dict)\n result = []\n if not os.path.isfile(file_name):\n return None\n doc = xml.dom.minidom.parse(file_name)\n root = doc.documentElement\n size = root.getElementsByTagName('size')[0]\n width = int(size.getElementsByTagName('width')[0].childNodes[0].data)\n height = int(size.getElementsByTagName('height')[0].childNodes[0].data)\n\n objs = root.getElementsByTagName('object')\n for obj in objs:\n name = obj.getElementsByTagName('name')[0].childNodes[0].data\n name_id = names_dict[name]\n\n bndbox = obj.getElementsByTagName('bndbox')[0]\n xmin = int(float(bndbox.getElementsByTagName('xmin')[0].childNodes[0].data))\n ymin = int(float(bndbox.getElementsByTagName('ymin')[0].childNodes[0].data))\n xmax = int(float(bndbox.getElementsByTagName('xmax')[0].childNodes[0].data))\n ymax = int(float(bndbox.getElementsByTagName('ymax')[0].childNodes[0].data))\n\n x = (xmax + xmin) / 2.0 / width\n w = (xmax - xmin) / width\n y = (ymax + ymin) / 2.0 / height\n h = (ymax - ymin) / height\n\n result.append([name_id, x, y, w, h])\n return result\n\n\nclass Data:\n def __init__(self, voc_root_dir, voc_dir_ls, voc_names, class_num, batch_size, anchors, multi_scale_img=True, width=608, height=608):\n self.data_dirs = [os.path.join(os.path.join(voc_root_dir, voc_dir), \"JPEGImages\") for voc_dir in voc_dir_ls] # 数据文件路径\n self.class_num = class_num # 分类数\n self.batch_size = batch_size\n self.anchors = np.asarray(anchors).astype(np.float32).reshape([-1, 2]) / [width, height] # [9,2]\n print(\"anchors:\\n\", self.anchors)\n self.multi_scale_img = multi_scale_img # 多尺度缩放图片\n\n self.imgs_path = []\n self.labels_path = []\n\n self.num_batch = 0 # 多少个 batch 了\n self.num_imgs = 0 # 一共多少张图片\n\n self.width = width\n self.height = height\n\n self.names_dict = word2id(voc_names) # 名字到 id 的字典\n\n # 初始化各项参数\n self.__init_args()\n \n # 初始化各项参数\n def __init_args(self):\n print(\"message:开始初始化路径\")\n\n # init imgs path\n for voc_dir in self.data_dirs:\n for img_name in os.listdir(voc_dir):\n img_path = os.path.join(voc_dir, img_name)\n label_path = img_path.replace(\"JPEGImages\", \"Annotations\")\n label_path = label_path.replace(img_name.split('.')[-1], \"xml\")\n if not os.path.isfile(img_path):\n print(\"warning:VOC 图片文件'\"+str(img_path)+\"'不存在\")\n continue\n if not os.path.isfile(label_path):\n print(\"warning:VOC 标签文件'\"+str(label_path)+\"'不存在\")\n continue\n self.imgs_path.append(img_path)\n self.labels_path.append(label_path)\n self.num_imgs += 1 \n print(\"message:VOC 数据初始化完成,一共有 \"+str(self.num_imgs)+\" 张图片\")\n \n if self.num_imgs <= 0:\n raise ValueError(\"没有可训练的图片, 程序退出\")\n \n return\n \n # 读取图片\n def read_img(self, img_file):\n \"\"\"\n 读取 img_file, 并 resize\n return:img, RGB & float\n \"\"\"\n if not 
os.path.exists(img_file):\n return None\n img = cv2.imread(img_file)\n img = cv2.resize(img, (self.width, self.height))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = img.astype(np.float32)\n img = img/255.0\n return img\n \n # 读取标签\n def read_label(self, label_file, names_dict):\n \"\"\"\n 读取 label_file, 并生成 label_y1, label_y2, label_y3\n return:label_y1, label_y2, label_y3\n \"\"\"\n contents = parse_voc_xml(label_file, names_dict) \n if not contents:\n return None, None, None\n\n label_y1 = np.zeros((self.height // 32, self.width // 32, 3, 5 + self.class_num), np.float32)\n label_y2 = np.zeros((self.height // 16, self.width // 16, 3, 5 + self.class_num), np.float32)\n label_y3 = np.zeros((self.height // 8, self.width // 8, 3, 5 + self.class_num), np.float32)\n\n y_true = [label_y3, label_y2, label_y1]\n ratio = {0: 8, 1: 16, 2: 32}\n\n for label in contents:\n label_id = int(label[0])\n box = np.asarray(label[1: 5]).astype(np.float32) # label中保存的就是 x,y,w,h\n\n best_giou = 0\n best_index = 0\n for i in range(len(self.anchors)):\n min_wh = np.minimum(box[2:4], self.anchors[i])\n max_wh = np.maximum(box[2:4], self.anchors[i])\n giou = (min_wh[0] * min_wh[1]) / (max_wh[0] * max_wh[1])\n if giou > best_giou:\n best_giou = giou\n best_index = i\n \n # 012->0, 345->1, 678->2\n x = int(np.floor(box[0] * self.width / ratio[best_index // 3]))\n y = int(np.floor(box[1] * self.height / ratio[best_index // 3]))\n k = best_index % 3\n\n y_true[best_index // 3][y, x, k, 0:4] = box\n y_true[best_index // 3][y, x, k, 4:5] = 1.0\n y_true[best_index // 3][y, x, k, 5 + label_id] = 1.0\n \n return label_y1, label_y2, label_y3\n\n # 加载 batch_size 的数据\n def __get_data(self):\n \"\"\"\n 加载 batch_size 的标签和数据\n return:imgs, label_y1, label_y2, label_y3\n \"\"\"\n # 十个 batch 随机一次 size \n if self.multi_scale_img and (self.num_batch % 10 == 0):\n random_size = random.randint(10, 19) * 32\n self.width = self.height = random_size\n \n imgs = []\n labels_y1, labels_y2, labels_y3 = [], [], []\n\n count = 0\n while count < self.batch_size:\n curr_index = random.randint(0, self.num_imgs - 1)\n img_name = self.imgs_path[curr_index]\n label_name = self.labels_path[curr_index]\n\n img = self.read_img(img_name)\n label_y1, label_y2, label_y3 = self.read_label(label_name, self.names_dict)\n if img is None:\n print(\"VOC 文件'\" + img_name + \"'读取异常\")\n continue\n if label_y1 is None:\n print(\"VOC 文件'\" + label_name + \"'读取异常\")\n continue\n imgs.append(img)\n labels_y1.append(label_y1)\n labels_y2.append(label_y2)\n labels_y3.append(label_y3)\n\n count += 1\n\n self.num_batch += 1\n imgs = np.asarray(imgs)\n labels_y1 = np.asarray(labels_y1)\n labels_y2 = np.asarray(labels_y2)\n labels_y3 = np.asarray(labels_y3)\n \n return imgs, labels_y1, labels_y2, labels_y3\n\n # 迭代器\n def __next__(self):\n \"\"\"\n 迭代获得一个 batch 的数据\n \"\"\"\n return self.__get_data()\n" ]
[ [ "numpy.zeros", "numpy.floor", "numpy.asarray", "numpy.maximum", "numpy.minimum" ] ]
carlos-alcan/network_app_classification
[ "faa19842ed17b277259dd64e14c7133ce6a61e56" ]
[ "engineered_features.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 16 12:14:37 2019\n\n@author: carlosalcantara\n\"\"\"\n\n'''\nExpand data with engineered features using the feature_engineering_function.py\nSaves new csv file with specified name, overwriting input file if no save file\nname is given.\n\nUsage: engineered_features.py csvfile [savefile=csvfile]\n'''\nimport pandas as pd\nimport sys\nimport feature_engineering_function\n\n# Check for command line arguments\nif len(sys.argv) < 1:\n print('Usage: engineered_features.py csvfile [savefile=csvfile]')\n sys.exit(-1)\n\n# use original file name as new csv filename if none specified\nfile = sys.argv[1]\nif len(sys.argv) > 2:\n savefile = sys.argv[2]\nelse:\n savefile = file\n\n# read NetFlow data file\ndf = pd.read_csv(file)\n# add engineered features\ndf = feature_engineering_function.BLINC_features(df)\n# write NetFlow data file\ndf.to_csv(savefile, index=False)" ]
[ [ "pandas.read_csv" ] ]
CAMeL-Lab/CAMeLBERT_morphosyntactic_tagger
[ "5bea542c2e731d263281d0ab16ba9c065f602f94" ]
[ "scripts/run_token_classification.py" ]
[ "# -*- coding: utf-8 -*-\n\n# MIT License\n#\n# Copyright 2018-2021 New York University Abu Dhabi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\" Fine-tuning pre-trained models for token classification tasks.\n Heavily adapted from: https://github.com/huggingface/transformers/blob/\n v3.0.1/examples/token-classification/run_ner.py\"\"\"\n\n\nimport logging\nimport os\nimport sys\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nfrom seqeval.metrics import (\n accuracy_score as seq_accuracy_score,\n f1_score as seq_f1_score,\n precision_score as seq_precision_score,\n recall_score as seq_recall_score\n)\nfrom sklearn.metrics import (\n accuracy_score,\n f1_score,\n precision_score,\n recall_score\n)\n\nfrom torch import nn\nfrom transformers import (\n AutoConfig,\n AutoModelForTokenClassification,\n AutoTokenizer,\n EvalPrediction,\n HfArgumentParser,\n Trainer,\n TrainingArguments,\n set_seed,\n)\nfrom utils import TokenClassificationDataSet, Split, get_labels\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are\n going to fine-tune from.\n \"\"\"\n\n model_name_or_path: str = field(\n metadata={\"help\": \"Path to pretrained model or model identifier from \"\n \"huggingface.co/models\"}\n )\n\n config_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained config name or path if \"\n \"not the same as model_name\"}\n )\n\n # If you want to tweak more attributes on your tokenizer, you should do it\n # in a distinct script, or just modify its tokenizer_config.json.\n\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\"help\": \"Pretrained tokenizer name or path if \"\n \"not the same as model_name\"}\n )\n\n use_fast: bool = field(default=False, metadata={\"help\": \"Set this flag to \"\n \"use fast \"\n \"tokenization.\"})\n task_type: Optional[str] = field(\n default=\"ner\", metadata={\"help\": \"the name of the task (ner or pos)\"}\n )\n\n cache_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Where do you want to store the \"\n \"pretrained models downloaded from s3\"}\n )\n\n\n@dataclass\nclass DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for \n training and eval.\n \"\"\"\n\n data_dir: str = field(\n metadata={\"help\": \"The input data dir. 
Should contain the .txt files \"\n \"for a CoNLL-2003-formatted task.\"}\n )\n labels: Optional[str] = field(\n default=None,\n metadata={\"help\": \"Path to a file containing all labels.\"},\n )\n max_seq_length: int = field(\n default=128,\n metadata={\n \"help\": \"The maximum total input sequence length after \"\n \"tokenization. Sequences longer than this will be truncated, \"\n \"sequences shorter will be padded.\"\n },\n )\n overwrite_cache: bool = field(\n default=False, metadata={\"help\": \"Overwrite the cached training and \"\n \"evaluation sets\"}\n )\n blind_test: bool = field(\n default=False, metadata={\"help\": \"Use blind test set\"}\n )\n\n\ndef main():\n # See all possible arguments in src/transformers/training_args.py\n # or by passing the --help flag to this script.\n # We now keep distinct sets of args, for a cleaner separation of concerns.\n\n parser = HfArgumentParser((ModelArguments,\n DataTrainingArguments,\n TrainingArguments))\n if len(sys.argv) == 2 and sys.argv[1].endswith(\".json\"):\n # If we pass only one argument to the script and it's the path to a\n # json file, let's parse it to get our arguments.\n model_args, data_args, training_args = parser.parse_json_file(\n json_file=os.path.abspath(\n sys.argv[1]))\n else:\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n if (\n os.path.exists(training_args.output_dir)\n and os.listdir(training_args.output_dir)\n and training_args.do_train\n and not training_args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({training_args.output_dir}) already exists \"\n \"and is not empty. Use --overwrite_output_dir to overcome.\"\n )\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=(logging.INFO if training_args.local_rank in [-1, 0]\n else logging.WARN),\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, \"\n \"16-bits training: %s\",\n training_args.local_rank,\n training_args.device,\n training_args.n_gpu,\n bool(training_args.local_rank != -1),\n training_args.fp16,\n )\n logger.info(\"Training/evaluation parameters %s\", training_args)\n\n # Set seed\n set_seed(training_args.seed)\n\n # Prepare task\n labels = get_labels(data_args.labels)\n label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)}\n num_labels = len(labels)\n\n # Load pretrained model and tokenizer\n #\n # Distributed training:\n # The .from_pretrained methods guarantee that only one local process can\n # concurrently download model & vocab.\n\n config = AutoConfig.from_pretrained(\n (model_args.config_name if model_args.config_name\n else model_args.model_name_or_path),\n num_labels=num_labels,\n id2label=label_map,\n label2id={label: i for i, label in enumerate(labels)},\n cache_dir=model_args.cache_dir,\n )\n tokenizer = AutoTokenizer.from_pretrained(\n (model_args.tokenizer_name if model_args.tokenizer_name\n else model_args.model_name_or_path),\n cache_dir=model_args.cache_dir,\n use_fast=model_args.use_fast,\n )\n model = AutoModelForTokenClassification.from_pretrained(\n model_args.model_name_or_path,\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\n config=config,\n cache_dir=model_args.cache_dir,\n )\n\n # Get datasets\n train_dataset = (\n TokenClassificationDataSet(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n 
overwrite_cache=data_args.overwrite_cache,\n mode=Split.train,\n )\n if training_args.do_train\n else None\n )\n eval_dataset = (\n TokenClassificationDataSet(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=Split.dev,\n )\n if training_args.do_eval\n else None\n )\n\n def align_predictions(predictions: np.ndarray,\n label_ids: np.ndarray) -> Tuple[List[int], List[int]]:\n preds = np.argmax(predictions, axis=2)\n\n batch_size, seq_len = preds.shape\n\n out_label_list = [[] for _ in range(batch_size)]\n preds_list = [[] for _ in range(batch_size)]\n\n for i in range(batch_size):\n for j in range(seq_len):\n if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:\n out_label_list[i].append(label_map[label_ids[i][j]])\n preds_list[i].append(label_map[preds[i][j]])\n\n return preds_list, out_label_list\n\n def compute_metrics(p: EvalPrediction) -> Dict:\n preds_list, out_label_list = align_predictions(p.predictions,\n p.label_ids)\n # If task type is NER, use seqeval metrics.\n # Otherwise, use scikit learn\n if model_args.task_type == \"ner\":\n return {\n \"accuracy\": seq_accuracy_score(out_label_list, preds_list),\n \"precision\": seq_precision_score(out_label_list, preds_list),\n \"recall\": seq_recall_score(out_label_list, preds_list),\n \"f1\": seq_f1_score(out_label_list, preds_list),\n }\n else:\n # Flatten the preds_list and out_label_list\n preds_list = [p for sublist in preds_list for p in sublist]\n out_label_list = [p for sublist in out_label_list for p in sublist]\n return {\n \"accuracy\": accuracy_score(out_label_list, preds_list),\n \"precision_micro\": precision_score(out_label_list, preds_list,\n average=\"micro\"),\n \"recall_micro\": recall_score(out_label_list, preds_list,\n average=\"micro\"),\n \"f1_micro\": f1_score(out_label_list, preds_list,\n average=\"micro\"),\n \"precision_macro\": precision_score(out_label_list, preds_list,\n average=\"macro\"),\n \"recall_macro\": recall_score(out_label_list, preds_list,\n average=\"macro\"),\n \"f1_macro\": f1_score(out_label_list, preds_list,\n average=\"macro\"),\n }\n\n # Initialize our Trainer\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n compute_metrics=compute_metrics,\n )\n\n # Training\n if training_args.do_train:\n trainer.train(\n model_path=(model_args.model_name_or_path \n if os.path.isdir(model_args.model_name_or_path)\n else None)\n )\n trainer.save_model()\n # For convenience, we also re-save the tokenizer to the same directory,\n # so that you can share your model easily on huggingface.co/models =)\n if trainer.is_world_master():\n tokenizer.save_pretrained(training_args.output_dir)\n\n # Evaluation\n results = {}\n if training_args.do_eval:\n logger.info(\"*** Evaluate ***\")\n\n result = trainer.evaluate()\n\n output_eval_file = os.path.join(training_args.output_dir,\n \"eval_results.txt\")\n if trainer.is_world_master():\n with open(output_eval_file, \"w\") as writer:\n logger.info(\"***** Eval results *****\")\n for key, value in result.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n\n results.update(result)\n\n # Predict\n if training_args.do_predict:\n data_split = Split.test\n if data_args.blind_test:\n data_split = Split.blind_test\n test_dataset = TokenClassificationDataSet(\n data_dir=data_args.data_dir,\n tokenizer=tokenizer,\n 
labels=labels,\n model_type=config.model_type,\n max_seq_length=data_args.max_seq_length,\n overwrite_cache=data_args.overwrite_cache,\n mode=data_split,\n )\n\n predictions, label_ids, metrics = trainer.predict(test_dataset)\n preds_list, _ = align_predictions(predictions, label_ids)\n\n output_test_results_file = os.path.join(training_args.output_dir,\n f\"{data_split.value}_results.txt\")\n if trainer.is_world_master():\n with open(output_test_results_file, \"w\") as writer:\n for key, value in metrics.items():\n logger.info(\" %s = %s\", key, value)\n writer.write(\"%s = %s\\n\" % (key, value))\n\n # Save predictions\n output_test_predictions_file = os.path.join(training_args.output_dir,\n f\"{data_split.value}_predictions.txt\")\n if trainer.is_world_master():\n with open(output_test_predictions_file, \"w\") as writer:\n with open(os.path.join(data_args.data_dir, f\"{data_split.value}.txt\"), \"r\") as f:\n example_id = 0\n for line in f:\n if (line.startswith(\"-DOCSTART-\") or line == \"\"\n or line == \"\\n\"):\n writer.write(line)\n if not preds_list[example_id]:\n example_id += 1\n elif preds_list[example_id]:\n output_line = (line.split()[0] + \" \" + \n preds_list[example_id].pop(0) + \"\\n\")\n writer.write(output_line)\n else:\n logger.warning(\n \"Maximum sequence length exceeded: \"\n \"No prediction for '%s'.\", line.split()[0])\n\n return results\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.argmax", "torch.nn.CrossEntropyLoss", "sklearn.metrics.f1_score", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_score", "sklearn.metrics.recall_score" ] ]
chunfuchen/qiskit-acqua-tutorials
[ "74b0bcaac1678fc6c0de5be13e99d7ecd11b3075" ]
[ "artificial_intelligence/qsvm_kernel_multiclass.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nfrom datasets import *\nfrom qiskit_aqua.utils import split_dataset_to_data_and_labels\nfrom qiskit_aqua.input import get_input_instance\nfrom qiskit_aqua import run_algorithm\nimport numpy as np\n\nn = 2 # dimension of each data point\n\nsample_Total, training_input, test_input, class_labels = Wine(training_size=40,\n test_size=10, n=n, PLOT_DATA=False)\n\ntemp = [test_input[k] for k in test_input]\ntotal_array = np.concatenate(temp)\n\nparams = {\n 'problem': {'name': 'svm_classification', 'random_seed': 10598},\n 'algorithm': {\n 'name': 'QSVM.Kernel',\n },\n 'backend': {'name': 'qasm_simulator', 'shots': 1024},\n # 'multiclass_extension': {'name': 'OneAgainstRest'},\n 'multiclass_extension': {'name': 'AllPairs'},\n # 'multiclass_extension': {'name': 'ErrorCorrectingCode', 'code_size': 5},\n 'feature_map': {'name': 'SecondOrderExpansion', 'depth': 2, 'entangler_map': {0: [1]}}\n }\n\nalgo_input = get_input_instance('SVMInput')\nalgo_input.training_dataset = training_input\nalgo_input.test_dataset = test_input\nalgo_input.datapoints = total_array\n\nresult = run_algorithm(params, algo_input)\nprint(result)\n" ]
[ [ "numpy.concatenate" ] ]
danielbee/PracticalIntroDataSci
[ "feecd7d1b18ba44fb3ea59d7709c2ff493c0c79f" ]
[ "scripts/parse_weather.py" ]
[ "# The purpose of this script is to collect all the station data into a single data structure. \n# This will require regular expressions to find things like station changes. \n\n#the hope is that we can simply export this single data structure to a single file is whatever format we want. \n\n# Need to figure out how to deal with 'null' values. \nimport re\n\nimport pandas as pd\n\ndef main(): \n\n dataPath = '../data/weather/'\n dataStationPath = dataPath+'stations/'\n with open(dataPath+'stations.txt') as f:\n stations = f.read().splitlines()\n bigFrame = []\n stationDataRaw = {}\n for station in stations: \n print(station)\n stationDataRaw[station]= open(dataStationPath+station+'.txt').read().splitlines()\n stationFrame = getDataFrame(stationDataRaw[station])\n # Extract things like height above sea level, longitude and latitude and site changes.\n stationFrame = getDataExtras(stationDataRaw[station],stationFrame)\n # add a column for the station\n stationFrame['station'] = station\n # Make station column the most signifiant index in the multiIndex\n stationFrame.set_index(['station', stationFrame.index],inplace=True)\n # Append to list of dataframes\n bigFrame.append(stationFrame)\n # Combine all the dataframes\n stationsData = pd.concat(bigFrame)\n # print(stationsData.reset_index().dtypes)\n # Print out in desired formats\n stationsData.to_excel(dataPath+'stationData.xlsx')\n stationsData.to_csv(dataPath+'stationData.csv')\n \n stationsData.to_string(dataPath+'stationData.txt')\n\n# bit of an assumption\ntableStart = re.compile('\\s{3}yyyy')\nreWord = re.compile('\\w+')\nreNum = re.compile('[0-9.]+')\ndef getDataFrame(raw):\n for ln,line in enumerate(raw):\n if re.search(tableStart,line):\n tableStartLine = ln\n # stop going through lines\n break\n\n table = raw[tableStartLine:]\n # remove empty string lines\n table = list(filter(None, table))\n headers= table[0].split()\n #print(headers)\n prevEnd = 0\n units = {}\n headerCols = [re.search(header,table[0]) for header in headers]\n for colI,col in enumerate(headerCols):\n units[headers[colI]] = reWord.findall(table[1],prevEnd,col.end())\n prevEnd = col.end()\n records = []\n for row in table[2:]:\n \n prevEnd = 0\n record = {}\n for colI,col in enumerate(headerCols):\n res= reNum.findall(row,prevEnd,col.end())\n \n record[headers[colI]] = res[0] if res else None\n prevEnd = col.end()\n if record['yyyy'] != None:\n records.append(record)\n \n df = pd.DataFrame.from_dict(records)\n df[['yyyy','mm']] = df[['yyyy','mm']].astype(int)\n # other columns\n df[['tmax','tmin','af','rain','sun']] = df[['tmax','tmin','af','rain','sun']].astype(float)\n df.set_index(['yyyy', 'mm'],inplace=True)\n #print(df)\n return df\n \nimport math\ndef getDataExtras(raw,df):\n topRaw = '\\n'.join(raw[0:20])\n\n gridRef = re.findall(r'\\d+E \\d+N',topRaw)\n asml=[]\n latlon=[]\n lowerYr=[]\n upperYrMonth=[]\n upperYr=[]\n ## Extract Features\n for line in raw[0:20]:\n if re.search(gridRef[0],line):\n print(line)\n if len(gridRef) > 1 : \n yearSearch = re.search(r'([1-2][7-9,0][0-9]{2})?\\s+(\\bfrom\\b|\\bafter\\b|\\bto\\b|\\buntil\\b)\\s+([a-zA-Z]*)\\s*([1-2][7-9,0][0-9]{2})',line)\n #print(yearSearch)\n if yearSearch:\n lowerYr.append(yearSearch.group(1))\n upperYrMonth.append(yearSearch.group(3))\n upperYr.append(yearSearch.group(4))\n print('from {} to {} {}'.format(lowerYr[0],upperYrMonth[0],upperYr[0]))\n\n asml.append(re.search(r'(\\d+)\\s*m\\w*\\samsl',line).group(1))\n latlonSearch = re.search(r'lat\\s*(-*\\d+\\.\\d+) 
lon\\s*(-*\\d+\\.\\d+)',str.lower(line))\n if latlonSearch:\n latlon.append((latlonSearch.group(1),latlonSearch.group(2)))\n else:\n #print(\"No long lat!!\")\n latlon.append(getLatLong(gridRef[0]))\n if len(gridRef) > 1 :\n # we have site change\n if re.search(gridRef[1],line):\n print(line)\n yearSearch = re.search(r'([1-2][7-9,0][0-9]{2})?\\s+(\\bfrom\\b|\\bafter\\b|\\bto\\b)\\s+([a-zA-Z]*)\\s*([1-2][7-9,0][0-9]{2})',line)\n #print(yearSearch)\n if yearSearch:\n lowerYr.append(yearSearch.group(1))\n upperYrMonth.append(yearSearch.group(3))\n upperYr.append(yearSearch.group(4))\n print('from {} to {} {}'.format(lowerYr[-1],upperYrMonth[-1],upperYr[-1]))\n asml.append(re.search(r'(\\d+)\\s*m\\w*\\samsl',line).group(1))\n latlonSearch = re.search(r'lat\\s*(-*\\d+\\.\\d+) lon\\s*(-*\\d+\\.\\d+)',str.lower(line))\n if latlonSearch:\n latlon.append((latlonSearch.group(1),latlonSearch.group(2)))\n else:\n #print(\"No long lat!!\")\n latlon.append(getLatLong(gridRef[0]))\n #print('asml:{}\\nlatlon:{}'.format(asml,latlon))\n ## Add features to dataframe\n\n # This is wrong, but i just want to get data in there and start classify.\n # Tehcnically, we should determine site changes , which may have a significant impact on frost days if asml gets higher. \n extra_df = setExtrasInDf(df,\n df_filter= df.index.get_level_values('yyyy') > 0,\n asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n )\n \n with open('dfL.txt','a') as f:\n print(extra_df.to_string(), file=f)\n return extra_df\n if len(gridRef) >1:\n # Need to apply features using extracted years. \n #print(df.dtypes)\n tempTypeDf = df.reset_index()\n #tempTypeDf[['yyyy','mm']] = tempTypeDf[['yyyy','mm']].astype(int)\n #tempTypeDf[['tmax','tmin','af','rain','sun']] = tempTypeDf[['tmax','tmin','af','rain','sun']].astype(float)\n #defensive\n if len(lowerYr) >0 and len(upperYr) >0:\n # We were able to find SOMETHING we can use.\n print('lower: {} \\t upper: {} \\t month {}'.format(lowerYr,upperYr,upperYrMonth))\n #if upperYr[0] > lowerYr[1]: \n # print('issue')\n if len(lowerYr) == 1:\n # super simple\n #if upperYrMonth[0]:\n # \n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']<int(upperYr[0]) or (tempTypeDf['yyyy']==int(upperYr[0]) and tempTypeDf['mm']<int(upperYrMonth[0])),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[0]) or (tempTypeDf['yyyy']==int(upperYr[0]) and tempTypeDf['mm']>=int(upperYrMonth[0])),\n # asml=asml[1], lat=latlon[1][0],long=latlon[1][1],gridRef=gridRef[1]\n # )\n #else:\n tempTypeDf = setExtrasInDf(tempTypeDf,\n df_filter= tempTypeDf['yyyy']<int(upperYr[0]),\n asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n )\n\n tempTypeDf = setExtrasInDf(tempTypeDf,\n df_filter=tempTypeDf['yyyy']>=int(upperYr[0]),\n asml=asml[1], lat=latlon[1][0],long=latlon[1][1],gridRef=gridRef[1]\n )\n #if lowerYr[0] and upperYr[0]:\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(lowerYr[0]) and tempTypeDf['yyyy']<int(upperYr[0]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[0]),\n # asml=asml[1], lat=latlon[1][0],long=latlon[1][1],gridRef=gridRef[1]\n # )\n #elif upperYr[0] and lowerYr[0] == None:\n \n\n #if lowerYr[0] == None and lowerYr[1] == None:\n # if upperYr[0] and upperYr[1]:\n # # Nice simple case\n # 
if upperYr[0] == upperYr[1]:\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']<int(upperYr[1]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n # tempTypeDf = setExtrasInDf(tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[1]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0]\n # )\n ## TODO: \n #if upperYrMonth[0] and upperYrMonth[1] :\n#\n #elif upperYrMonth[0] and upperYrMonth[1] == None:\n #elif upperYrMonth[1]:\n #else:\n else : \n print('unable to aquire site change year. Will dump other grid refs of {} and keep only {}.'.format(gridRef[1:],gridRef[0]))\n if len(upperYr) >0 :\n #tempTypeDf = setExtrasInDf(\n # tempTypeDf,\n # df_filter= tempTypeDf['yyyy']<int(upperYr[-1]),\n # asml=asml[0], lat=latlon[0][0],long=latlon[0][1],gridRef=gridRef[0])\n #tempTypeDf = setExtrasInDf(\n # tempTypeDf,\n # df_filter= tempTypeDf['yyyy']>=int(upperYr[-1]),\n # asml=asml[-1], lat=latlon[-1][0],long=latlon[-1][1],gridRef=gridRef[-1])\n #tempTypeDf.loc[tempTypeDf['yyyy']<int(upperYr[-1]),'asml'] = int(asml[0])\n #tempTypeDf.loc[tempTypeDf['yyyy']<int(upperYr[-1]),'Lat'] = float(latlon[0][0])\n #tempTypeDf.loc[tempTypeDf['yyyy']<int(upperYr[-1]),'Long'] = float(latlon[0][1])\n #tempTypeDf.loc[tempTypeDf['yyyy']>=int(upperYr[-1]),'asml'] = int(asml[-1])\n #tempTypeDf.loc[tempTypeDf['yyyy']>=int(upperYr[-1]),'Lat'] = float(latlon[-1][0])\n #tempTypeDf.loc[tempTypeDf['yyyy']>=int(upperYr[-1]),'Long'] = float(latlon[-1][1])\n #print(len(tempTypeDf.reset_index()['yyyy'])) \n #print(len([int(x) for x in tempTypeDf.index.get_level_values('yyyy').values if (math.isnan(float(x)) == False)]))\n with open('df.txt','a') as f:\n print(tempTypeDf.to_string(), file=f)\n # print(tempTypeDf.reset_index().dropna(subset=['yyyy']).to_string(), file=f)\n #with open('df_before.txt','w') as f:\n # print(tempTypeDf.reset_index().to_string(), file=f)\n #.loc[:(upperYr[-1],),:])\n #print([int(x) for x in tempTypeDf.index.get_level_values('yyyy').values if (math.isnan(float(x)) == False and x == upperYr[-1])])\n #print([int(x) for x in tempTypeDf.index.get_level_values('yyyy').values if (math.isnan(float(x)) == False and x != upperYr[-1])])\n\ndef setExtrasInDf(df, df_filter, asml, lat, long, gridRef): \n df.loc[df_filter,'asml'] = int(asml)\n df.loc[df_filter,'lat'] = float(lat)\n df.loc[df_filter,'long'] = float(long)\n df.loc[df_filter,'gridRef'] = str(gridRef)\n return df\ndef getLatLong(gridRef):\n import requests\n page = requests.get('http://www.nearby.org.uk/coord.cgi?p='+gridRef+'&f=conv')\n #print(page.text)\n pageSearch = re.search(r'Decimal: <B>(-*\\d+\\.\\d+) (-*\\d+\\.\\d+)</B>',page.text)\n return (pageSearch.group(1),pageSearch.group(2))\nmain()" ]
[ [ "pandas.concat", "pandas.DataFrame.from_dict" ] ]
51N84D/Virtual-Try-On
[ "3b3d4f6066885446e2a6eadb6c2668237e62e03b" ]
[ "data/dataloader.py" ]
[ "# coding=utf-8\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader, Dataset\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom addict import Dict\nimport os.path as osp\nimport numpy as np\nimport argparse\nimport matplotlib.pyplot as plt\nimport sys\nimport cv2\nimport json\n\nclass CPDataset(data.Dataset):\n def __init__(self, opt):\n super(CPDataset, self).__init__()\n # base setting\n self.opt = opt\n\n self.dataroot = opt.data.files.base\n\n if opt.model.is_train:\n self.datamode = \"train\"\n self.data_list = opt.data.files.train\n else:\n self.datamode = \"test\"\n self.data_list = opt.data.files.test\n\n\n print(self.data_list)\n self.fine_height = opt.data.transforms.height\n self.fine_width = opt.data.transforms.width\n self.radius = opt.data.transforms.radius\n\n self.data_path = osp.join(self.dataroot, self.datamode)\n\n self.transform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]\n )\n\n # load data list\n im_names = []\n c_names = []\n\n with open(osp.join(self.dataroot, self.data_list), \"r\") as f:\n print(f)\n for line in f.readlines():\n im_name, c_name = line.strip().split()\n im_names.append(im_name)\n c_names.append(c_name)\n\n self.im_names = im_names\n self.c_names = c_names\n\n def name(self):\n return \"CPDataset\"\n\n def __getitem__(self, index):\n c_name = self.c_names[index]\n im_name = self.im_names[index]\n\n # cloth image & cloth mask\n c = Image.open(osp.join(self.data_path, \"cloth\", c_name))\n #c.show()\n cm = Image.open(osp.join(self.data_path, \"cloth-mask\", c_name))\n\n c = self.transform(c) # [-1,1]\n cm_array = np.array(cm)\n cm_array = (cm_array >= 128).astype(np.float32)\n cm = torch.from_numpy(cm_array) # [0,1]\n cm.unsqueeze_(0)\n\n # person image\n im = Image.open(osp.join(self.data_path, \"image\", im_name))\n im = self.transform(im) # [-1,1]\n\n # load parsing image\n parse_name = im_name.replace(\".jpg\", \".png\")\n im_parse = Image.open(osp.join(self.data_path, \"image-parse\", parse_name))\n parse_array = np.array(im_parse)\n\n # -------Find segmentation class labels manually\n #Image1 = Image.open(osp.join(self.data_path, 'image-parse', parse_name))\n Image2 = Image.open(osp.join(self.data_path, \"image\", im_name))\n\n #plt.imshow(Image1)\n #plt.imshow(parse_array, alpha=0.5)\n #plt.imshow(Image2)\n\n #plt.colorbar()\n #plt.show()\n # shirt = 126, pants = 59\n # hair = 76, face = 29\n # ------End\n\n parse_shape = (parse_array > 0).astype(np.float32)\n\n parse_cloth = (parse_array == 126).astype(np.float32)\n\n # get cropped top img\n source = Image.open(osp.join(self.data_path, \"image\", im_name))\n mask = Image.fromarray(np.uint8(255 * parse_cloth)).convert(\"L\")\n blankImg = Image.new(\"RGB\", (self.fine_height, self.fine_width), (255, 255, 255))\n\n imgCropped = Image.composite(source, blankImg, mask)\n #imgCropped.show()\n #mask.show()\n imgCropped = self.transform(imgCropped) # [-1,1]\n\n # shape downsample\n parse_shape = Image.fromarray((parse_shape * 255).astype(np.uint8))\n parse_shape = parse_shape.resize(\n (self.fine_width // 16, self.fine_height // 16), Image.BILINEAR\n )\n parse_shape = parse_shape.resize((self.fine_width, self.fine_height), Image.BILINEAR)\n shape = self.transform(parse_shape) # [-1,1]\n pcm = torch.from_numpy(parse_cloth) # [0,1]\n #plt.imshow(pcm)\n #plt.show()\n\n # clean up\n im_c = im * pcm + (1 - pcm) # [-1,1], fill 1 for other parts\n\n pcm = pcm.unsqueeze_(0) \n\n 
#-----pose\n pose_name = im_name.replace('.jpg', '_keypoints.json')\n with open(osp.join(self.data_path, 'pose', pose_name), 'r') as f:\n pose_label = json.load(f)\n pose_data = pose_label['people'][0]['pose_keypoints']\n pose_data = np.array(pose_data)\n pose_data = pose_data.reshape((-1,3))\n\n point_num = pose_data.shape[0]\n pose_map = torch.zeros(point_num, self.fine_height, self.fine_width)\n r = self.radius\n im_pose = Image.new('L', (self.fine_width, self.fine_height))\n pose_draw = ImageDraw.Draw(im_pose)\n for i in range(point_num):\n one_map = Image.new('L', (self.fine_width, self.fine_height))\n draw = ImageDraw.Draw(one_map)\n pointx = pose_data[i,0]\n pointy = pose_data[i,1]\n if pointx > 1 and pointy > 1:\n draw.ellipse((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')\n pose_draw.ellipse((pointx-r, pointy-r, pointx+r, pointy+r), 'white', 'white')\n #plt.imshow(one_map, cmap='jet', alpha=.9)\n #plt.show()\n one_map = self.transform(one_map) #[-1,1]\n pose_map[i] = one_map[0]\n\n #plt.imshow(im_pose, cmap='jet', alpha=0.5)\n #plt.show()\n\n #for i in range(18):\n # show_ = np.squeeze(pose_map[i])\n # plt.imshow(Image2)\n # plt.imshow(show_, cmap=\"jet\", alpha=.5)\n # plt.show()\n\n #just for visualization\n im_pose = self.transform(im_pose)\n\n\n result = {\n \"c_name\": c_name, # for visualization\n \"im_name\": im_name, # for visualization or ground truth\n \"pose_image\": im_pose, #visualize pose, can overlay with image for better visualization\n \"pose\": pose_map, #for input\n \"cloth\": c, # for input\n \"cloth_mask\": cm, # for input\n \"image\": imgCropped, # for visualization\n \"parse_cloth\": pcm, # was im_c # for ground truth\n \"shape\": shape, # for visualization\n }\n\n return Dict(result)\n\n def __len__(self):\n return len(self.im_names)\n\n\nclass CPDataLoader(object):\n def __init__(self, opt, dataset):\n super(CPDataLoader, self).__init__()\n\n if opt.data.loaders.shuffle:\n train_sampler = torch.utils.data.sampler.RandomSampler(dataset)\n else:\n train_sampler = None\n\n self.data_loader = torch.utils.data.DataLoader(\n dataset,\n batch_size=opt.data.loaders.batch_size,\n shuffle=(train_sampler is None),\n num_workers=opt.data.loaders.num_workers,\n pin_memory=True,\n sampler=train_sampler,\n )\n self.dataset = dataset\n self.data_iter = self.data_loader.__iter__()\n\n def next_batch(self):\n try:\n batch = self.data_iter.__next__()\n except StopIteration:\n self.data_iter = self.data_loader.__iter__()\n batch = self.data_iter.__next__()\n\n return batch\n\n\ndef get_loader(opts):\n return DataLoader(\n CPDataset(opts),\n batch_size=opts.data.loaders.get(\"batch_size\", 4),\n shuffle=True,\n num_workers=opts.data.loaders.get(\"num_workers\", 8),\n )\n" ]
[ [ "torch.utils.data.DataLoader", "torch.zeros", "torch.from_numpy", "numpy.array", "torch.utils.data.sampler.RandomSampler", "numpy.uint8" ] ]
pmohtat/PyBaMM
[ "8f0a6d82e26c19f5735ed81b55671574af29eb16" ]
[ "tests/unit/test_expression_tree/test_operations/test_jac.py" ]
[ "#\n# Tests for the jacobian methods\n#\nimport pybamm\n\nimport numpy as np\nimport unittest\nfrom scipy.sparse import eye\nfrom tests import get_mesh_for_testing\n\n\ndef test_multi_var_function(arg1, arg2):\n return arg1 + arg2\n\n\nclass TestJacobian(unittest.TestCase):\n def test_variable_is_statevector(self):\n a = pybamm.Symbol(\"a\")\n with self.assertRaisesRegex(\n TypeError, \"Jacobian can only be taken with respect to a 'StateVector'\"\n ):\n a.jac(a)\n\n def test_linear(self):\n y = pybamm.StateVector(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n\n y0 = np.ones(4)\n\n func = u\n jacobian = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v\n jacobian = np.array([[0, 0, -1, 0], [0, 0, 0, -1]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 3 * u + 4 * v\n jacobian = np.array([[3, 0, 4, 0], [0, 3, 0, 4]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 7 * u - v * 9\n jacobian = np.array([[7, 0, -9, 0], [0, 7, 0, -9]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n A = pybamm.Matrix(2 * eye(2))\n func = A @ u\n jacobian = np.array([[2, 0, 0, 0], [0, 2, 0, 0]])\n dfunc_dy = func.jac(y).simplify().evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u @ pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n func.jac(y)\n\n # when differentiating by independent part of the state vector\n jacobian = np.array([[0, 0], [0, 0]])\n du_dv = u.jac(v).evaluate().toarray()\n np.testing.assert_array_equal(du_dv, jacobian)\n\n def test_nonlinear(self):\n y = pybamm.StateVector(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n\n y0 = np.array([1, 2, 3, 4])\n\n func = v ** 2\n jacobian = np.array([[0, 0, 6, 0], [0, 0, 0, 8]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 2 ** v\n jacobian = np.array(\n [[0, 0, 2 ** 3 * np.log(2), 0], [0, 0, 0, 2 ** 4 * np.log(2)]]\n )\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = v ** v\n jacobian = [[0, 0, 27 * (1 + np.log(3)), 0], [0, 0, 0, 256 * (1 + np.log(4))]]\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_almost_equal(jacobian, dfunc_dy.toarray())\n\n func = u * v\n jacobian = np.array([[3, 0, 1, 0], [0, 4, 0, 2]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u * (u + v)\n jacobian = np.array([[5, 0, 1, 0], [0, 8, 0, 2]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = 1 / u + v / 3\n jacobian = np.array([[-1, 0, 1 / 3, 0], [0, -1 / 4, 0, 1 / 3]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u / v\n jacobian = np.array([[1 / 3, 0, -1 / 9, 0], [0, 1 / 4, 0, -1 / 8]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = v / (1 + v)\n jacobian = np.array([[0, 0, 1 / 16, 0], [0, 0, 0, 1 / 25]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n def 
test_multislice_raises(self):\n y1 = pybamm.StateVector(slice(0, 4), slice(7, 8))\n y_dot1 = pybamm.StateVectorDot(slice(0, 4), slice(7, 8))\n y2 = pybamm.StateVector(slice(4, 7))\n with self.assertRaises(NotImplementedError):\n y1.jac(y1)\n with self.assertRaises(NotImplementedError):\n y2.jac(y1)\n with self.assertRaises(NotImplementedError):\n y_dot1.jac(y1)\n\n def test_linear_ydot(self):\n y = pybamm.StateVector(slice(0, 4))\n y_dot = pybamm.StateVectorDot(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n u_dot = pybamm.StateVectorDot(slice(0, 2))\n v_dot = pybamm.StateVectorDot(slice(2, 4))\n\n y0 = np.ones(4)\n y_dot0 = np.ones(4)\n\n func = u_dot\n jacobian = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v_dot\n jacobian = np.array([[0, 0, -1, 0], [0, 0, 0, -1]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u_dot\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v_dot\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = u\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = -v\n jacobian = np.array([[0, 0, 0, 0], [0, 0, 0, 0]])\n dfunc_dy = func.jac(y_dot).evaluate(y=y0, y_dot=y_dot0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n def test_functions(self):\n y = pybamm.StateVector(slice(0, 4))\n u = pybamm.StateVector(slice(0, 2))\n v = pybamm.StateVector(slice(2, 4))\n const = pybamm.Scalar(1)\n\n y0 = np.array([1.0, 2.0, 3.0, 4.0])\n\n func = pybamm.sin(u)\n jacobian = np.array([[np.cos(1), 0, 0, 0], [0, np.cos(2), 0, 0]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = pybamm.cos(v)\n jacobian = np.array([[0, 0, -np.sin(3), 0], [0, 0, 0, -np.sin(4)]])\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = pybamm.sin(3 * u * v)\n jacobian = np.array(\n [\n [9 * np.cos(9), 0, 3 * np.cos(9), 0],\n [0, 12 * np.cos(24), 0, 6 * np.cos(24)],\n ]\n )\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n func = pybamm.cos(5 * pybamm.exp(u + v))\n jacobian = np.array(\n [\n [\n -5 * np.exp(4) * np.sin(5 * np.exp(4)),\n 0,\n -5 * np.exp(4) * np.sin(5 * np.exp(4)),\n 0,\n ],\n [\n 0,\n -5 * np.exp(6) * np.sin(5 * np.exp(6)),\n 0,\n -5 * np.exp(6) * np.sin(5 * np.exp(6)),\n ],\n ]\n )\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n # when child evaluates to number\n func = pybamm.sin(const)\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(0, dfunc_dy)\n\n # several children\n func = pybamm.Function(test_multi_var_function, 2 * y, 3 * y)\n jacobian = np.diag(5 * np.ones(4))\n dfunc_dy = func.jac(y).evaluate(y=y0)\n np.testing.assert_array_equal(jacobian, dfunc_dy.toarray())\n\n def test_index(self):\n vec = pybamm.StateVector(slice(0, 5))\n ind = pybamm.Index(vec, 3)\n jac = 
ind.jac(vec).evaluate(y=np.linspace(0, 2, 5)).toarray()\n np.testing.assert_array_equal(jac, np.array([[0, 0, 0, 1, 0]]))\n\n # jac of ind of something that isn't a StateVector should return zeros\n const_vec = pybamm.Vector(np.ones(3))\n ind = pybamm.Index(const_vec, 2)\n jac = ind.jac(vec).evaluate(y=np.linspace(0, 2, 5)).toarray()\n np.testing.assert_array_equal(jac, np.array([[0, 0, 0, 0, 0]]))\n\n def test_jac_of_number(self):\n \"Jacobian of a number should be zero\"\n a = pybamm.Scalar(1)\n b = pybamm.Scalar(2)\n\n y = pybamm.StateVector(slice(0, 1))\n\n self.assertEqual(a.jac(y).evaluate(), 0)\n\n add = a + b\n self.assertEqual(add.jac(y).evaluate(), 0)\n\n subtract = a - b\n self.assertEqual(subtract.jac(y).evaluate(), 0)\n\n multiply = a * b\n self.assertEqual(multiply.jac(y).evaluate(), 0)\n\n divide = a / b\n self.assertEqual(divide.jac(y).evaluate(), 0)\n\n power = a ** b\n self.assertEqual(power.jac(y).evaluate(), 0)\n\n def test_jac_of_symbol(self):\n a = pybamm.Symbol(\"a\")\n y = pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n a.jac(y)\n\n def test_spatial_operator(self):\n a = pybamm.Variable(\"a\")\n b = pybamm.SpatialOperator(\"Operator\", a)\n y = pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n b.jac(y)\n\n def test_jac_of_unary_operator(self):\n a = pybamm.Scalar(1)\n b = pybamm.UnaryOperator(\"Operator\", a)\n y = pybamm.StateVector(slice(0, 1))\n with self.assertRaises(NotImplementedError):\n b.jac(y)\n\n def test_jac_of_independent_variable(self):\n a = pybamm.IndependentVariable(\"Variable\")\n y = pybamm.StateVector(slice(0, 1))\n self.assertEqual(a.jac(y).evaluate(), 0)\n\n def test_jac_of_inner(self):\n a = pybamm.Scalar(1)\n b = pybamm.Scalar(2)\n y = pybamm.StateVector(slice(0, 1))\n self.assertEqual(pybamm.inner(a, b).jac(y).evaluate(), 0)\n self.assertEqual(pybamm.inner(a, y).jac(y).evaluate(), 1)\n self.assertEqual(pybamm.inner(y, b).jac(y).evaluate(), 2)\n vec = pybamm.StateVector(slice(0, 2))\n jac = pybamm.inner(a * vec, b * vec).jac(vec).evaluate(y=np.ones(2)).toarray()\n np.testing.assert_array_equal(jac, 4 * np.eye(2))\n\n def test_jac_of_heaviside(self):\n a = pybamm.Scalar(1)\n y = pybamm.StateVector(slice(0, 5))\n np.testing.assert_array_equal(\n ((a < y) * y ** 2).jac(y).evaluate(y=5 * np.ones(5)), 10 * np.eye(5)\n )\n np.testing.assert_array_equal(\n ((a < y) * y ** 2).jac(y).evaluate(y=-5 * np.ones(5)), 0\n )\n\n def test_jac_of_minimum_maximum(self):\n y = pybamm.StateVector(slice(0, 10))\n y_test = np.linspace(0, 2, 10)\n np.testing.assert_array_equal(\n np.diag(pybamm.minimum(1, y ** 2).jac(y).evaluate(y=y_test)),\n 2 * y_test * (y_test < 1),\n )\n np.testing.assert_array_equal(\n np.diag(pybamm.maximum(1, y ** 2).jac(y).evaluate(y=y_test)),\n 2 * y_test * (y_test > 1),\n )\n\n def test_jac_of_abs(self):\n y = pybamm.StateVector(slice(0, 10))\n absy = abs(y)\n jac = absy.jac(y)\n y_test = np.linspace(-2, 2, 10)\n np.testing.assert_array_equal(\n np.diag(jac.evaluate(y=y_test).toarray()), np.sign(y_test)\n )\n\n def test_jac_of_sign(self):\n y = pybamm.StateVector(slice(0, 10))\n func = pybamm.sign(y) * y\n jac = func.jac(y)\n y_test = np.linspace(-2, 2, 10)\n np.testing.assert_array_equal(np.diag(jac.evaluate(y=y_test)), np.sign(y_test))\n\n def test_jac_of_domain_concatenation(self):\n # create mesh\n mesh = get_mesh_for_testing()\n y = pybamm.StateVector(slice(0, 100))\n\n # Jacobian of a DomainConcatenation of constants is a zero matrix of the\n # appropriate size\n a_dom = 
[\"negative electrode\"]\n        b_dom = [\"separator\"]\n        c_dom = [\"positive electrode\"]\n        a_npts = mesh[a_dom[0]][0].npts\n        b_npts = mesh[b_dom[0]][0].npts\n        c_npts = mesh[c_dom[0]][0].npts\n        a = 2 * pybamm.Vector(np.ones(a_npts), domain=a_dom)\n        b = pybamm.Vector(np.ones(b_npts), domain=b_dom)\n        c = 3 * pybamm.Vector(np.ones(c_npts), domain=c_dom)\n\n        conc = pybamm.DomainConcatenation([a, b, c], mesh)\n        jac = conc.jac(y).evaluate().toarray()\n        np.testing.assert_array_equal(jac, np.zeros((100, 100)))\n\n        # Jacobian of a DomainConcatenation of StateVectors\n        a = 2 * pybamm.StateVector(slice(0, a_npts), domain=a_dom)\n        b = pybamm.StateVector(slice(a_npts, a_npts + b_npts), domain=b_dom)\n        c = 3 * pybamm.StateVector(\n            slice(a_npts + b_npts, a_npts + b_npts + c_npts), domain=c_dom\n        )\n        conc = pybamm.DomainConcatenation([a, b, c], mesh)\n\n        y0 = np.ones(100)\n        jac = conc.jac(y).evaluate(y=y0).toarray()\n        np.testing.assert_array_equal(\n            jac,\n            np.diag(\n                np.concatenate(\n                    [2 * np.ones(a_npts), np.ones(b_npts), 3 * np.ones(c_npts)]\n                )\n            ),\n        )\n\n        # multi-domain case not implemented\n        a = 2 * pybamm.StateVector(slice(0, a_npts), domain=a_dom)\n        b = pybamm.StateVector(\n            slice(a_npts, a_npts + b_npts + c_npts), domain=b_dom + c_dom\n        )\n        conc = pybamm.DomainConcatenation([a, b], mesh)\n        with self.assertRaisesRegex(\n            NotImplementedError, \"jacobian only implemented for when each child has\"\n        ):\n            conc.jac(y)\n\n\nif __name__ == \"__main__\":\n    print(\"Add -v for more debug output\")\n    import sys\n\n    if \"-v\" in sys.argv:\n        debug = True\n        pybamm.settings.debug_mode = True\n    unittest.main()\n" ]
[ [ "numpy.ones", "numpy.eye", "numpy.sign", "numpy.zeros", "numpy.testing.assert_array_equal", "numpy.cos", "numpy.exp", "scipy.sparse.eye", "numpy.log", "numpy.array", "numpy.sin", "numpy.linspace" ] ]
KhelmholtzR/ProgLearn
[ "f5177c720e53d2f5936272998b94e0746135a3b9" ]
[ "proglearn/transformers.py" ]
[ "\"\"\"\nMain Author: Will LeVine\nCorresponding Email: [email protected]\n\"\"\"\nfrom tensorflow import keras\nimport numpy as np\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.utils.validation import check_array, check_is_fitted, check_X_y\n\nfrom .base import BaseTransformer\n\n\nclass NeuralClassificationTransformer(BaseTransformer):\n \"\"\"\n A class used to transform data from a category to a specialized representation.\n\n Parameters\n ----------\n network : object\n A neural network used in the classification transformer.\n\n euclidean_layer_idx : int\n An integer to represent the final layer of the transformer.\n\n optimizer : str or keras.optimizers instance\n An optimizer used when compiling the neural network.\n\n loss : str, default=\"categorical_crossentropy\"\n A loss function used when compiling the neural network.\n\n pretrained : bool, default=False\n A boolean used to identify if the network is pretrained.\n\n compile_kwargs : dict, default={\"metrics\": [\"acc\"]}\n A dictionary containing metrics for judging network performance.\n\n fit_kwargs : dict, default={\n \"epochs\": 100,\n \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")],\n \"verbose\": False,\n \"validation_split\": 0.33,\n },\n A dictionary to hold epochs, callbacks, verbose, and validation split for the network.\n\n Attributes\n ----------\n encoder_ : object\n A Keras model with inputs and outputs based on the network attribute.\n Output layers are determined by the euclidean_layer_idx parameter.\n\n fitted_ : boolean\n A boolean flag initialized after the model is fitted.\n \"\"\"\n\n def __init__(\n self,\n network,\n euclidean_layer_idx,\n optimizer,\n loss=\"categorical_crossentropy\",\n pretrained=False,\n compile_kwargs={\"metrics\": [\"acc\"]},\n fit_kwargs={\n \"epochs\": 100,\n \"callbacks\": [keras.callbacks.EarlyStopping(patience=5, monitor=\"val_acc\")],\n \"verbose\": False,\n \"validation_split\": 0.33,\n },\n ):\n self.network = keras.models.clone_model(network)\n self.encoder_ = keras.models.Model(\n inputs=self.network.inputs,\n outputs=self.network.layers[euclidean_layer_idx].output,\n )\n self.pretrained = pretrained\n self.optimizer = optimizer\n self.loss = loss\n self.compile_kwargs = compile_kwargs\n self.fit_kwargs = fit_kwargs\n\n def fit(self, X, y):\n \"\"\"\n Fits the transformer to data X with labels y.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n y : ndarray\n Output (i.e. 
response data matrix).\n\n Returns\n -------\n self : NeuralClassificationTransformer\n The object itself.\n \"\"\"\n check_X_y(X, y, ensure_2d=False, allow_nd=True)\n _, y = np.unique(y, return_inverse=True)\n\n self.network.compile(\n loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs\n )\n\n self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)\n self.fitted_ = True\n\n return self\n\n def transform(self, X):\n \"\"\"\n Performs inference using the transformer.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n\n Returns\n -------\n X_transformed : ndarray\n The transformed input.\n\n Raises\n ------\n NotFittedError\n When the model is not fitted.\n \"\"\"\n check_array(X, ensure_2d=False, allow_nd=True)\n check_is_fitted(self, attributes=\"fitted_\")\n return self.encoder_.predict(X)\n\n\nclass TreeClassificationTransformer(BaseTransformer):\n \"\"\"\n A class used to transform data from a category to a specialized representation.\n\n Parameters\n ----------\n kwargs : dict, default={}\n A dictionary to contain parameters of the tree.\n\n Attributes\n ----------\n transformer : sklearn.tree.DecisionTreeClassifier\n an internal sklearn DecisionTreeClassifier\n \"\"\"\n\n def __init__(self, kwargs={}):\n self.kwargs = kwargs\n\n def fit(self, X, y):\n \"\"\"\n Fits the transformer to data X with labels y.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n y : ndarray\n Output (i.e. response data matrix).\n\n Returns\n -------\n self : TreeClassificationTransformer\n The object itself.\n \"\"\"\n X, y = check_X_y(X, y)\n self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)\n return self\n\n def transform(self, X):\n \"\"\"\n Performs inference using the transformer.\n\n Parameters\n ----------\n X : ndarray\n Input data matrix.\n\n Returns\n -------\n X_transformed : ndarray\n The transformed input.\n\n Raises\n ------\n NotFittedError\n When the model is not fitted.\n \"\"\"\n X = check_array(X)\n check_is_fitted(self)\n return self.transformer_.apply(X)\n" ]
[ [ "sklearn.utils.validation.check_is_fitted", "tensorflow.keras.utils.to_categorical", "sklearn.tree.DecisionTreeClassifier", "tensorflow.keras.models.Model", "tensorflow.keras.models.clone_model", "tensorflow.keras.callbacks.EarlyStopping", "sklearn.utils.validation.check_array", "sklearn.utils.validation.check_X_y", "numpy.unique" ] ]
AndreyBuyanov/ImageProcessing.Lb5.TextureSegmentation
[ "1509817ee2719573b04eba6f49154d7b38af853d" ]
[ "App.py" ]
[ "from PyQt5 import QtWidgets, uic\nfrom PyQt5.QtGui import QImage, QPixmap, QPalette, qRgb, qGray\nimport sys\nimport numpy as np\nfrom typing import Callable\nfrom numbers import Number\n\n\ndef process_image(\n input_image: np.array,\n kernel_size: int,\n kernel_fn: Callable[[np.array], float]) -> np.array:\n padding_width: int = kernel_size // 2\n padding_height: int = kernel_size // 2\n padding = ((padding_height, padding_height), (padding_width, padding_width))\n input_image_padding: np.array = np.pad(\n array=input_image,\n pad_width=padding,\n mode='edge')\n result_image: np.array = np.zeros(input_image.shape, dtype='float')\n image_height, image_width = result_image.shape\n for image_x in range(image_width):\n for image_y in range(image_height):\n x_pos_begin = image_x\n x_pos_end = image_x + kernel_size\n y_pos_begin = image_y\n y_pos_end = image_y + kernel_size\n image_segment: np.array = input_image_padding[y_pos_begin:y_pos_end, x_pos_begin:x_pos_end]\n result_image[image_y][image_x] = kernel_fn(image_segment)\n return result_image\n\n\ndef mean_fn(\n image_segment: np.array) -> float:\n return float(np.mean(image_segment))\n\n\ndef std_fn(\n image_segment: np.array) -> float:\n return float(np.std(image_segment))\n\n\ndef convert_to_binary(\n input_image: np.array,\n threshold: int = 127) -> np.array:\n max_val: int = 255\n min_val: int = 0\n initial_conv: np.array = np.where((input_image <= threshold), input_image, max_val)\n final_conv: np.array = np.where((initial_conv > threshold), initial_conv, min_val)\n return final_conv\n\n\ndef normalize_image(\n input_image: np.array) -> np.array:\n result_image: np.array = np.zeros(input_image.shape)\n input_max = input_image.max()\n input_min = input_image.min()\n input_range = input_max - input_min\n height, width = input_image.shape\n for y in range(height):\n for x in range(width):\n input_value = input_image[y][x]\n scaled_input_value = (input_value - input_min) / input_range if input_range != 0 else 0\n result_image[y][x] = scaled_input_value * 255.0\n return result_image\n\n\ndef fill_image(\n input_image: np.array,\n value: Number,\n replace_value: Number):\n height, width = input_image.shape\n for y in range(height):\n for x in range(width):\n if input_image[y, x] == value:\n input_image[y, x] = replace_value\n\n\ndef mark_objects(\n input_image: np.array) -> np.array:\n result_image: np.array = np.copy(input_image)\n current_object_id = 1\n height, width = input_image.shape\n for y in range(height):\n for x in range(width):\n if y == 0:\n c = 0\n else:\n c = result_image[y - 1, x]\n if x == 0:\n b = 0\n else:\n b = result_image[y, x - 1]\n a = result_image[y, x]\n if a == 0:\n pass\n elif b == 0 and c == 0:\n current_object_id += 1\n result_image[y, x] = current_object_id\n elif b != 0 and c == 0:\n result_image[y, x] = b\n elif b == 0 and c != 0:\n result_image[y, x] = c\n elif b != 0 and c != 0:\n if b == c:\n result_image[y, x] = b\n else:\n result_image[y, x] = b\n fill_image(\n input_image=result_image,\n value=c,\n replace_value=b)\n return result_image\n\n\ndef delete_objects(\n input_image: np.array,\n object_size: int):\n unique_mask, hist = np.unique(input_image, return_counts=True)\n for i in range(1, len(unique_mask)):\n if hist[i] < object_size:\n for (y, x), _ in np.ndenumerate(input_image):\n if input_image[y, x] == unique_mask[i]:\n input_image[y, x] = 0\n\n\nclass Ui(QtWidgets.QMainWindow):\n def __init__(self):\n super(Ui, self).__init__()\n uic.loadUi('Main.ui', self)\n\n self.action_open = 
self.findChild(QtWidgets.QAction, 'actionOpen')\n self.action_open.triggered.connect(self.action_open_triggered)\n\n self.action_exit = self.findChild(QtWidgets.QAction, 'actionExit')\n self.action_exit.triggered.connect(self.action_exit_triggered)\n\n self.bt_apply = self.findChild(QtWidgets.QPushButton, 'btApply')\n self.bt_apply.clicked.connect(self.bt_apply_pressed)\n\n self.input_image_canvas = QtWidgets.QLabel()\n self.input_image_canvas.setBackgroundRole(QPalette.Base)\n self.input_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.input_image_canvas.setScaledContents(True)\n self.sa_input_image = self.findChild(QtWidgets.QScrollArea, 'saInputImage')\n self.sa_input_image.setWidget(self.input_image_canvas)\n self.sa_input_image.setWidgetResizable(False)\n\n self.processed_image_canvas = QtWidgets.QLabel()\n self.processed_image_canvas.setBackgroundRole(QPalette.Base)\n self.processed_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.processed_image_canvas.setScaledContents(True)\n self.sa_processed_image = self.findChild(QtWidgets.QScrollArea, 'saProcessedImage')\n self.sa_processed_image.setWidget(self.processed_image_canvas)\n self.sa_processed_image.setWidgetResizable(False)\n\n self.mask_image_canvas = QtWidgets.QLabel()\n self.mask_image_canvas.setBackgroundRole(QPalette.Base)\n self.mask_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.mask_image_canvas.setScaledContents(True)\n self.sa_mask_image = self.findChild(QtWidgets.QScrollArea, 'saMask')\n self.sa_mask_image.setWidget(self.mask_image_canvas)\n self.sa_mask_image.setWidgetResizable(False)\n\n self.segmented_image_canvas = QtWidgets.QLabel()\n self.segmented_image_canvas.setBackgroundRole(QPalette.Base)\n self.segmented_image_canvas.setSizePolicy(\n QtWidgets.QSizePolicy.Ignored,\n QtWidgets.QSizePolicy.Ignored)\n self.segmented_image_canvas.setScaledContents(True)\n self.sa_segmented_image = self.findChild(QtWidgets.QScrollArea, 'saSegmentedImage')\n self.sa_segmented_image.setWidget(self.segmented_image_canvas)\n self.sa_segmented_image.setWidgetResizable(False)\n\n self.cb_method = self.findChild(QtWidgets.QComboBox, 'cbMethod')\n self.cb_method.addItems(['Mean', 'Std'])\n\n self.le_kernel_size = self.findChild(QtWidgets.QLineEdit, 'leKernelSize')\n\n self.le_threshold = self.findChild(QtWidgets.QLineEdit, 'leThreshold')\n\n self.le_delete_objects = self.findChild(QtWidgets.QLineEdit, 'leDeleteObjects')\n\n self.show()\n\n def action_open_triggered(self):\n options = QtWidgets.QFileDialog.Options()\n file_name, _ = QtWidgets.QFileDialog.\\\n getOpenFileName(self,\n 'QFileDialog.getOpenFileName()',\n '',\n 'Images (*.png *.jpeg *.jpg *.bmp *.gif)',\n options=options)\n if file_name:\n image = QImage(file_name).convertToFormat(QImage.Format_Grayscale8)\n if image.isNull():\n QtWidgets.QMessageBox.\\\n information(self,\n \"Texture segmentation\",\n \"Cannot load %s.\" % file_name)\n return\n\n self.input_image_canvas.setPixmap(QPixmap.fromImage(image))\n self.input_image_canvas.adjustSize()\n\n def action_exit_triggered(self):\n self.close()\n\n def bt_apply_pressed(self):\n method = self.cb_method.currentIndex()\n kernel_size = int(self.le_kernel_size.text())\n threshold = int(self.le_threshold.text())\n object_size = int(self.le_delete_objects.text())\n\n input_q_image = self.input_image_canvas.pixmap().toImage().convertToFormat(QImage.Format_Grayscale8)\n 
input_image = np.zeros((input_q_image.height(), input_q_image.width()), dtype='float')\n        for (y, x), _ in np.ndenumerate(input_image):\n            input_image[y, x] = qGray(input_q_image.pixel(x, y))\n\n        if method == 0:\n            kernel_fn = mean_fn\n        elif method == 1:\n            kernel_fn = std_fn\n        else:\n            return\n        processed_image: np.array = process_image(\n            input_image=input_image,\n            kernel_size=kernel_size,\n            kernel_fn=kernel_fn)\n        normalized_image: np.array = normalize_image(input_image=processed_image)\n        binarized_image: np.array = convert_to_binary(input_image=normalized_image, threshold=threshold)\n        marked_image = mark_objects(input_image=binarized_image)\n        delete_objects(\n            input_image=marked_image,\n            object_size=object_size)\n        segmented_image = np.copy(input_image)\n        for (y, x), _ in np.ndenumerate(segmented_image):\n            if marked_image[y, x] == 0:\n                segmented_image[y, x] = 0\n        self.set_image(\n            input_image=normalized_image,\n            canvas=self.processed_image_canvas)\n        self.set_image(\n            input_image=normalize_image(\n                input_image=marked_image),\n            canvas=self.mask_image_canvas)\n        self.set_image(\n            input_image=segmented_image,\n            canvas=self.segmented_image_canvas)\n\n    @staticmethod\n    def set_image(input_image: np.array, canvas: QtWidgets.QLabel):\n        # Render a grayscale ndarray onto the given QLabel canvas.\n        height, width = input_image.shape\n        q_image = QImage(width, height, QImage.Format_RGB32)\n        for y in range(height):\n            for x in range(width):\n                pixel = int(input_image[y, x])\n                q_image.setPixel(x, y, qRgb(pixel, pixel, pixel))\n        canvas.setPixmap(QPixmap.fromImage(q_image))\n        canvas.adjustSize()\n\n\nif __name__ == '__main__':\n    app = QtWidgets.QApplication(sys.argv)\n    window = Ui()\n    app.exec_()\n" ]
[ [ "numpy.zeros", "numpy.pad", "numpy.copy", "numpy.ndenumerate", "numpy.std", "numpy.where", "numpy.unique", "numpy.mean" ] ]
BUTSpeechFIT/ASR_Transformer
[ "814f720aa8265e9a377869f93dc65b251338e985" ]
[ "Transformer_training_V2.py" ]
[ "#!/usr/bin/python\nimport sys\nimport os\nimport subprocess\nfrom os.path import join, isdir\nimport torch\n\n\n#*************************************************************************************************************************\n####### Loading the Parser and default arguments\n#import pdb;pdb.set_trace()\n\n#sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/Gen_V1/ATTNCODE/Trans_V1')\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')\nimport Transformer_arg\nfrom Transformer_arg import parser\nargs = parser.parse_args()\n\n#************************\nimport Set_gpus\nfrom Set_gpus import Set_gpu\nif args.gpu:\n Set_gpu()\n\n#import safe_gpu\n#from safe_gpu import safe_gpu\n#gpu_owner = safe_gpu.GPUOwner()\n#***********************\n\nimport numpy as np\nimport fileinput\nimport json\nimport random\nfrom itertools import chain\nfrom numpy.random import permutation\n##------------------------------------------------------------------\n#import torch\nfrom torch.autograd import Variable\n#----------------------------------------\nimport torch.nn as nn\nfrom torch import autograd, nn, optim\nos.environ['PYTHONUNBUFFERED'] = '0'\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\n\nfrom random import shuffle\nfrom statistics import mean\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nmatplotlib.pyplot.viridis()\nimport glob\n\n###save architecture for decoding\nmodel_path_name=join(args.model_dir,'model_architecture_')\nwith open(model_path_name, 'w') as f:\n json.dump(args.__dict__, f, indent=2)\nprint(args)\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')\n# #####setting the gpus in the gpu cluster\n# #**********************************\n#import Set_gpus\n#from Set_gpus import Set_gpu\n#if args.gpu:\n# Set_gpu()\n \n###----------------------------------------\nfrom Dataloader_for_AM_v2 import DataLoader\nfrom utils__ import weights_init,reduce_learning_rate,read_as_list,gaussian_noise,plotting\n#==============================================================\nsys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')\nfrom TRANSFORMER_ASR_V1 import Transformer\nfrom Initializing_Transformer_ASR import Initialize_Att_model\nfrom Transformer_Training_loop import train_val_model\nfrom Load_sp_model import Load_sp_models\n##==================================\n#==============================================================\nif not isdir(args.model_dir):\n os.makedirs(args.model_dir)\n\npng_dir=args.model_dir+'_png'\nif not isdir(png_dir):\n os.makedirs(png_dir)\n############################################\n#=============================================================\ndef main():\n ##Load setpiece models for Dataloaders\n Word_model=Load_sp_models(args.Word_model_path)\n Char_model=Load_sp_models(args.Char_model_path)\n ###initilize the model\n model,optimizer=Initialize_Att_model(args)\n #============================================================\n #------------------------------------------------------------ \n #\n train_gen = DataLoader(files=glob.glob(args.data_dir + \"train_splits/*\"),\n max_batch_label_len=args.max_batch_label_len,\n max_batch_len=args.max_batch_len,\n max_feat_len=args.max_feat_len,\n max_label_len=args.max_label_len,\n Word_model=Word_model,\n Char_model=Char_model,\n apply_cmvn=int(args.apply_cmvn))\n\n dev_gen = DataLoader(files=glob.glob(args.data_dir + \"dev_splits/*\"),\n max_batch_label_len=2000,\n 
max_batch_len=args.max_batch_len,\n max_feat_len=5000,\n max_label_len=1000,\n Word_model=Word_model,\n Char_model=Char_model,\n apply_cmvn=int(args.apply_cmvn))\n\n\n #Flags that may change while training \n if args.spec_aug_flag==2:\n weight_noise_flag=False\n spec_aug_flag=True\n else:\n weight_noise_flag=False\n spec_aug_flag=False\n val_history=np.zeros(args.nepochs)\n #======================================\n for epoch in range(args.nepochs):\n ##start of the epoch\n tr_CER=[]; tr_BPE_CER=[]; L_train_cost=[]\n model.train();\n validate_interval = int(args.validate_interval * args.accm_grad) if args.accm_grad>0 else args.validate_interval\n for trs_no in range(validate_interval):\n B1 = train_gen.next()\n assert B1 is not None, \"None should never come out of the DataLoader\"\n\n Output_trainval_dict=train_val_model(smp_no=trs_no,\n args = args, \n model = model,\n optimizer = optimizer,\n data_dict = B1,\n weight_noise_flag=weight_noise_flag,\n spec_aug_flag=spec_aug_flag,\n trainflag = True)\n #\n #\n #get the losses form the dict\n L_train_cost.append(Output_trainval_dict.get('cost_cpu'))\n tr_CER.append(Output_trainval_dict.get('Char_cer'))\n tr_BPE_CER.append(Output_trainval_dict.get('Word_cer'))\n #attention_map=Output_trainval_dict.get('attention_record').data.cpu().numpy()\n #==========================================\n if (trs_no%args.tr_disp==0):\n print(\"tr ep:==:>\",epoch,\"sampl no:==:>\",trs_no,\"train_cost==:>\",mean(L_train_cost),\"CER:\",mean(tr_CER),'BPE_CER',mean(tr_BPE_CER),flush=True) \n #------------------------\n if args.plot_fig_training:\n plot_name=join(png_dir,'train_epoch'+str(epoch)+'_attention_single_file_'+str(trs_no)+'.png')\n\n plotting(plot_name,attention_map)\n \n ###validate the model\n model.eval()\n #=======================================================\n Vl_CER=[]; Vl_BPE_CER=[];L_val_cost=[]\n val_examples=0\n for vl_smp in range(args.max_val_examples):\n B1 = dev_gen.next()\n smp_feat = B1.get('smp_feat')\n val_examples+=smp_feat.shape[0]\n assert B1 is not None, \"None should never come out of the DataLoader\"\n\n ##brak when the examples are more\n if (val_examples >= args.max_val_examples):\n break;\n #-------------------------------------- \n Val_Output_trainval_dict=train_val_model(smp_no=trs_no,\n args=args,\n model = model,\n optimizer = optimizer,\n data_dict = B1,\n weight_noise_flag=False,\n spec_aug_flag=False,\n trainflag = False)\n \n L_val_cost.append(Val_Output_trainval_dict.get('cost_cpu'))\n Vl_CER.append(Val_Output_trainval_dict.get('Char_cer'))\n Vl_BPE_CER.append(Val_Output_trainval_dict.get('Word_cer'))\n #attention_map=Val_Output_trainval_dict.get('attention_record').data.cpu().numpy()\n\n #====================================================== \n #======================================================\n if (vl_smp%args.vl_disp==0) or (val_examples==args.max_val_examples-1):\n print(\"val epoch:==:>\",epoch,\"val smp no:==:>\",vl_smp,\"val_cost:==:>\",mean(L_val_cost),\"CER:\",mean(Vl_CER),'BPE_CER',mean(Vl_BPE_CER),flush=True) \n\n if args.plot_fig_validation:\n plot_name=join(png_dir,'val_epoch'+str(epoch)+'_attention_single_file_'+str(vl_smp)+'.png') \n plotting(plot_name,attention_map) \n #----------------------------------------------------\n#==================================================================\n val_history[epoch]=(mean(Vl_CER)*100)\n print(\"val_history:\",val_history[:epoch+1])\n #================================================================== \n ####saving_weights \n 
ct=\"model_epoch_\"+str(epoch)+\"_sample_\"+str(trs_no)+\"_\"+str(mean(L_train_cost))+\"___\"+str(mean(L_val_cost))+\"__\"+str(mean(Vl_CER))\n print(ct)\n torch.save(model.state_dict(),join(args.model_dir,str(ct)))\n ####saving otpimizer helped Transformer\n #torch.save(optimizer.state_dict(),join(args.model_dir,str(ct)+'_opt'))\n\n ####################################################### \n #######################################################\n ###open the file write and close it to avoid delays\n with open(args.weight_text_file,'a+') as weight_saving_file:\n print(join(args.model_dir,str(ct)), file=weight_saving_file)\n\n with open(args.Res_text_file,'a+') as Res_saving_file:\n print(float(mean(Vl_CER)), file=Res_saving_file)\n #=================================\n # early_stopping and checkpoint averaging: \n if args.early_stopping:\n A=val_history\n Non_zero_loss=A[A>0]\n min_cpts=np.argmin(Non_zero_loss)\n Non_zero_len=len(Non_zero_loss)\n\n if ((Non_zero_len-min_cpts)>1):\n weight_noise_flag=True\n spec_aug_flag=True\n\n if (Non_zero_len-min_cpts) > args.early_stopping_patience: \n print(\"The model is early stopping........\",\"minimum value of model is:\",min_cpts)\n exit(0)\n\n#=======================================================\n#=============================================================================================\nif __name__ == '__main__':\n main()\n\n\n\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.switch_backend", "matplotlib.pyplot.viridis", "numpy.argmin" ] ]
petabricks/petabricks
[ "b498b93880b0c4ac3924ddb82cff2e6541e60bd1" ]
[ "scripts/misc/csvavg.py" ]
[ "#!/usr/bin/python\n\nimport csv, sys\nimport numpy\n\ndialect = csv.excel_tab\nmulti_file = len(sys.argv[1:]) > 1\n\n# One DictReader per input file; read the first data row of each up front.\ninputs = [csv.DictReader(open(path), dialect=dialect) for path in sys.argv[1:]]\nrows = [next(reader) for reader in inputs]\nheaders = inputs[0].fieldnames\noutput = csv.writer(sys.stdout, dialect=dialect)\noutput.writerow(headers)\n\ndef mkavg(k):\n    # Mean +- stddev of column k across the current row of each file.\n    try:\n        values = [float(row[k]) for row in rows]\n        return \"%f +- %f\" % (numpy.mean(values), numpy.std(values))\n    except (TypeError, ValueError):\n        return 'error'\n\nif multi_file:\n    # Multiple files: average corresponding rows across the inputs.\n    try:\n        while True:\n            output.writerow([mkavg(k) for k in headers])\n            rows = [next(reader) for reader in inputs]\n    except StopIteration:\n        pass\nelse:\n    # Single file: running per-column mean, skipping non-numeric cells.\n    counts = dict()\n    sums = dict()\n    for k in headers:\n        try:\n            sums[k] = float(rows[0][k])\n        except (TypeError, ValueError):\n            sums[k] = 0.0\n        counts[k] = 1.0\n    for row in inputs[0]:\n        for k in headers:\n            try:\n                sums[k] += float(row[k])\n            except (TypeError, ValueError):\n                pass  # skip bad cells; do not reset the running sum\n            counts[k] += 1.0\n\n    output.writerow([sums[k] / counts[k] for k in headers])\n" ]
[ [ "numpy.std", "numpy.mean" ] ]
MaxCodeXTC/panel
[ "1d34e8ce4734eec10f8e64af11c5a3fecaab5bac" ]
[ "panel/widgets/indicators.py" ]
[ "import os\nimport sys\n\nfrom math import pi\n\nimport numpy as np\nimport param\n\nfrom bokeh.plotting import figure\nfrom bokeh.models import ColumnDataSource\nfrom tqdm.asyncio import tqdm as _tqdm\n\nfrom ..layout import Column, Row\nfrom ..models import (\n HTML, Progress as _BkProgress, TrendIndicator as _BkTrendIndicator\n)\nfrom ..pane.markup import Str\nfrom ..reactive import SyncableData\nfrom ..util import escape, updating\nfrom ..viewable import Viewable\nfrom .base import Widget\n\nRED = \"#d9534f\"\nGREEN = \"#5cb85c\"\nBLUE = \"#428bca\"\n\nclass Indicator(Widget):\n \"\"\"\n Indicator is a baseclass for widgets which indicate some state.\n \"\"\"\n\n sizing_mode = param.ObjectSelector(default='fixed', objects=[\n 'fixed', 'stretch_width', 'stretch_height', 'stretch_both',\n 'scale_width', 'scale_height', 'scale_both', None])\n\n __abstract = True\n\n\nclass BooleanIndicator(Indicator):\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n __abstract = True\n\n\nclass BooleanStatus(BooleanIndicator):\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=20, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=20, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n msg['css_classes'] = ['dot-filled', self.color] if value else ['dot']\n return msg\n\n\nclass LoadingSpinner(BooleanIndicator):\n\n bgcolor = param.ObjectSelector(default='light', objects=['dark', 'light'])\n\n color = param.ObjectSelector(default='dark', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n height = param.Integer(default=125, doc=\"\"\"\n height of the circle.\"\"\")\n\n width = param.Integer(default=125, doc=\"\"\"\n Width of the circle.\"\"\")\n\n value = param.Boolean(default=False, doc=\"\"\"\n Whether the indicator is active or not.\"\"\")\n\n _rename = {'color': None, 'bgcolor': None}\n\n _source_transforms = {'value': None}\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n value = msg.pop('value', None)\n if value is None:\n return msg\n color_cls = f'{self.color}-{self.bgcolor}'\n msg['css_classes'] = ['loader', 'spin', color_cls] if value else ['loader', self.bgcolor]\n return msg\n\n\nclass ValueIndicator(Indicator):\n \"\"\"\n A ValueIndicator provides a visual representation for a numeric\n value.\n \"\"\"\n\n value = param.Number(default=None, allow_None=True)\n\n __abstract = True\n\n\nclass Progress(ValueIndicator):\n\n active = param.Boolean(default=True, doc=\"\"\"\n If no value is set the active property toggles animation of the\n progress bar on and off.\"\"\")\n\n bar_color = param.ObjectSelector(default='success', objects=[\n 'primary', 'secondary', 'success', 'info', 'danger', 'warning',\n 'light', 'dark'])\n\n max = param.Integer(default=100, doc=\"The maximum value of the progress bar.\")\n\n value = param.Integer(default=None, bounds=(-1, None), doc=\"\"\"\n The current value of the progress bar. 
If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter. If set to -1 the progress bar will be empty.\"\"\")\n\n _rename = {'name': None}\n\n _widget_type = _BkProgress\n\n @param.depends('max', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = (-1, self.max)\n\n def __init__(self,**params):\n super().__init__(**params)\n self._update_value_bounds()\n\n\nclass Number(ValueIndicator):\n \"\"\"\n The Number indicator renders the value as text optionally colored\n according to the color thresholds.\n \"\"\"\n\n default_color = param.String(default='black')\n\n colors = param.List(default=None)\n\n format = param.String(default='{value}')\n\n font_size = param.String(default='54pt')\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n title_size = param.String(default='18pt')\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'colors': None, 'default_color': None,\n 'font_size': None, 'format': None, 'nan_format': None,\n 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n format = msg.pop('format', self.format)\n value = msg.pop('value', self.value)\n nan_format = msg.pop('nan_format', self.nan_format)\n color = msg.pop('default_color', self.default_color)\n colors = msg.pop('colors', self.colors)\n for val, clr in (colors or [])[::-1]:\n if value is not None and value <= val:\n color = clr\n if value is None:\n value = float('nan')\n value = format.format(value=value).replace('nan', nan_format)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass String(ValueIndicator):\n \"\"\"\n The String indicator renders a string with a title.\n \"\"\"\n\n default_color = param.String(default='black')\n\n font_size = param.String(default='54pt')\n\n title_size = param.String(default='18pt')\n\n value = param.String(default=None, allow_None=True)\n\n _rename = {}\n\n _source_transforms = {\n 'value': None, 'default_color': None, 'font_size': None, 'title_size': None\n }\n\n _widget_type = HTML\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n font_size = msg.pop('font_size', self.font_size)\n title_font_size = msg.pop('title_size', self.title_size)\n name = msg.pop('name', self.name)\n value = msg.pop('value', self.value)\n color = msg.pop('default_color', self.default_color)\n text = f'<div style=\"font-size: {font_size}; color: {color}\">{value}</div>'\n if self.name:\n title_font_size = msg.pop('title_size', self.title_size)\n text = f'<div style=\"font-size: {title_font_size}; color: {color}\">{name}</div>\\n{text}'\n msg['text'] = escape(text)\n return msg\n\n\nclass Gauge(ValueIndicator):\n \"\"\"\n A Gauge represents a value in some range as a position on\n speedometer or gauge. 
It is similar to a Dial but visually a lot\n busier.\n \"\"\"\n\n annulus_width = param.Integer(default=10, doc=\"\"\"\n Width of the gauge annulus.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Gauge, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n custom_opts = param.Dict(doc=\"\"\"\n Additional options to pass to the ECharts Gauge definition.\"\"\")\n\n height = param.Integer(default=300, bounds=(0, None))\n\n end_angle = param.Number(default=-45, doc=\"\"\"\n Angle at which the gauge ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator.\"\"\")\n\n num_splits = param.Integer(default=10, doc=\"\"\"\n Number of splits along the gauge.\"\"\")\n\n show_ticks = param.Boolean(default=True, doc=\"\"\"\n Whether to show ticks along the dials.\"\"\")\n\n show_labels = param.Boolean(default=True, doc=\"\"\"\n Whether to show tick labels along the dials.\"\"\")\n\n start_angle = param.Number(default=225, doc=\"\"\"\n Angle at which the gauge starts.\"\"\")\n\n tooltip_format = param.String(default='{b} : {c}%', doc=\"\"\"\n Formatting string for the hover tooltip.\"\"\")\n\n title_size = param.Integer(default=18, doc=\"\"\"\n Size of title font.\"\"\")\n\n value = param.Number(default=25, doc=\"\"\"\n Value to indicate on the gauge a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=300, bounds=(0, None))\n\n _rename = {}\n\n _source_transforms = {\n 'annulus_width': None, 'bounds': None, 'colors': None,\n 'custom_opts': None, 'end_angle': None, 'format': None,\n 'num_splits': None, 'show_ticks': None, 'show_labels': None,\n 'start_angle': None, 'tooltip_format': None, 'title_size': None,\n 'value': None\n }\n\n @property\n def _widget_type(self):\n if 'panel.models.echarts' not in sys.modules:\n from ..models.echarts import ECharts\n else:\n ECharts = getattr(sys.modules['panel.models.echarts'], 'ECharts')\n return ECharts\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n vmin, vmax = msg.pop('bounds', self.bounds)\n msg['data'] = {\n 'tooltip': {\n 'formatter': msg.pop('tooltip_format', self.tooltip_format)\n },\n 'series': [{\n 'name': 'Gauge',\n 'type': 'gauge',\n 'axisTick': {'show': msg.pop('show_ticks', self.show_ticks)},\n 'axisLabel': {'show': msg.pop('show_labels', self.show_labels)},\n 'title': {'fontWeight': 'bold', 'fontSize': msg.pop('title_size', self.title_size)},\n 'splitLine': {'show': True},\n 'radius': '100%',\n 'detail': {'formatter': msg.pop('format', self.format)},\n 'min': vmin,\n 'max': vmax,\n 'startAngle': msg.pop('start_angle', self.start_angle),\n 'endAngle': msg.pop('end_angle', self.end_angle),\n 'splitNumber': msg.pop('num_splits', self.num_splits),\n 'data': [{'value': msg.pop('value', self.value), 'name': self.name}],\n 'axisLine': {\n 'lineStyle': {\n 'width': msg.pop('annulus_width', self.annulus_width),\n }\n }\n }]\n }\n colors = msg.pop('colors', self.colors)\n if colors:\n msg['data']['series'][0]['axisLine']['lineStyle']['color'] = colors\n custom_opts = msg.pop('custom_opts', self.custom_opts)\n if custom_opts:\n gauge = 
msg['data']['series'][0]\n for k, v in custom_opts.items():\n if k not in gauge or not isinstance(gauge[k], dict):\n gauge[k] = v\n else:\n gauge[k].update(v)\n return msg\n\n\nclass Dial(ValueIndicator):\n \"\"\"\n A Dial represents a value in some range as a position on an\n annular dial. It is similar to a Gauge but more minimal visually.\n \"\"\"\n\n annulus_width = param.Number(default=0.2, doc=\"\"\"\n Width of the radial annulus as a fraction of the total.\"\"\")\n\n bounds = param.Range(default=(0, 100), doc=\"\"\"\n The upper and lower bound of the dial.\"\"\")\n\n colors = param.List(default=None, doc=\"\"\"\n Color thresholds for the Dial, specified as a list of tuples\n of the fractional threshold and the color to switch to.\"\"\")\n\n default_color = param.String(default='lightblue', doc=\"\"\"\n Color of the radial annulus if not color thresholds are supplied.\"\"\")\n\n end_angle = param.Number(default=25, doc=\"\"\"\n Angle at which the dial ends.\"\"\")\n\n format = param.String(default='{value}%', doc=\"\"\"\n Formatting string for the value indicator and lower/upper bounds.\"\"\")\n\n height = param.Integer(default=250, bounds=(1, None))\n\n nan_format = param.String(default='-', doc=\"\"\"\n How to format nan values.\"\"\")\n\n needle_color = param.String(default='black', doc=\"\"\"\n Color of the Dial needle.\"\"\")\n\n needle_width = param.Number(default=0.1, doc=\"\"\"\n Radial width of the needle.\"\"\")\n\n start_angle = param.Number(default=-205, doc=\"\"\"\n Angle at which the dial starts.\"\"\")\n\n tick_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial min/max labels.\"\"\")\n\n title_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial title.\"\"\")\n\n unfilled_color = param.String(default='whitesmoke', doc=\"\"\"\n Color of the unfilled region of the Dial.\"\"\")\n\n value_size = param.String(default=None, doc=\"\"\"\n Font size of the Dial value label.\"\"\")\n\n value = param.Number(default=25, allow_None=True, doc=\"\"\"\n Value to indicate on the dial a value within the declared bounds.\"\"\")\n\n width = param.Integer(default=250, bounds=(1, None))\n\n _manual_params = [\n 'value', 'start_angle', 'end_angle', 'bounds',\n 'annulus_width', 'format', 'background', 'needle_width',\n 'tick_size', 'title_size', 'value_size', 'colors',\n 'default_color', 'unfilled_color', 'height',\n 'width', 'nan_format', 'needle_color'\n ]\n\n _data_params = _manual_params\n\n _rename = {'background': 'background_fill_color'}\n\n def __init__(self, **params):\n super().__init__(**params)\n self._update_value_bounds()\n\n @param.depends('bounds', watch=True)\n def _update_value_bounds(self):\n self.param.value.bounds = self.bounds\n\n def _get_data(self):\n vmin, vmax = self.bounds\n value = self.value\n if value is None:\n value = float('nan')\n fraction = (value-vmin)/(vmax-vmin)\n start = (np.radians(360-self.start_angle) - pi % (2*pi)) + pi\n end = (np.radians(360-self.end_angle) - pi % (2*pi)) + pi\n distance = (abs(end-start) % (pi*2))\n if end>start:\n distance = (pi*2)-distance\n radial_fraction = distance*fraction\n angle = start if np.isnan(fraction) else (start-radial_fraction)\n inner_radius = 1-self.annulus_width\n\n color = self.default_color\n for val, clr in (self.colors or [])[::-1]:\n if fraction <= val:\n color = clr\n\n annulus_data = {\n 'starts': np.array([start, angle]),\n 'ends' : np.array([angle, end]),\n 'color': [color, self.unfilled_color],\n 'radius': np.array([inner_radius, inner_radius])\n }\n\n x0s, y0s, x1s, 
y1s, clrs = [], [], [], [], []\n colors = self.colors or []\n for (val, _), (_, clr) in zip(colors[:-1], colors[1:]):\n tangle = start-(distance*val)\n if (vmin + val * (vmax-vmin)) <= value:\n continue\n x0, y0 = np.cos(tangle), np.sin(tangle)\n x1, y1 = x0*inner_radius, y0*inner_radius\n x0s.append(x0)\n y0s.append(y0)\n x1s.append(x1)\n y1s.append(y1)\n clrs.append(clr)\n\n threshold_data = {\n 'x0': x0s, 'y0': y0s, 'x1': x1s, 'y1': y1s, 'color': clrs\n }\n\n center_radius = 1-self.annulus_width/2.\n x, y = np.cos(angle)*center_radius, np.sin(angle)*center_radius\n needle_start = pi+angle-(self.needle_width/2.)\n needle_end = pi+angle+(self.needle_width/2.)\n needle_data = {\n 'x': np.array([x]),\n 'y': np.array([y]),\n 'start': np.array([needle_start]),\n 'end': np.array([needle_end]),\n 'radius': np.array([center_radius])\n }\n\n value = self.format.format(value=value).replace('nan', self.nan_format)\n min_value = self.format.format(value=vmin)\n max_value = self.format.format(value=vmax)\n tminx, tminy = np.cos(start)*center_radius, np.sin(start)*center_radius\n tmaxx, tmaxy = np.cos(end)*center_radius, np.sin(end)*center_radius\n tmin_angle, tmax_angle = start+pi, end+pi % pi\n scale = (self.height/400)\n title_size = self.title_size if self.title_size else '%spt' % (scale*32)\n value_size = self.value_size if self.value_size else '%spt' % (scale*48)\n tick_size = self.tick_size if self.tick_size else '%spt' % (scale*18)\n\n text_data= {\n 'x': np.array([0, 0, tminx, tmaxx]),\n 'y': np.array([-.2, -.5, tminy, tmaxy]),\n 'text': [self.name, value, min_value, max_value],\n 'rot': np.array([0, 0, tmin_angle, tmax_angle]),\n 'size': [title_size, value_size, tick_size, tick_size],\n 'color': ['black', color, 'black', 'black']\n }\n return annulus_data, needle_data, threshold_data, text_data\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n params = self._process_param_change(self._init_params())\n model = figure(\n x_range=(-1,1), y_range=(-1,1), tools=[],\n outline_line_color=None, toolbar_location=None,\n width=self.width, height=self.height, **params\n )\n model.xaxis.visible = False\n model.yaxis.visible = False\n model.grid.visible = False\n\n annulus, needle, threshold, text = self._get_data()\n\n # Draw annulus\n annulus_source = ColumnDataSource(data=annulus, name='annulus_source')\n model.annular_wedge(\n x=0, y=0, inner_radius='radius', outer_radius=1, start_angle='starts',\n end_angle='ends', line_color='gray', color='color', direction='clock',\n source=annulus_source\n )\n\n # Draw needle\n needle_source = ColumnDataSource(data=needle, name='needle_source')\n model.wedge(\n x='x', y='y', radius='radius', start_angle='start', end_angle='end',\n fill_color=self.needle_color, line_color=self.needle_color,\n source=needle_source, name='needle_renderer'\n )\n\n # Draw thresholds\n threshold_source = ColumnDataSource(data=threshold, name='threshold_source')\n model.segment(\n x0='x0', x1='x1', y0='y0', y1='y1', line_color='color', source=threshold_source,\n line_width=2\n )\n\n # Draw labels\n text_source = ColumnDataSource(data=text, name='label_source')\n model.text(\n x='x', y='y', text='text', font_size='size', text_align='center',\n text_color='color', source=text_source, text_baseline='top',\n angle='rot'\n )\n\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _manual_update(self, events, model, doc, root, parent, comm):\n update_data = False\n for event in events:\n if event.name in ('width', 
'height'):\n model.update(**{event.name: event.new})\n if event.name in self._data_params:\n update_data = True\n elif event.name == 'needle_color':\n needle_r = model.select(name='needle_renderer')\n needle_r.glyph.line_color = event.new\n needle_r.glyph.fill_color = event.new\n if not update_data:\n return\n annulus, needle, threshold, labels = self._get_data()\n model.select(name='annulus_source').data.update(annulus)\n model.select(name='needle_source').data.update(needle)\n model.select(name='threshold_source').data.update(threshold)\n model.select(name='label_source').data.update(labels)\n\n\nclass Trend(SyncableData, Indicator):\n \"\"\"\n The Trend indicator enables the user to display a Dashboard KPI Card.\n\n The card can be layout out as:\n\n * a column (text and plot on top of each other) or\n * a row (text and plot after each other)\n\n The text section is responsive and resizes on window resize.\n \"\"\"\n\n data = param.Parameter(doc=\"\"\"\n The plot data declared as a dictionary of arrays or a DataFrame.\"\"\")\n\n layout = param.ObjectSelector(default=\"column\", objects=[\"column\", \"row\"])\n\n plot_x = param.String(default=\"x\", doc=\"\"\"\n The name of the key in the plot_data to use on the x-axis.\"\"\")\n\n plot_y = param.String(default=\"y\", doc=\"\"\"\n The name of the key in the plot_data to use on the y-axis.\"\"\")\n\n plot_color = param.String(default=BLUE, doc=\"\"\"\n The color to use in the plot.\"\"\")\n\n plot_type = param.ObjectSelector(default=\"bar\", objects=[\"line\", \"step\", \"area\", \"bar\"], doc=\"\"\"\n The plot type to render the plot data as.\"\"\")\n\n pos_color = param.String(GREEN, doc=\"\"\"\n The color used to indicate a positive change.\"\"\")\n\n neg_color = param.String(RED, doc=\"\"\"\n The color used to indicate a negative change.\"\"\")\n\n title = param.String(doc=\"\"\"The title or a short description of the card\"\"\")\n\n value = param.Parameter(default='auto', doc=\"\"\"\n The primary value to be displayed.\"\"\")\n\n value_change = param.Parameter(default='auto', doc=\"\"\"\n A secondary value. 
For example the change in percent.\"\"\")\n\n _data_params = ['data']\n\n _manual_params = ['data']\n\n _rename = {'data': None, 'selection': None}\n\n _widget_type = _BkTrendIndicator\n\n def _get_data(self):\n if self.data is None:\n return None, {self.plot_x: [], self.plot_y: []}\n elif isinstance(self.data, dict):\n return self.data, self.data\n return self.data, ColumnDataSource.from_df(self.data)\n\n def _init_params(self):\n props = super()._init_params()\n self._processed, self._data = self._get_data()\n props['source'] = ColumnDataSource(data=self._data)\n return props\n\n def _trigger_auto_values(self):\n trigger = []\n if self.value == 'auto':\n trigger.append('value')\n if self.value_change == 'auto':\n trigger.append('value_change')\n if trigger:\n self.param.trigger(*trigger)\n\n @updating\n def _stream(self, stream, rollover=None):\n self._trigger_auto_values()\n super()._stream(stream, rollover)\n\n def _update_cds(self, *events):\n super()._update_cds(*events)\n self._trigger_auto_values()\n\n def _process_param_change(self, msg):\n msg = super()._process_param_change(msg)\n ys = self._data.get(self.plot_y, [])\n if 'value' in msg and msg['value'] == 'auto':\n if len(ys):\n msg['value'] = ys[-1]\n else:\n msg['value'] = 0\n if 'value_change' in msg and msg['value_change'] == 'auto':\n if len(ys) > 1:\n y1, y2 = self._data.get(self.plot_y)[-2:]\n msg['value_change'] = 0 if y1 == 0 else (y2/y1 - 1)\n else:\n msg['value_change'] = 0\n return msg\n\n\nMARGIN = {\n \"text_pane\": {\"column\": (5, 10, 0, 10), \"row\": (0, 10, 0, 10)},\n \"progress\": {\"column\": (0, 10, 5, 10), \"row\": (12, 10, 0, 10)},\n}\n\n\n\nclass ptqdm(_tqdm):\n\n def __init__(self, *args, **kwargs):\n self._indicator = kwargs.pop('indicator')\n super().__init__(*args, **kwargs)\n\n def display(self, msg=None, pos=None, bar_style=None):\n super().display(msg, pos)\n style = self._indicator.text_pane.style or {}\n color = self.colour or 'black'\n self._indicator.text_pane.style = dict(style, color=color)\n if self.total is not None and self.n is not None:\n self._indicator.max = int(self.total) # Can be numpy.int64\n self._indicator.value = int(self.n)\n self._indicator.text = self._to_text(**self.format_dict)\n return True\n\n def _to_text(self, n, total, **kwargs):\n return self.format_meter(n, total, **{**kwargs, \"ncols\": 0})\n\n def close(self):\n super().close()\n if not self.leave:\n self._indicator.reset()\n return _tqdm\n\n\nclass Tqdm(Indicator):\n\n layout = param.ClassSelector(class_=(Column, Row), precedence=-1, constant=True, doc=\"\"\"\n The layout for the text and progress indicator.\"\"\",)\n\n max = param.Integer(default=100, doc=\"\"\"\n The maximum value of the progress indicator.\"\"\")\n\n progress = param.ClassSelector(class_=Progress, precedence=-1, doc=\"\"\"\n The Progress indicator used to display the progress.\"\"\",)\n\n text = param.String(default='', doc=\"\"\"\n The current tqdm style progress text.\"\"\")\n\n text_pane = param.ClassSelector(class_=Str, precedence=-1, doc=\"\"\"\n The pane to display the text to.\"\"\")\n\n value = param.Integer(default=0, bounds=(0, None), doc=\"\"\"\n The current value of the progress bar. If set to None the progress\n bar will be indeterminate and animate depending on the active\n parameter.\"\"\")\n\n margin = param.Parameter(default=0, doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n width = param.Integer(default=400, bounds=(0, None), doc=\"\"\"\n The width of the component (in pixels). This can be either\n fixed or preferred width, depending on width sizing policy.\"\"\")\n\n write_to_console = param.Boolean(default=False, doc=\"\"\"\n Whether or not to also write to the console.\"\"\")\n\n _layouts = {Row: 'row', Column: 'column'}\n\n _rename = {'value': None, 'min': None, 'max': None, 'text': None}\n\n def __init__(self, **params):\n layout = params.pop('layout', 'column')\n layout = self._layouts.get(layout, layout) \n if \"text_pane\" not in params:\n sizing_mode = 'stretch_width' if layout == 'column' else 'fixed'\n params[\"text_pane\"] = Str(\n None, min_height=20, min_width=280, sizing_mode=sizing_mode,\n margin=MARGIN[\"text_pane\"][layout],\n )\n if \"progress\" not in params:\n params[\"progress\"] = Progress(\n active=False,\n sizing_mode=\"stretch_width\",\n min_width=100,\n margin=MARGIN[\"progress\"][layout],\n )\n\n layout_params = {p: params.get(p, getattr(self, p)) for p in Viewable.param}\n if layout == 'row' or layout is Row:\n params['layout'] = Row(\n params['progress'], params['text_pane'], **layout_params\n )\n else:\n params['layout'] = Column(\n params['text_pane'], params['progress'], **layout_params\n )\n super().__init__(**params)\n\n self.param.watch(self._update_layout, list(Viewable.param))\n\n if self.value == 0:\n # Hack: to give progress the initial look\n self.progress.max = 100000\n self.progress.value = 1\n else:\n self.progress.max = self.max\n self.progress.value = self.value\n self.text_pane.object = self.text\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n model = self.layout._get_model(doc, root, parent, comm)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _cleanup(self, root):\n super()._cleanup(root)\n self.layout._cleanup(root)\n\n def _update_layout(self, *events):\n self.layout.param.set_param(**{event.name: event.new for event in events})\n\n @param.depends(\"text\", watch=True)\n def _update_text(self):\n if self.text_pane:\n self.text_pane.object = self.text\n\n @param.depends(\"value\", watch=True)\n def _update_value(self):\n if self.progress:\n self.progress.value = self.value\n\n @param.depends(\"max\", watch=True)\n def _update_max(self):\n if self.progress:\n self.progress.max = self.max\n\n def __call__(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm(*args, **kwargs)\n\n __call__.__doc__ = ptqdm.__doc__\n\n def pandas(self, *args, **kwargs):\n kwargs['indicator'] = self\n if not self.write_to_console and 'file' not in kwargs:\n f = open(os.devnull, 'w')\n kwargs['file'] = f\n return ptqdm.pandas(*args, **kwargs)\n\n def reset(self):\n \"\"\"Resets the parameters\"\"\"\n self.value = self.param.value.default\n self.text = self.param.text.default\n" ]
[ [ "numpy.cos", "numpy.isnan", "numpy.array", "numpy.sin", "numpy.radians" ] ]
rubenlozanoaht3m/DataDogm
[ "cd605e8072cca31e8418830c3300657ae2fa5b16" ]
[ "examples/pipeline/hetero_ftl/pipeline-hetero-ftl-with-predict.py" ]
[ "#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport argparse\n\nfrom pipeline.backend.pipeline import PipeLine\nfrom pipeline.component import DataTransform\nfrom pipeline.component.hetero_ftl import HeteroFTL\nfrom pipeline.component.reader import Reader\nfrom pipeline.interface.data import Data\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras import initializers\nfrom pipeline.component.evaluation import Evaluation\n\nfrom pipeline.utils.tools import load_job_config\n\n\ndef main(config=\"../../config.yaml\", namespace=\"\"):\n # obtain config\n if isinstance(config, str):\n config = load_job_config(config)\n parties = config.parties\n guest = parties.guest[0]\n host = parties.host[0]\n\n guest_train_data = {\"name\": \"nus_wide_guest\", \"namespace\": f\"experiment{namespace}\"}\n host_train_data = {\"name\": \"nus_wide_host\", \"namespace\": f\"experiment{namespace}\"}\n pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)\n\n reader_0 = Reader(name=\"reader_0\")\n reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)\n reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)\n\n data_transform_0 = DataTransform(name=\"data_transform_0\")\n data_transform_0.get_party_instance(\n role='guest', party_id=guest).component_param(\n with_label=True, output_format=\"dense\")\n data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)\n\n hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',\n epochs=10, alpha=1, batch_size=-1, mode='plain')\n\n hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',\n kernel_initializer=initializers.RandomNormal(stddev=1.0),\n bias_initializer=initializers.Zeros()))\n\n hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))\n evaluation_0 = Evaluation(name='evaluation_0', eval_type=\"binary\")\n\n pipeline.add_component(reader_0)\n pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))\n pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))\n pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))\n\n pipeline.compile()\n\n pipeline.fit()\n\n # predict\n # deploy required components\n pipeline.deploy_component([data_transform_0, hetero_ftl_0])\n\n predict_pipeline = PipeLine()\n # add data reader onto predict pipeline\n predict_pipeline.add_component(reader_0)\n # add selected components from train pipeline onto predict pipeline\n # specify data source\n predict_pipeline.add_component(\n pipeline, data=Data(\n predict_input={\n pipeline.data_transform_0.input.data: reader_0.output.data}))\n # run predict model\n predict_pipeline.predict()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\"PIPELINE DEMO\")\n parser.add_argument(\"-config\", type=str,\n help=\"config 
file\")\n args = parser.parse_args()\n if args.config is not None:\n main(args.config)\n else:\n main()\n" ]
[ [ "tensorflow.keras.initializers.Zeros", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.initializers.RandomNormal" ] ]
Surya97/MWDB-project
[ "508562913624416415cd143cef9b7689066037ef" ]
[ "Phase3/Feedback.py" ]
[ "import os\nimport sys\nfrom pathlib import Path\nsys.path.insert(1, '../Phase1')\nsys.path.insert(2, '../Phase2')\nimport misc\nimport numpy as np\n\nclass Feedback:\n def __init__(self):\n self.task5_result = None\n self.reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,\n 'Phase2', 'pickle_files')\n self.set_task5_result()\n self.dataset = list()\n self.X = None\n self.y = None\n self.dataset=list()\n\n def generate_input_data_set(self, rorir_map, dataset_features):\n for image_id, label in rorir_map.items():\n image_id = os.path.basename(image_id)\n if label==0 or label==1:\n feat = dataset_features[image_id].tolist()\n feat+=[label]\n self.dataset.append(np.array(feat))\n return\n\n def set_task5_result(self):\n self.task5_result = misc.load_from_pickle(self.reduced_pickle_file_folder, 'Task_5_Result')\n\n def generate_input_data(self, rorir_map, dataset_features):\n X = []\n y = []\n\n for image_id, label in rorir_map.items():\n image_id = os.path.basename(image_id)\n if label == 0 or label == 1:\n X.append(dataset_features[image_id])\n y+=[rorir_map[image_id]]\n X = np.array(X)\n y = np.array(y)\n self.X=X\n self.y=y\n\n return\n\n def euclidean_distance(self, dist1, dist2):\n return (sum([(a - b) ** 2 for a, b in zip(dist1, dist2)])) ** 0.5\n\n def save_result(self, result):\n reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,\n 'Phase2', 'pickle_files')\n misc.save2pickle(result, reduced_pickle_file_folder, 'Task_5_Result')" ]
[ [ "numpy.array" ] ]
XiaoshengLin/shadow3
[ "d007ae59a2038db4f9275f7bb026bd1b11549e5f" ]
[ "tests/test_lens.py" ]
[ "import Shadow\nimport numpy\n\n# using mac oasys, for plots\n# from srxraylib.plot.gol import set_qt\n# set_qt()\n\n#\n# runs an absorber of 10 um thickness for a source at 10 keV\n#\n#\n\ndef run_example_lens(user_units_to_cm=1.0,npoint=5000,use_prerefl=0):\n #\n # Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().\n #\n #\n # initialize shadow3 source (oe0) and beam\n #\n beam = Shadow.Beam()\n oe0 = Shadow.Source()\n oe1 = Shadow.OE()\n oe2 = Shadow.OE()\n\n #\n # Define variables. See meaning of variables in:\n # https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml\n # https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml\n #\n\n oe0.FDISTR = 3\n oe0.FSOURCE_DEPTH = 0\n oe0.F_PHOT = 0\n oe0.HDIV1 = 1.0\n oe0.HDIV2 = 1.0\n oe0.ISTAR1 = 0\n oe0.NPOINT = 500000\n oe0.PH1 = 8000.0\n oe0.SIGDIX = 2.49999994e-05\n oe0.SIGDIZ = 8.00000089e-06\n oe0.SIGMAX = 0.0122999996 / user_units_to_cm\n oe0.SIGMAZ = 0.000699999975 / user_units_to_cm\n oe0.VDIV1 = 1.0\n oe0.VDIV2 = 1.0\n\n\n\n oe1.CCC = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0])\n oe1.FCYL = 1\n if use_prerefl:\n oe1.F_R_IND = 2\n oe1.R_ATTENUATION_OBJ = 0.0\n oe1.R_IND_OBJ = 1.0\n oe1.FILE_R_IND_IMA = b'prerefl.dat'\n else:\n oe1.F_R_IND = 0\n oe1.R_IND_OBJ = 1.0\n oe1.R_IND_IMA = 0.9999923264754235\n oe1.R_ATTENUATION_OBJ = 0.0\n oe1.R_ATTENUATION_IMA = 150.727\n\n oe1.FMIRR = 10\n oe1.FWRITE = 3\n oe1.F_EXT = 1\n oe1.F_REFRAC = 1\n oe1.T_INCIDENCE = 0.0\n oe1.T_REFLECTION = 180.0\n oe1.T_SOURCE = 4700.9 / user_units_to_cm\n oe1.T_IMAGE = 0.01 / user_units_to_cm\n oe1.DUMMY = user_units_to_cm\n\n oe2.CCC = numpy.array([0.0, 292.67523*user_units_to_cm**2, 0.0045013279*user_units_to_cm**2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13418387*user_units_to_cm, 0.0])\n oe2.FCYL = 1\n if use_prerefl:\n oe2.F_R_IND = 1\n oe2.FILE_R_IND_OBJ = b'prerefl.dat'\n oe2.R_ATTENUATION_IMA = 0.0\n oe2.R_IND_IMA = 1.0\n else:\n oe2.F_R_IND = 0\n oe2.R_IND_OBJ = 0.9999923264754235\n oe2.R_IND_IMA = 1.0\n oe2.R_ATTENUATION_OBJ = 150.727\n oe2.R_ATTENUATION_IMA = 0.0\n\n oe2.FMIRR = 10\n oe2.FWRITE = 3\n oe2.F_EXT = 1\n oe2.F_REFRAC = 1\n oe2.T_INCIDENCE = 0.0\n oe2.T_REFLECTION = 180.0\n oe2.T_SOURCE = 0.0 / user_units_to_cm\n oe2.T_IMAGE = 30.065 / user_units_to_cm\n oe2.DUMMY = user_units_to_cm\n\n\n\n beam.genSource(oe0)\n\n\n #\n #run optical element 1\n #\n print(\" Running optical element: %d\"%(1))\n\n beam.traceOE(oe1,1)\n\n\n #\n #run optical element 2\n #\n print(\" Running optical element: %d\"%(2))\n\n beam.traceOE(oe2,2)\n\n\n # print(oe0.sourcinfo())\n # print(oe1.mirinfo())\n # print(oe2.mirinfo())\n\n return beam\n\n\ndef test_lens():\n\n\n #\n # inputs\n #\n cm_or_mm = 1 # 0=using cm, 1=using mm\n use_prerefl = 0 # 0=No, 1=Yes\n\n\n if cm_or_mm == 0:\n user_units_to_cm = 1.0\n title = \"Units are cm\"\n elif cm_or_mm == 1:\n user_units_to_cm = 0.1\n title = \"Units are mm\"\n else:\n print(\"No way...\")\n\n\n #\n # run prerefl\n #\n if use_prerefl:\n import xraylib\n symbol = \"Si\"\n density = xraylib.ElementDensity(xraylib.SymbolToAtomicNumber(symbol))\n Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=0,SYMBOL=symbol,DENSITY=density,FILE=\"prerefl.dat\",E_MIN=5000.0,E_MAX=15000.0,E_STEP=100.0)\n\n #\n # run SHADOW\n #\n beam = run_example_lens(user_units_to_cm=user_units_to_cm)\n\n\n tkt = Shadow.ShadowTools.plotxy(beam,3,6,ref=0,nolost=1,nbins=301,title=\"Z,Z' \"+title)\n print(\"Intensity: %f \"%tkt[\"intensity\"])\n print(\"Number of rays: 
%d, number of GOOD rays: %d \"%(beam.nrays(nolost=0),beam.nrays(nolost=1)))\n\n\n #numpy.testing.assert_almost_equal(sh100,xrl100,2)\n\n\nif __name__ == \"__main__\":\n test_lens()\n" ]
[ [ "numpy.array" ] ]
dibyajyotidash/https-github.com-kylejusticemagnuson-pyti
[ "08532970f9d2b163f1223599e3ac80f6c51533e4" ]
[ "tests/test_average_true_range_percent.py" ]
[ "from __future__ import absolute_import\nimport unittest\nimport numpy as np\n\nfrom tests.sample_data import SampleData\nfrom pyti import average_true_range_percent\n\n\nclass TestAverageTrueRangePercent(unittest.TestCase):\n def setUp(self):\n \"\"\"Create data to use for testing.\"\"\"\n self.close_data = SampleData().get_sample_close_data()\n\n self.atr_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, 1.189235578986088,\n 1.1523554512220486, 1.1278525931922065, 1.1546224092640733,\n 1.1660380962839427, 1.465357027466913, 1.7831881894142803,\n 2.3561329806184581, 2.6708206943371162, 3.2466120755686263,\n 3.3784546239726194, 3.3564621491521369, 3.2791301980772869,\n 3.2778865256303997, 3.2875442760137483, 3.2810676552694984,\n 3.0012226151326331, 2.7233687488098011, 2.5062178027349966,\n 2.2774730211707057, 2.1306723573292059, 2.0231111698118602,\n 2.4639048069082961, 2.7153248878733027, 2.9415900735797162,\n 3.457810754140358, 4.0649377298167551, 4.6505410623216603,\n 4.8377005165939497, 4.7010401069556149, 4.5393599025684406,\n 4.3416370097985153, 4.1909513300536148, 4.2334214723046726,\n 4.2994054993189517, 4.244940888039114, 3.9739765293353395,\n 3.7984682769968288, 3.5821945386433534, 3.3670297979975179,\n 3.0716656116914933, 2.8662794746678979, 3.0289151976072608,\n 2.9969860158644486, 2.9760460695914741, 2.9289691288143112,\n 2.8058612079021295, 2.531556736800797, 2.4252616931651314,\n 2.2944282121480746, 2.1964244646895756, 2.1062390474088564,\n 2.0476395013091233, 1.7748361482743773, 1.558061265928161,\n 1.4856536290363038, 1.4497927574913438, 1.4352358669002241,\n 1.4299189209362686, 1.4620245560453282, 1.5102324721906708,\n 1.6037560819721852, 1.7746556607866535, 1.9035211913074188,\n 2.0074893237351557, 2.0029061884391339, 1.9371230450535861,\n 1.8548689401186171, 1.8355003791530897, 1.8003331288038178,\n 1.8931540501005137, 1.9806126301955329, 2.0822871750835494,\n 2.1587399768435973, 2.1858863683758751, 2.1992145124735707,\n 2.2042274600601361, 1.9903770888121171, 1.7884145439862129,\n 1.6114041799566228, 1.4484765868823961, 1.3246773786986321,\n 1.2742050031825125, 1.2954614666198452, 1.3205653492681662,\n 1.2899663246832471, 1.2549300623614186, 1.197182571361552,\n 1.1407924958934879, 1.1008057151615109, 1.0691600335312013,\n 0.96093180817465618, 0.8664228618513774, 0.96576000827190556,\n 1.0376009347982038, 1.0764636750622629, 1.0975646487156931,\n 1.2540789775805865, 1.8437302592780713, 2.3966411426581957,\n 2.9608753508340118, 3.423129872873973, 3.5883658875288575,\n 3.2621236585354922, 2.8752781621886734, 2.5375908547247414,\n 2.2497857207671332, 2.4554221153770741, 2.5315780677888444,\n 2.7585119334766222, 2.8337261439349244, 2.9745745527293854,\n 2.9297633150649793, 3.1503331074467429, 3.212529671651343,\n 3.3456605064982394, 3.2905345939522999]\n\n self.atr_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n 1.4180925319661521, 1.6202706653923087, 1.841626084216712,\n 2.3148854933979575, 2.6901735299560841, 3.2282310244121613,\n 3.563083750221574, 3.7982037524565646, 3.7546634785721498,\n 3.7323220510040827, 3.6914812566023922, 3.6379421910796386,\n 3.5587099948976539, 3.5146074512555128, 3.3287123687477114,\n 3.0890446215528855, 2.8876354582425368, 2.7197748421358332,\n 2.9957755812395579, 3.0918928706039539, 3.2034849639589456,\n 3.5276212120453141, 3.966956483762083, 4.4299504506994678,\n 4.9204122583250323, 
5.2383912644056707, 5.1851996464032979,\n 4.932742857755783, 4.7691968174243575, 4.7104555366635612,\n 4.7209742731687623, 4.7303883735587853, 4.8062829601892965,\n 4.8770730470382375, 4.786932261959409, 4.5940745527992979,\n 4.2712108603228502, 4.0426131987685459, 4.0492483355737523,\n 4.0367369950162013, 4.0266065104420958, 3.8442225538659289,\n 3.7281167468319927, 3.5969454050028618, 3.5246336505629778,\n 3.284458875889694, 3.0905268522063674, 2.9085376948755512,\n 2.7680000175672808, 2.6252958389957679, 2.5159579023784877,\n 2.3306698200373246, 2.197229157817036, 2.1031142412351342,\n 2.0360589047455808, 2.0179156129481299, 1.9962963663924316,\n 2.010951331437755, 2.0924060195314591, 2.1470029836845206,\n 2.1917407916945137, 2.3469908853240153, 2.4897011782528256,\n 2.4061646855957806, 2.3351333133106342, 2.230276487867163,\n 2.2408826576806198, 2.2629816480494824, 2.3143268379407238,\n 2.3476629061550369, 2.3674721414695301, 2.374550948419341,\n 2.352947385951865, 2.351910270923812, 2.3499424768917128,\n 2.1608124958654997, 1.9893774678680414, 1.851281037063653,\n 1.7449273052921825, 1.6992086324724789, 1.7010190503124114,\n 1.7165471586824528, 1.6847862993283729, 1.634206765480277,\n 1.6018940222973894, 1.5378290457744153, 1.4602893936269465,\n 1.3946452189065861, 1.3308265877060355, 1.3548427859710599,\n 1.3588718015896448, 1.3628282348400853, 1.366672736225832,\n 1.4652930518912579, 1.9954910663104219, 2.4924846364624273,\n 2.9075743465183366, 3.261046890754919, 3.5616237891147984,\n 3.90750519846529, 4.0167530144468557, 3.7380084557614692,\n 3.4172149917994443, 3.4633450788377629, 3.4315003707559737,\n 3.5392138594642271, 3.5549856117004808, 3.6469399018473312,\n 3.8534409701377266, 4.2338496480817174, 4.1988778641402176,\n 4.2434190220063472, 4.1710674834485006]\n\n self.atr_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, 2.7186399732723401, 3.1897417785103217,\n 3.5720003140004968, 3.8243907563025203, 3.9543745819821638,\n 4.14966762493796, 4.1567426181748255, 4.0952266131322199,\n 3.9774055776649679, 3.9005313973245412, 3.8740200478862201,\n 3.7608260441925863, 3.5854123216136311, 3.4311560353359063,\n 3.6153121546372664, 3.6024786252080254, 3.6288964245394193,\n 3.8135185789216313, 4.1288199078722743, 4.4946788398373076,\n 4.9058770233302322, 5.1603606395261004, 5.3529137258920727,\n 5.445088414102206, 5.3705628277712574, 5.2556944589666976,\n 5.2134596631940662, 5.1637259824115125, 5.1838624309051387,\n 5.2791302264027182, 5.4683909738033964, 5.5120358790272803,\n 5.2939445953362574, 5.1115798997200201, 5.0421404425151195,\n 4.9375845577616317, 4.8404418486438017, 4.7051409062810361,\n 4.6372061837056462, 4.432524791048948, 4.3857899092249255,\n 4.2640474851745038, 4.1058549092701808, 3.8557539531622109,\n 3.6529206682490591, 3.4440864860929081, 3.2769697024906264,\n 3.0924922423109011, 2.9510365216850634, 2.8116170875265167,\n 2.6711372457210696, 2.5919272122011878, 2.5577863035116755,\n 2.5192993537277801, 2.5383732677201931, 2.5348033661300149,\n 2.5286463785226361, 2.6292596767417997, 2.7167970175342209,\n 2.7550799044874084, 2.8604804641492096, 2.7345280164604793,\n 2.6868520965130984, 2.6599028030438059, 2.6726181141515055,\n 2.6688858692968398, 2.6540997015450611, 2.6291880295217065,\n 2.6002059836457319, 2.5941937083210851, 2.5700615442727237,\n 2.535308379517017, 2.5013101557717698, 2.339251886928428,\n 2.220771727829487, 2.1516737476710861, 
2.1053990350980456,\n 2.0811011642229702, 2.0438008059797284, 2.0320916025384799,\n 2.0069284521724975, 1.9527756899172914, 1.9026101646939952,\n 1.8269161484400693, 1.7312268984763945, 1.7180069351756679,\n 1.6836641042431297, 1.6535040163297123, 1.6269651536833674,\n 1.6987531568883674, 2.136689374875739, 2.5300549519920104,\n 2.9404650888832795, 3.2991327451858363, 3.5341944123641578,\n 3.8180814412294013, 4.0387527580573863, 4.2834486458410144,\n 4.3625514336653879, 4.4307414948453614, 4.3447237418697808,\n 4.3526388061920898, 4.3079912365795749, 4.3466474073655306,\n 4.510844263106514, 4.8245544999792642, 4.9825704530372255,\n 5.1956243905869313, 5.0759722759771062]\n\n def test_atrp_period_6(self):\n period = 6\n atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)\n np.testing.assert_array_equal(atrp, self.atr_period_6_expected)\n\n def test_atrp_period_8(self):\n period = 8\n atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)\n np.testing.assert_array_equal(atrp, self.atr_period_8_expected)\n\n def test_atrp_period_10(self):\n period = 10\n atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)\n np.testing.assert_array_equal(atrp, self.atr_period_10_expected)\n\n def test_atrp_invalid_period(self):\n period = 128\n with self.assertRaises(Exception) as cm:\n average_true_range_percent.average_true_range_percent(self.close_data, period)\n expected = \"Error: data_len < period\"\n self.assertEqual(str(cm.exception), expected)\n" ]
[ [ "numpy.testing.assert_array_equal" ] ]
JaakkoAhola/LES-scripting
[ "1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2" ]
[ "iceScripts/isdacProfileModifications_dry_above.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 13:38:05 2018\n\n@author: aholaj\n\"\"\"\n\nimport numpy as np\n\nimport sound_in_profiles as sp\n\nimport PythonMethods as pm\n\nimport ModDataPros as mdp\n\nfrom copy import deepcopy\n\nfrom FindCloudBase import calc_rh_profile\nfrom ECLAIR_calcs import calc_rw\n\nimport time\n\nstart = time.time()\n\n\nisdac = sp.Profiles(\"sound_in3.5\",\"bin/case_isdac\")\n\nrh = isdac.getRH()\n\nq = isdac.getQ()\n\nz = isdac.getZ()\n\nt = isdac.getT()\n\np = isdac.getPS()\n\nu = isdac.getU()\n\nv = isdac.getV()\n\n\nosc=rh-100.*np.ones(np.shape(rh))\nab=osc[0];\nfor s in range(len(osc)):\n if np.sign(ab)*np.sign(osc[s]) == -1:\n print(s)\n ab = osc[s]\n \n\ndry0 = 236\ndryL =364\n#size = 40\nrh_target = 20\nrh_mod = deepcopy(rh)\n\nrh_mod[dry0:(dryL+1)] = rh_target\n#\n#\n\n#for kk in xrange(dry0,(dryL+1)):\n# \n# q_mod[kk] = 1000.*calc_rw( rh[kk], t[kk], p[kk] ) \n\n\nz[0] = isdac.getP0()\n#\nq_mod = deepcopy(q)\nq_mod = np.multiply(q_mod, 1.e-3)\nrh_temp = 100000.*np.ones(np.shape(rh))\n\n\n\nfor i in range(dry0,dryL+1): #size\n k = 1\n incr = 1. #1.e-6\n incr = incr*1.e-3\n etumerkki = 1.\n print('z', i)\n vanha = np.abs( rh_temp[i] - rh_mod[i] )\n switchCount = 0\n while (( vanha > 0.01 ) and (switchCount < 300)): #and (k < 10000)\n \n q_mod[i] = np.max(q_mod[i]-etumerkki*k*incr,0)\n \n rh_temp, b = calc_rh_profile( t, q_mod, z )\n uusi = np.abs( rh_temp[i] - rh_mod[i] )\n \n if uusi-vanha > 0:\n print('switch')\n etumerkki = -1*etumerkki\n incr = incr*1.e-1\n switchCount += 1\n incr = max(incr, 1.e-9) \n vanha = uusi \n k += 1\n print(uusi, rh_temp[i], rh_mod[i])\n \n \n print('q[i]', q[i], 'q_mod[i]', q_mod[i]*1.e+3)\n print(' ') \n\nrh_iter, ps_iter = calc_rh_profile( t, q_mod, z )\n\n\nq_mod = np.multiply(q_mod, 1.e3)\n#isdac.writeNewSoundIn(\"sound_in3.5_rh_dry_above_\"+str(rh_target), z, t, q_mod, u, v)\n \n\n#####################\n### plotting ########\n####################\nz[0] = 0.\n\nfig, ax = mdp.plottaa( rh, z, tit = 'Relative humidity', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = True, LEGEND = True, omavari = 'k' )\n\nfig, ax = mdp.plottaa( rh_mod[dry0-1:(dryL+1)+1], z[dry0-1:(dryL+1)+1], tit = 'Relative humidity dry-above', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'r' )\n\n#mdp.plottaa( rh_iter[dry0:(dryL+1)], z[dry0:(dryL+1)], tit = 'Relative humidity dry-above iterated', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'b' )\nxticks = list(range(0, 111, 10))\nxlabels = list(map(str, xticks))\nax.set_xticks( xticks )\nax.set_xticklabels( xlabels )\n\n####################\nmdp.plottaa( q, z, tit = 'Total water mixing ratio', xl = 'mix. rat. [g/kg]', yl = 'height [m]', markers=False, uusikuva = True, LEGEND = True, omavari = 'k' )\n\nmdp.plottaa( q_mod[dry0:(dryL+1)], z[dry0:(dryL+1)], tit = 'Total water mixing ratio dry-below', xl = 'mix. rat. [g/kg]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'b' )\n\n\n\nmdp.plot_lopetus()\n#\n\n\n\n\nend = time.time()\n\nprint('suoritusaika', end-start)" ]
[ [ "numpy.multiply", "numpy.sign", "numpy.abs", "numpy.max", "numpy.shape" ] ]
steerapi/webdnn
[ "1df51cc094e5a528cfd3452c264905708eadb491" ]
[ "test/runtime/frontend_test/chainer_test/functions_test/connection_test/convolution_2d_function_test.py" ]
[ "import chainer\nimport numpy as np\n\nfrom test.util import generate_kernel_test_case, wrap_template\nfrom webdnn.graph.placeholder import Placeholder\nfrom webdnn.frontend.chainer.converter import ChainerConverter\nfrom webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable\n\n\n@wrap_template\ndef template(n=2, c_in=4, h_in=6, w_in=8, c_out=10, ksize=3, stride=1, pad=0, nobias=True, description=\"\"):\n link = chainer.links.Convolution2D(c_in, c_out, ksize=ksize, stride=stride, pad=pad, nobias=nobias)\n vx = chainer.Variable(np.random.rand(n, c_in, h_in, w_in).astype(np.float32))\n vy = link(vx)\n\n graph = ChainerConverter().convert([vx], [vy])\n\n x = graph.inputs[0]\n y = graph.outputs[0]\n\n generate_kernel_test_case(\n description=f\"[chainer] L.Convolution2D {description}\",\n graph=graph,\n inputs={x: vx.data},\n expected={y: vy.data},\n EPS=1e-2\n )\n\n\ndef test():\n template()\n\n\ndef test_nobias():\n template(nobias=True)\n\n\ndef test_nopadding():\n template(pad=0)\n\n\ndef test_irregular_kernel_size():\n template(ksize=(3, 4))\n\n\ndef test_irregular_stride_size():\n template(stride=(2, 3))\n\n\ndef test_irregular_padding_size1():\n template(pad=(1, 2))\n\n\ndef test_irregular_padding_size2():\n template(pad=2)\n\n\ndef test_irregular_padding_size3():\n template(pad=2, ksize=5)\n\n\ndef test_irregular_padding_size4():\n template(pad=(1, 0))\n\n\ndef test_irregular_size():\n template(ksize=(3, 5), stride=(2, 3), pad=(1, 3))\n\n\ndef test_special_size():\n # https://github.com/mil-tokyo/webdnn/issues/525\n # In case that the max position index (=n*c_in*h_in*w_in*ksize*ksize) > 1<<23\n template(n=1, c_in=1 << 6, h_in=1 << 7, w_in=1 << 7, c_out=3, ksize=(1 << 2) + 1, pad=1 << 1)\n\n\ndef test_with_placeholder():\n link = chainer.links.Convolution2D(None, 16, ksize=3, stride=1, pad=1)\n vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))\n vy = link(vx)\n\n N = Placeholder(label=\"N\")\n H = Placeholder(label=\"H\")\n W = Placeholder(label=\"W\")\n px = PlaceholderVariable([N, 3, H, W])\n py = link(px)\n\n graph = ChainerConverter().convert([px], [py])\n\n x = graph.inputs[0]\n y = graph.outputs[0]\n\n N.value = 1\n H.value = 16\n W.value = 16\n generate_kernel_test_case(\n description=f\"[chainer] L.Convolution2D with placeholder\",\n graph=graph,\n backend=[\"webgpu\", \"webassembly\"],\n inputs={x: vx.data},\n expected={y: vy.data},\n EPS=1e-2\n )\n" ]
[ [ "numpy.random.rand" ] ]
YurongYou/MODEST
[ "cfc0465ed737f6c3166e6b5d08231880073b4552" ]
[ "downstream/OpenPCDet/pcdet/utils/calibration_kitti.py" ]
[ "import numpy as np\n\n\ndef get_calib_from_file(calib_file):\n with open(calib_file) as f:\n lines = f.readlines()\n\n obj = lines[2].strip().split(' ')[1:]\n P2 = np.array(obj, dtype=np.float32)\n obj = lines[3].strip().split(' ')[1:]\n P3 = np.array(obj, dtype=np.float32)\n obj = lines[4].strip().split(' ')[1:]\n R0 = np.array(obj, dtype=np.float32)\n obj = lines[5].strip().split(' ')[1:]\n Tr_velo_to_cam = np.array(obj, dtype=np.float32)\n\n return {'P2': P2.reshape(3, 4),\n 'P3': P3.reshape(3, 4),\n 'R0': R0.reshape(3, 3),\n 'Tr_velo2cam': Tr_velo_to_cam.reshape(3, 4)}\n\n\nclass Calibration(object):\n def __init__(self, calib_file):\n if not isinstance(calib_file, dict):\n calib = get_calib_from_file(calib_file)\n else:\n calib = calib_file\n\n self.P2 = calib['P2'] # 3 x 4\n self.R0 = calib['R0'] # 3 x 3\n self.V2C = calib['Tr_velo2cam'] # 3 x 4\n\n # Camera intrinsics and extrinsics\n self.cu = self.P2[0, 2]\n self.cv = self.P2[1, 2]\n self.fu = self.P2[0, 0]\n self.fv = self.P2[1, 1]\n self.tx = self.P2[0, 3] / (-self.fu)\n self.ty = self.P2[1, 3] / (-self.fv)\n\n def cart_to_hom(self, pts):\n \"\"\"\n :param pts: (N, 3 or 2)\n :return pts_hom: (N, 4 or 3)\n \"\"\"\n pts_hom = np.hstack((pts, np.ones((pts.shape[0], 1), dtype=np.float32)))\n return pts_hom\n\n def rect_to_lidar(self, pts_rect):\n \"\"\"\n :param pts_lidar: (N, 3)\n :return pts_rect: (N, 3)\n \"\"\"\n pts_rect_hom = self.cart_to_hom(pts_rect) # (N, 4)\n R0_ext = np.hstack((self.R0, np.zeros((3, 1), dtype=np.float32))) # (3, 4)\n R0_ext = np.vstack((R0_ext, np.zeros((1, 4), dtype=np.float32))) # (4, 4)\n R0_ext[3, 3] = 1\n V2C_ext = np.vstack((self.V2C, np.zeros((1, 4), dtype=np.float32))) # (4, 4)\n V2C_ext[3, 3] = 1\n\n pts_lidar = np.dot(pts_rect_hom, np.linalg.inv(np.dot(R0_ext, V2C_ext).T))\n return pts_lidar[:, 0:3]\n\n def lidar_to_rect(self, pts_lidar):\n \"\"\"\n :param pts_lidar: (N, 3)\n :return pts_rect: (N, 3)\n \"\"\"\n pts_lidar_hom = self.cart_to_hom(pts_lidar)\n pts_rect = np.dot(pts_lidar_hom, np.dot(self.V2C.T, self.R0.T))\n # pts_rect = reduce(np.dot, (pts_lidar_hom, self.V2C.T, self.R0.T))\n return pts_rect\n\n def rect_to_img(self, pts_rect):\n \"\"\"\n :param pts_rect: (N, 3)\n :return pts_img: (N, 2)\n \"\"\"\n pts_rect_hom = self.cart_to_hom(pts_rect)\n pts_2d_hom = np.dot(pts_rect_hom, self.P2.T)\n # pts_rect_hom[:, 2][np.isclose(pts_rect_hom[:, 2], 0)] = 1e-6\n pts_img = (pts_2d_hom[:, 0:2].T / pts_rect_hom[:, 2]).T # (N, 2)\n pts_rect_depth = pts_2d_hom[:, 2] - self.P2.T[3, 2] # depth in rect camera coord\n return pts_img, pts_rect_depth\n\n def lidar_to_img(self, pts_lidar):\n \"\"\"\n :param pts_lidar: (N, 3)\n :return pts_img: (N, 2)\n \"\"\"\n pts_rect = self.lidar_to_rect(pts_lidar)\n pts_img, pts_depth = self.rect_to_img(pts_rect)\n return pts_img, pts_depth\n\n def img_to_rect(self, u, v, depth_rect):\n \"\"\"\n :param u: (N)\n :param v: (N)\n :param depth_rect: (N)\n :return:\n \"\"\"\n x = ((u - self.cu) * depth_rect) / self.fu + self.tx\n y = ((v - self.cv) * depth_rect) / self.fv + self.ty\n pts_rect = np.concatenate((x.reshape(-1, 1), y.reshape(-1, 1), depth_rect.reshape(-1, 1)), axis=1)\n return pts_rect\n\n def corners3d_to_img_boxes(self, corners3d):\n \"\"\"\n :param corners3d: (N, 8, 3) corners in rect coordinate\n :return: boxes: (None, 4) [x1, y1, x2, y2] in rgb coordinate\n :return: boxes_corner: (None, 8) [xi, yi] in rgb coordinate\n \"\"\"\n sample_num = corners3d.shape[0]\n corners3d_hom = np.concatenate((corners3d, np.ones((sample_num, 8, 1))), axis=2) # (N, 8, 
4)\n\n img_pts = np.matmul(corners3d_hom, self.P2.T) # (N, 8, 3)\n\n x, y = img_pts[:, :, 0] / img_pts[:, :, 2], img_pts[:, :, 1] / img_pts[:, :, 2]\n x1, y1 = np.min(x, axis=1), np.min(y, axis=1)\n x2, y2 = np.max(x, axis=1), np.max(y, axis=1)\n\n boxes = np.concatenate((x1.reshape(-1, 1), y1.reshape(-1, 1), x2.reshape(-1, 1), y2.reshape(-1, 1)), axis=1)\n boxes_corner = np.concatenate((x.reshape(-1, 8, 1), y.reshape(-1, 8, 1)), axis=2)\n\n return boxes, boxes_corner\n" ]
[ [ "numpy.ones", "numpy.matmul", "numpy.zeros", "numpy.max", "numpy.min", "numpy.array", "numpy.dot" ] ]
roman-baldaev/course-project
[ "b65ba018c16697224f15916b08ce7f09634d1f8c" ]
[ "model/src/DataModel.py" ]
[ "import numpy as np\nimport pandas as pd\n\nclass DataModel:\n \"\"\"\n This class implements a data model - values at time points and provides methods for working with these data.\n \"\"\"\n\n def __init__(self, n=0, values=None, times=None):\n \"\"\"\n A constructor that takes values and a time point.\n\n :param values: Array of values process\n :param times: Array of a time points\n \"\"\"\n if (values is None) or (times is None):\n self._times = np.zeros((n, ))\n self._values = np.zeros((n, ))\n else:\n if len(values) != len(times):\n print(\"Different size of values and times\")\n else:\n self._times = np.array(times, dtype=float)\n self._values = np.array(values, dtype=float)\n\n def print(self, n=None):\n if n is not None:\n _n = n\n elif self._times.shape:\n _n = self._times.shape[0]\n for i in range(_n):\n print(\"Time: {}___Value: {}\".format(self._times[i], self._values[i]))\n\n @property\n def mean(self):\n \"\"\"\n :return: Mean of values\n \"\"\"\n return self._times.mean()\n\n def get_values(self):\n return self._values\n\n def get_times(self):\n return self._times\n\n def add_value(self, value, index):\n # self._values.__add__(value)\n self._values[index] = value\n\n def add_time(self, time, index):\n # self._times.__add__(time)\n self._times[index] = time\n\n def get_value(self, index):\n return self._values[index]\n\n def get_time(self, index):\n return self._times[index]" ]
[ [ "numpy.array", "numpy.zeros" ] ]
icecube-pixel/grok-auto-complete
[ "747aab90f846410f444914713d238034fcf767a2" ]
[ "get_grok_repos.py" ]
[ "import logging\r\nfrom github import Github\r\nfrom typing import Dict, Tuple, List\r\nimport os\r\nimport argparse\r\nimport traceback\r\nfrom collections import Counter\r\nfrom tenacity import retry, stop_after_attempt, wait_exponential\r\nfrom time import sleep\r\nimport pandas as pd\r\n\r\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\r\n datefmt='%Y-%m-%d:%H:%M:%S',\r\n level=logging.INFO)\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n# https://docs.github.com/en/github/searching-for-information-on-github/searching-on-github/searching-for-repositories#search-by-when-a-repository-was-created-or-last-updated\r\n\r\ndef get_query_string_to_exclude()->str:\r\n \"\"\"\r\n Generates query string instead of hard-coding and appends to the query string\r\n :return:\r\n \"\"\"\r\n logger.info(\"Inside function to generate query to hit API\")\r\n languages_to_exclude = ['Jinja', 'Shell', 'YAML', 'INI', 'Perl', 'Haskell']\r\n exclude_languages = \" \".join([\"NOT language:{}\".format(language) for language in languages_to_exclude])\r\n return \" \" + exclude_languages\r\n\r\n\r\ndef get_matching_code(args: Dict)->None:\r\n \"\"\"\r\n Gets the top matches of code based on pattern where grok is used and is of not YAML etc\r\n \"\"\"\r\n logger.info(\"Inside to get top repositories function\")\r\n master_data = []\r\n observed_licences = []\r\n try:\r\n g_obj = Github(args['token'], timeout=3000) # Overriding timeout of 3000 seconds\r\n pattern_file_extension = '\"grok\" in:file extension:j2'\r\n lang_to_exclude = get_query_string_to_exclude()\r\n _query_str = f\"{pattern_file_extension}{lang_to_exclude}\"\r\n logger.info(f\"Processing query {_query_str}\")\r\n sleep(10)\r\n results = g_obj.search_code(_query_str)\r\n for repo in results:\r\n master_data.append(vars(repo))\r\n\r\n observed_licences.append(repo.license)\r\n file_name = str(repo).split(\"ContentFile(path=\")[1].replace('\"',\"\")[:-1].replace(\"/\", \"_\")\r\n path_to_dump = os.path.join(os.getcwd(), \"data\", file_name)\r\n logger.info(\"Dumping file {}\".format(file_name))\r\n with open(path_to_dump, \"wb\") as f:\r\n f.write(repo.decoded_content)\r\n logger.info(Counter(observed_licences))\r\n except Exception as e:\r\n logger.error(e)\r\n logger.error(traceback.format_exc())\r\n pd.DataFrame(master_data).to_csv(\"RepoData.csv\", index=False)\r\n\r\ndef get_inputs()->Dict:\r\n \"\"\"Gets the username and password from the console \"\"\"\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--token\", dest=\"token\", help=\"Enter the oAuth token\", required=True)\r\n args = vars(parser.parse_args())\r\n return args\r\n\r\n\r\ndef main():\r\n logger.info(\"Inside Main\")\r\n args = get_inputs()\r\n get_matching_code(args=args)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" ]
[ [ "pandas.DataFrame" ] ]
PierreExeter/gym-reacher
[ "d58edeb93b4b703101dc0505232c883fd012dbad" ]
[ "scripts/test_DDPG.py" ]
[ "import gym\nimport numpy as np\nimport gym_reacher\n\nfrom stable_baselines.ddpg.policies import MlpPolicy\nfrom stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec\nfrom stable_baselines import DDPG\n\n# env = gym.make('MountainCarContinuous-v0')\nenv = gym.make('Reacher1Dof-v0')\n\n# the noise objects for DDPG\nn_actions = env.action_space.shape[-1]\nparam_noise = None\naction_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))\n\nmodel = DDPG(MlpPolicy, env, verbose=1, param_noise=param_noise, action_noise=action_noise)\nmodel.learn(total_timesteps=10000)\nmodel.save(\"ddpg_mountain\")\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
gonzalo-munillag/Exponential_Randomised_Response
[ "1ae2c867d77c6e92f1df0bb7120862e4f9aa15e4" ]
[ "differential-privacy-library-main/tests/tools/test_histogramdd.py" ]
[ "import numpy as np\nfrom unittest import TestCase\n\nfrom diffprivlib.accountant import BudgetAccountant\nfrom diffprivlib.tools.histograms import histogramdd\nfrom diffprivlib.utils import global_seed, PrivacyLeakWarning, BudgetError\n\n\nclass TestHistogramdd(TestCase):\n def test_no_params(self):\n a = np.array([1, 2, 3, 4, 5])\n with self.assertWarns(PrivacyLeakWarning):\n res = histogramdd(a)\n self.assertIsNotNone(res)\n\n def test_no_range(self):\n a = np.array([1, 2, 3, 4, 5])\n with self.assertWarns(PrivacyLeakWarning):\n res = histogramdd(a, epsilon=2)\n self.assertIsNotNone(res)\n\n def test_bins_instead_of_range(self):\n a = np.array([1, 2, 3, 4, 5])\n res = histogramdd([a, a], epsilon=2, bins=([0, 2, 6], [0, 2, 6]))\n self.assertIsNotNone(res)\n\n def test_same_edges(self):\n a = np.array([1, 2, 3, 4, 5])\n _, edges = np.histogramdd(a, bins=3, range=[(0, 10)])\n _, dp_edges = histogramdd(a, epsilon=1, bins=3, range=[(0, 10)])\n\n for i in range(len(edges)):\n self.assertTrue((edges[i] == dp_edges[i]).all())\n\n def test_different_result(self):\n global_seed(3141592653)\n a = np.array([1, 2, 3, 4, 5])\n hist, _ = np.histogramdd(a, bins=3, range=[(0, 10)])\n dp_hist, _ = histogramdd(a, epsilon=0.1, bins=3, range=[(0, 10)])\n\n # print(\"Non-private histogram: %s\" % hist)\n # print(\"Private histogram: %s\" % dp_hist)\n self.assertTrue((hist != dp_hist).any())\n\n def test_density_1d(self):\n global_seed(3141592653)\n a = np.array([1, 2, 3, 4, 5])\n dp_hist, _ = histogramdd(a, epsilon=10, bins=3, range=[(0, 10)], density=True)\n\n # print(dp_hist.sum())\n\n self.assertAlmostEqual(dp_hist.sum(), 1.0 * 3 / 10)\n\n def test_density_2d(self):\n global_seed(3141592653)\n a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]).T\n dp_hist, _ = histogramdd(a, epsilon=10, bins=3, range=[(0, 10), (0, 10)], density=True)\n\n # print(dp_hist.sum())\n\n self.assertAlmostEqual(dp_hist.sum(), 1.0 * (3 / 10) ** 2)\n\n def test_accountant(self):\n acc = BudgetAccountant(1.5, 0)\n\n a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]).T\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True, accountant=acc)\n\n with self.assertRaises(BudgetError):\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True, accountant=acc)\n\n def test_default_accountant(self):\n BudgetAccountant.pop_default()\n\n a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]).T\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True)\n acc = BudgetAccountant.pop_default()\n self.assertEqual((1, 0), acc.total())\n self.assertEqual(acc.epsilon, float(\"inf\"))\n self.assertEqual(acc.delta, 1.0)\n\n histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)])\n\n self.assertEqual((1, 0), acc.total())\n" ]
[ [ "numpy.array", "numpy.histogramdd" ] ]
PengningChao/emdb-sphere
[ "d20ac81ab4fd744f87788bda46d3aa19598658ee" ]
[ "dualbound/Lagrangian/spatialProjopt_Zops_numpy.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 4 21:39:12 2020\n\n@author: pengning\n\nThis is part of the grad/Hess engine for spatial projection versions of the \noriginal global constraint <S|T>-<T|U|T>. The Lagrangian multipliers are distributed in \nthe order alphaP0_1, alphaP0_2, alphaP1_1, alphaP1_2 ... where P0 is just the identity\n\"\"\"\n\nimport numpy as np\n\n\ndef Z_TT(Lags, O, UPlist):\n #P0 is identity and UP0 is the original U matrix\n ZTT = np.zeros_like(O, dtype=np.complex)\n ZTT[:,:] = O[:,:]\n for i in range(len(UPlist)):\n SymUP = (UPlist[i]+UPlist[i].conj().T)/2\n AsymUP = (UPlist[i]-UPlist[i].conj().T)/(2j)\n ZTT += Lags[2*i]*SymUP + Lags[2*i+1]*AsymUP\n return ZTT\n\ndef grad_Z_TT(Lags, UPlist):\n gradZ = []\n for i in range(len(UPlist)):\n SymUP = (UPlist[i]+UPlist[i].conj().T)/2\n AsymUP = (UPlist[i]-UPlist[i].conj().T)/(2j)\n gradZ.append(SymUP)\n gradZ.append(AsymUP)\n return gradZ\n\n\ndef check_spatialProj_Lags_validity(Lags, Olist, UPlistlist):\n modenum = len(Olist)\n mineig = np.inf\n for mode in range(modenum):\n ZTT = Z_TT(Lags, Olist[mode], UPlistlist[mode])\n eigZTT = np.linalg.eigvalsh(ZTT)\n \n if eigZTT[0]<0:\n print('mineig', eigZTT[0])\n return eigZTT[0]\n mineig = min(mineig,eigZTT[0])\n return mineig\n\n\ndef find_singular_ZTT_eigv(Lags, Olist, UPlistlist):\n modenum = len(Olist)\n mineigw = np.inf\n mineigv = np.zeros(Olist[0].shape[0])\n \n modemineig = -1\n for i in range(modenum):\n ZTT = Z_TT(Lags, Olist[i], UPlistlist[i])\n eigw, eigv = np.linalg.eigh(ZTT)\n if eigw[0]<=0:\n modemineig = i\n mineigv = eigv[:,0]\n return modemineig, mineigv\n elif eigw[0]<mineigw:\n mineigw = eigw[0]\n mineigv = eigv[:,0]\n modemineig = i\n return modemineig, mineigv\n\n\ndef get_ZTT_mineig(Lags, Olist, UPlistlist, eigvals_only=False):\n modenum = len(Olist)\n mineigw = np.inf\n modemineig = -1\n \n if eigvals_only:\n for mode in range(modenum):\n ZTT = Z_TT(Lags, Olist[mode], UPlistlist[mode])\n eigw = np.linalg.eigvalsh(ZTT)\n if eigw[0]<=0:\n return mode, eigw[0]\n elif eigw[0]<mineigw:\n mineigw = eigw[0]\n modemineig = mode\n return modemineig, mineigw\n else:\n for mode in range(modenum):\n ZTT = Z_TT(Lags, Olist[mode], UPlistlist[mode])\n eigw, eigv = np.linalg.eigh(ZTT)\n if eigw[0]<=0:\n return mode, eigw[0], eigv[:,0]\n elif eigw[0]<mineigw:\n mineigw = eigw[0]\n mineigv = eigv[:,0]\n modemineig = mode\n return modemineig, mineigw, mineigv\n\ndef get_inc_ZTT_mineig(incLags, include, Olist, UPlistlist, eigvals_only=False):\n Lags = np.zeros(len(include))\n Lags[include] = incLags[:]\n return get_ZTT_mineig(Lags, Olist, UPlistlist, eigvals_only=eigvals_only)\n\n\n###method for finding derivatives of mineig of ZTT, to use for phase I (entering domain of duality) of optimization\n\ndef get_ZTT_mineig_grad(ZTT, gradZTT):\n eigw, eigv = np.linalg.eigh(ZTT)\n eiggrad = np.zeros(len(gradZTT))\n \n for i in range(len(eiggrad)):\n eiggrad[i] = np.real(np.vdot(eigv[:,0], gradZTT[i] @ eigv[:,0]))\n return eiggrad\n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "numpy.linalg.eigh", "numpy.linalg.eigvalsh", "numpy.vdot" ] ]
juniorcl/virtual-lockin-prototype
[ "5f75897a65620f6180f37bcaa3b4291d605aaf9f" ]
[ "auxiliary.py" ]
[ "############################# Helper #################################\n## This file was created to support the lock-in program ##\n######################################################################\n## These functions can be imported using: import lockin-auxiliary as aux\n## and put aux.<name of the function>\nfrom scipy.signal import bessel, filtfilt, butter\nimport numpy as np\nimport wave\n\ndef lowPassFilter(sinal, REFSIG, RATE, ORDER, ROLL, CUTOFF):\n \n y_fft0, x_fft0 = freq0fftPSD(sinal, REFSIG, RATE, ROLL)\n y_bessel, x_bessel = lowBesselPSD(sinal, REFSIG, RATE, CUTOFF, ORDER, ROLL)\n y_butter, x_butter = lowButterPSD(sinal, REFSIG, RATE, CUTOFF, ORDER, ROLL)\n\n return x_fft0, y_fft0, x_bessel, y_bessel, x_butter, y_butter\n\ndef refSignal(file, chunk):\n wf = wave.open(file, 'rb')\n sinalbit = np.frombuffer(wf.readframes(chunk), np.int16)\n return inVolt(sinalbit[::2])\n\ndef rmsFunction(signal):\n #Root-Mean-Square function\n f = lambda i: i**2/len(signal)\n soma = np.sum(list(map(f, signal)))\n return np.sqrt(soma)\n\ndef sigMultiply(signal, signal_ref, roll):\n #multiply the signal and referency signals\n sin_psd = np.multiply(signal, signal_ref)\n cos_psd = np.multiply(signal, np.roll(signal_ref, roll))\n return sin_psd, cos_psd\n\ndef lowButter(data, fs, cutoff, order):\n #this is a butter lowpass filter\n nyq = 0.5*fs\n normal_cutoff = cutoff/nyq\n b, a = butter(order, normal_cutoff, btype='low', analog=False)\n y = filtfilt(b, a, data)\n return y\n\ndef lowBessel(data, fs, cutoff, order):\n #this is a bessel lowpass filter\n nyq = 0.5*fs\n normal_cutoff = cutoff/nyq\n b, a = bessel(order, normal_cutoff, btype='low', analog=False)\n y = filtfilt(b, a, data)\n return y\n\ndef inVolt(signal):\n #converts bits to volts\n slope = 1.4286015335045335e-4 #slope found with minor error: 7.672327425854542e-09\n intercept = 20.975684328898847e-4 #intercept is the same of slope\n f = lambda bit: round(slope*bit + intercept, 6)\n return list(map(f, signal)) #6 decimal places\n\ndef fftFunction(signal, rate):\n signal_len = len(signal)\n fft = np.abs(np.fft.rfft(signal))/signal_len\n freqs = np.fft.rfftfreq(signal_len)*rate\n return fft, freqs\n\ndef freq0fftPSD(signal, signal_ref, rate, roll):\n #get just the amplitude at freq = 0\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n sin_psd_fft, _ = fftFunction(sin_psd, rate)\n cos_psd_fft, _ = fftFunction(cos_psd, rate)\n return sin_psd_fft[0], cos_psd_fft[0]\n\ndef fftPSD(signal, signal_ref, freq, rate, roll):\n #get the amplitude at freq = 0 and 2 * freq\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n sin_psd_fft, sin_psd_freqs = fftFunction(sin_psd, rate)\n cos_psd_fft, cos_psd_freqs = fftFunction(cos_psd, rate)\n y = sin_psd_fft[0] + dict(zip(sin_psd_freqs, sin_psd_fft))[2*freq]\n x = cos_psd_fft[0] + dict(zip(cos_psd_freqs, cos_psd_fft))[2*freq]\n return y, x\n\ndef lowButterPSD(signal, signal_ref, rate, cutoff, order, roll):\n #PSD using the butter low pass filter\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n y = rmsFunction(lowButter(sin_psd, rate, cutoff, order))\n x = rmsFunction(lowButter(cos_psd, rate, cutoff, order))\n return y, x\n\ndef lowBesselPSD(signal, signal_ref, rate, cutoff, order, roll):\n #PSD using the bessel low pass filter\n sin_psd, cos_psd = sigMultiply(signal, signal_ref, roll)\n y = rmsFunction(lowBessel(sin_psd, rate, cutoff, order))\n x = rmsFunction(lowBessel(cos_psd, rate, cutoff, order))\n return y, x\n" ]
[ [ "numpy.multiply", "numpy.roll", "scipy.signal.butter", "scipy.signal.filtfilt", "numpy.fft.rfftfreq", "numpy.sqrt", "scipy.signal.bessel", "numpy.fft.rfft" ] ]
yuyunliuhen/automatic-text-categorization
[ "6f8ca4f26d2ac684439cc265a4ec468ad9d30d20" ]
[ "cosine_categorize.py" ]
[ "# encoding=utf-8\nimport os\nimport sys\nimport math\nfrom util_tool import *\nimport numpy\n\ndef categorization_files(path):\n\tfiles = search_directory(path,'vec')\n\tfor input_name in files:\n\t\tcategorization_file(input_name)\n\t\t# compute only once, the same to them if using topic model for sample feather\n\t\tbreak\n\t\ndef categorization_file(vec_file):\n\thandle_froms = open(vec_file,'r')\t\n\tfinal_file = vec_file.replace('vec','final')\n\thandle_final = open(final_file,'w')\t\n\tresult_list = []\n\ttotal = 0\n\tfor from_line in handle_froms:\n\t\tfrom_data = from_line.split()\n\t\thandle_tos = open(vec_file,'r')\n\t\tfor to_line in handle_tos:\n\t\t\tto_data = to_line.split()\n\t\t\tif from_data[0] == to_data[0]:\n\t\t\t\tcontinue\n\t\t\tif from_data[0].split('/')[2][0:7] == to_data[0].split('/')[2][0:7]:\n\t\t\t\ttotal += 1\n\t\t\t# the first element is file name, skip it\n\t\t\tlen_from_data = len(from_data) - 1\n\t\t\tlen_to_data = len(to_data) - 1\n\t\t\tfrom_vec = transfer_vec(from_data[1:len_from_data])\n\t\t\tto_vec = transfer_vec(to_data[1:len_to_data])\n\t\t\tcosine_value = compute_cosine_value(from_vec,to_vec)\n\t\t\ttmp = [from_data[0],to_data[0],cosine_value]\n\t\t\tresult_list.append(tmp)\n\n\taccuracy_count = 0\n\tresult_list = sorted(result_list,key=lambda x:x[2],reverse=True)\n\tfor result in result_list:\n\n\t\tif result[0].split('/')[2][0:7] == result[1].split('/')[2][0:7] and result[2] > 0:\n\t\t\taccuracy_count += 1\n\n\taccuracy_rate = round(round(float(accuracy_count) / float(total),4) * 100 ,4) \n\thandle_final.write(\"total: \" + str(total) + \" accuracy_count: \" + str(accuracy_count) + \" accuracy_rate: \" + str(accuracy_rate) + \"%\\n\")\n\tfor result in result_list:\n\t\thandle_final.write(result[0] + \"\\t\" + result[1] + \"\\t\" + str(result[2]) + \"\\n\")\n\n\thandle_final.close()\n\ndef transfer_vec(vec):\n\t# conver string to int\n\tvec = [ int (vec) for vec in vec if vec ]\n\t# conver array to vector, if not do this, TypeError: can't multiply sequence by non-int of type 'list'\n\tvec = numpy.array(vec)\n\treturn vec\n\ndef compute_cosine_value(vec_a,vec_b):\n\t#\tcos(a,b)=a*b/(|a|+|b|)\n\tnumerator = numpy.sum(vec_a*vec_b) \n\tdenominator = float(numpy.sqrt(sum(numpy.square(vec_a))) * numpy.sqrt(sum(numpy.square(vec_b))))\n\tif 0 == denominator:\n\t\treturn 0\n\ttheta = round(numerator / denominator,4)\n\treturn theta\n\n#categorization_file(\"./text/C00000810.vec\")\ncategorization_files(\"./text\")\n\n" ]
[ [ "numpy.array", "numpy.sum", "numpy.square" ] ]
LucasHaug/MAP3121
[ "90b69c5db20e6d56c0c3e3dd969d9e41d804e9be" ]
[ "EP2/tests.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy as np\nfrom random import random\n\nimport crank_nicolson\n\n#################################################\n### Functions Definitions\n#################################################\n\ndef get_data(test_letter):\n if test_letter == \"a\":\n ut_array, uk_matrix, x_array, N = test_a()\n elif test_letter == \"b\":\n ut_array, uk_matrix, x_array, N = test_b()\n elif test_letter == \"c\":\n ut_array, uk_matrix, x_array, N = test_c()\n else:\n ut_array, uk_matrix, x_array, N = test_d()\n\n return ut_array, uk_matrix, x_array, N\n\n\n\ndef test_a():\n N = 128\n\n # Create heat sources positions array\n heat_sources_positions_array = [0.35]\n\n # Calculate uk matrix\n uk_matrix, scale_array = crank_nicolson.generate_uk(heat_sources_positions_array, N)\n\n uk_matrix = np.delete(uk_matrix, [0, N], axis=1)\n\n # Calculate ut array\n ut_array = np.array(uk_matrix[0]) * 7\n\n # Delete extremes from scale array\n scale_array = np.delete(scale_array, [0, N])\n\n return ut_array, uk_matrix, scale_array, N\n\n\n\ndef test_b():\n N = 128\n\n # Create heat sources positions array\n heat_sources_positions_array = [0.15, 0.3, 0.7, 0.8]\n\n # Calculate uk matrix\n uk_matrix, scale_array = crank_nicolson.generate_uk(heat_sources_positions_array, N)\n\n uk_matrix = np.delete(uk_matrix, [0, N], axis=1)\n\n # Calculate ut array\n ut_array = (np.array(uk_matrix[0]) * 2.3 + np.array(uk_matrix[1]) * 3.7 +\n np.array(uk_matrix[2]) * 0.3 + np.array(uk_matrix[3]) * 4.2)\n\n # Delete extremes from scale array\n scale_array = np.delete(scale_array, [0, N])\n\n return ut_array, uk_matrix, scale_array, N\n\n\n\ndef test_c():\n # Configuration\n N = int(input(\"Insira o valor de N: \"))\n\n mesh_size = 2048\n\n mesh_relation = int(mesh_size / N)\n\n test_file_name = \"teste.txt\"\n\n test_file = open(test_file_name, \"r\")\n file_lines = test_file.readlines()\n\n test_file.close()\n\n # Create heat sources positions array\n heat_sources_positions_array = [float(item) for item in (file_lines.pop(0).split())]\n\n # Calculate uk matrix\n uk_matrix, scale_array = crank_nicolson.generate_uk(heat_sources_positions_array, N)\n\n uk_matrix = np.delete(uk_matrix, [0, N], axis=1)\n\n # Create ut array\n ut_array = np.zeros(N - 1, dtype=float)\n\n for i in range(0, N - 1):\n ut_array[i] = file_lines[(i + 1) * mesh_relation]\n\n # Delete extremes from scale array\n scale_array = np.delete(scale_array, [0, N])\n\n return ut_array, uk_matrix, scale_array, N\n\n\n\ndef test_d():\n ut_array, uk_matrix, scale_array, N = test_c()\n\n ε = 0.01\n\n for i in range(0, N - 1):\n random_num = (random() - 0.5) * 2\n\n ut_array[i] *= (1 + random_num * ε)\n\n return ut_array, uk_matrix, scale_array, N\n" ]
[ [ "numpy.array", "numpy.delete", "numpy.zeros" ] ]
Crazychicken563/RhythmGameCharterAI
[ "d9647007010ecc9a7ecc93d998527e578d4b12c6" ]
[ "CloneHero/clone_hero_to_generic.py" ]
[ "import os\nimport re\nimport pickle as pkl\nimport soundfile as sf\nimport numpy as np\n\ndef safeAdd(src, key, val):\n if key in src:\n src[key].update(val)\n else:\n src[key] = val\n\nsource_dir = \"clone_hero_data/clonehero-win64/songs\"\ndef main():\n for (dirpath, dirnames, filenames) in os.walk(source_dir):\n name = os.path.relpath(dirpath, source_dir)\n audioFilePath = None\n if not filenames:\n continue\n if \"notes.mid\" in filenames:\n print(\"we aren't parsing midi files right now\")\n continue\n if not \"notes.chart\" in filenames:\n print(\"Chart data not found! \" + name)\n print(filenames)\n continue\n else:\n print(\"Parsing \" + name)\n foundOGG = False\n for filename in filenames:\n if (filename.endswith(\".ogg\")):\n foundOGG = True\n audioFilePath = os.path.abspath(source_dir + \"\\\\\" + name + \"\\\\\" + os.path.join(filename))\n if foundOGG == False:\n print(\"NO AUDIO FILE FOUND\")\n continue\n with open(os.path.join(dirpath, \"notes.chart\"), encoding=\"utf-8\") as notes:\n scanningHeader = False\n currSong = None\n currSongName = None\n try:\n currLine = notes.readline().strip()\n except UnicodeDecodeError as e:\n print(e)\n continue\n while currLine:\n if scanningHeader:\n if currLine == \"}\":\n scanningHeader = False\n samplerate = currSong['sr']\n songlength = currSong['sd'].shape[0]/samplerate\n # yeah not dealing with 48000 right now\n if samplerate == 44100:\n try:\n os.mkdir(\"clone_hero_data/output/\"+currSongName)\n timestamps = list(currSong['ts'].keys())\n for i in range(0, len(timestamps)) : \n timestamps[i] = int(timestamps[i])\n timestamps.sort() \n print(name, samplerate)\n beatrate = 441\n mapping = np.zeros(int(np.ceil(songlength*beatrate)))\n currBPM = 0\n for timestamp in timestamps:\n data = currSong['ts'][str(timestamp)]\n #print(\"{}\".format(data))\n if \"B\" in data:\n currBPM = data[\"B\"]\n print(\"currBPM {}\".format(currBPM))\n \n time = float(timestamp)/float(currBPM) * 60 #static \"60\" BPM to match up to music\n if \"N\" in data:\n #mapping[int(np.round(time*beatrate)), data[\"N\"][\"v\"]] = 1\n mapping[int(np.round(time*beatrate))] = data[\"N\"][\"v\"] + 1\n #print(int(np.round(time*beatrate)))\n for time in range(int(np.floor(songlength))):\n songwindow = currSong['sd'][time*samplerate:(time+1)*samplerate]\n mapwindow = mapping[time*beatrate:(time+1)*beatrate]\n \n with open(\"clone_hero_data/output/\"+currSongName+\"/\"+str(time)+\".pkl\", 'wb+') as f:\n pkl.dump({'name':name, 'time':time, 'window':songwindow, 'label':mapwindow}, f)\n except:\n print(\"We done Fucked up :(\")\n \n for timestamp in currSong['ts']:\n currSong['ts'][timestamp].pop(\"N\", None)\n currSong['ts'][timestamp].pop(\"S\", None)\n\n for timestamp in list(currSong['ts'].keys()):\n if len(currSong['ts'][timestamp].keys()) == 0:\n currSong['ts'].pop(str(timestamp))\n\n print(\"end of header for {}\".format(currSongName))\n else:\n (timestamp, data) = currLine.split(\"=\")\n timestamp = timestamp.strip()\n datums = data.strip().split(\" \")\n if datums[0] == \"N\":\n #These are the only things we care about for now\n value = int(datums[1].strip())\n duration = datums[2].strip()\n if value <= 4:\n # mnd will always be defined by this point since scanningHeader\n # can never be true without mnd being instantiated\n safeAdd(currSong['ts'], str(timestamp), {\n \"N\": {\n 'v': value,\n 'd': int(duration)\n }\n })\n #else:\n #print(\"Unknown value note {}\".format(datums))\n elif datums[0] == \"S\":\n # augments over 4 denote a unique type of note / note 
modifier\n # augment 7 means that the previous note has star power.\n # other augments currently unknown...\n #print(\"star power for duration: {}\".format(duration))\n duration = datums[2].strip() # duration of this star power event\n safeAdd(currSong['ts'], str(timestamp), {\n \"S\": {\n 'v': 2,\n 'd': int(duration)\n }\n })\n else:\n #if any(header in currLine for header in [\"[Song]\"]):\n # print(\"Found Song header\")\n if any(header in currLine for header in [\"[SyncTrack]\"]):\n notes.readline() #Skip the \"{\"\n\n print(audioFilePath)\n songdata, samplerate = sf.read(audioFilePath)\n print(\"sample rate: {}\".format(samplerate))\n currSong = {\n 'ts': {},\n 'sd': np.asarray(songdata),\n 'sr': samplerate\n }\n\n currLine = notes.readline().strip()\n while currLine != \"}\":\n (timestamp, data) = currLine.split(\"=\")\n timestamp = timestamp.strip()\n datums = data.strip().split(\" \")\n if datums[0] == \"B\":\n #print(\"{}\".format(datums))\n #print(currSong)\n safeAdd(currSong['ts'], str(timestamp), {\n \"B\": int(datums[1].strip())\n })\n\n currLine = notes.readline().strip()\n elif any(header in currLine for header in [\"[ExpertSingle]\", \"[HardSingle]\", \"[MediumSingle]\", \"[EasySingle]\"]):\n print(\"Now scanning \" + currLine)\n notes.readline() #Skip the \"{\"\n scanningHeader = True\n mergedPathIntoName = name.replace(\"\\\\\", \"_\")\n currSongName = os.path.join(currLine + \"_\" + mergedPathIntoName)\n print(currSongName)\n\n currLine = notes.readline().strip()\n\nmain()" ]
[ [ "numpy.floor", "numpy.round", "numpy.ceil", "numpy.asarray" ] ]
DonghyunAhn/sadvirus
[ "cdcc98812d613962a7003ff0c6013d0805bde024" ]
[ "utils/siScore_utils.py" ]
[ "import glob\nimport torch\nimport numpy as np\nfrom skimage import io, transform\nfrom torchvision import transforms\nimport torchvision.transforms.functional as F \nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport random\n\nclass ClusterDataset(Dataset):\n def __init__(self, cluster_list, dir_name, transform=None):\n self.file_list = []\n self.transform = transform \n for cluster_num in cluster_list:\n self.file_list.extend(glob.glob('../data/{}/{}/*.png'.format(dir_name, cluster_num)))\n\n def __len__(self):\n return len(self.file_list)\n\n def __getitem__(self, idx):\n image = io.imread(self.file_list[idx]) / 255.0\n if self.transform:\n image = self.transform(np.stack([image])).squeeze()\n return image\n\n \nclass RandomRotate(object):\n def __call__(self, images):\n rotated = np.stack([self.random_rotate(x) for x in images])\n return rotated\n \n def random_rotate(self, image):\n rand_num = np.random.randint(0, 4)\n if rand_num == 0:\n return np.rot90(image, k=1, axes=(0, 1))\n elif rand_num == 1:\n return np.rot90(image, k=2, axes=(0, 1))\n elif rand_num == 2:\n return np.rot90(image, k=3, axes=(0, 1)) \n else:\n return image\n \n \nclass Normalize(object):\n def __init__(self, mean, std, inplace=False):\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def __call__(self, images):\n normalized = np.stack([F.normalize(x, self.mean, self.std, self.inplace) for x in images]) \n return normalized\n \n\n \nclass Grayscale(object):\n def __init__(self, prob = 1):\n self.prob = prob\n\n def __call__(self, images): \n random_num = np.random.randint(100, size=1)[0]\n if random_num <= self.prob * 100:\n gray_images = (images[:, 0, :, :] + images[:, 1, :, :] + images[:, 2, :, :]) / 3\n gray_scaled = gray_images.unsqueeze(1).repeat(1, 3, 1, 1)\n return gray_scaled\n else:\n return images\n \n\n \n\nclass ToTensor(object):\n def __call__(self, images):\n images = images.transpose((0, 3, 1, 2))\n return torch.from_numpy(images).float()\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n \n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0 \n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" ]
[ [ "numpy.rot90", "numpy.stack", "numpy.random.randint", "torch.from_numpy" ] ]
kiuthed/qutip
[ "b6fb8e5bbd9ffeae117b54e56313e8617038deab" ]
[ "qutip/tests/test_tensor.py" ]
[ "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nfrom numpy.testing import assert_equal, assert_, run_module_suite\n\nfrom qutip.operators import identity\nfrom qutip.superop_reps import to_super\nfrom qutip.tensor import (\n tensor_contract, flatten, enumerate_flat, deep_remove, unflatten\n)\n\n\ndef test_flatten():\n l = [[[0], 1], 2]\n assert_equal(flatten(l), [0, 1, 2])\n\n\ndef test_enumerate_flat():\n l = [[[10], [20, 30]], 40]\n labels = enumerate_flat(l)\n assert_equal(labels, [[[0], [1, 2]], 3])\n\n\ndef test_deep_remove():\n l = [[[0], 1], 2]\n l = deep_remove(l, 1)\n assert_equal(l, [[[0]], 2])\n\n # Harder case...\n l = [[[[0, 1, 2]], [3, 4], [5], [6, 7]]]\n l = deep_remove(l, 0, 5)\n assert l == [[[[1, 2]], [3, 4], [], [6, 7]]]\n\n\ndef test_unflatten():\n l = [[[10, 20, 30], [40, 50, 60]], [[70, 80, 90], [100, 110, 120]]]\n labels = enumerate_flat(l)\n assert unflatten(flatten(l), labels) == l\n\n\ndef test_tensor_contract():\n qobj = identity([2, 3, 4])\n ans = 3 * identity([2, 4])\n\n assert_(ans == tensor_contract(qobj, (1, 4)))\n\n # Now try for superoperators.\n # For now, we just ensure the dims are correct.\n sqobj = to_super(qobj)\n correct_dims = [[[2, 4], [2, 4]], [[2, 4], [2, 4]]]\n assert_equal(correct_dims, tensor_contract(sqobj, (1, 4), (7, 10)).dims)\n\nif __name__ == \"__main__\":\n run_module_suite()" ]
[ [ "numpy.testing.assert_equal", "numpy.testing.run_module_suite" ] ]
BastienArcelin/IPU-GPU
[ "dde946686478ce77a06821a1517b5b8206ab8de9" ]
[ "scripts/ipu/inference_gen_galaxy.py" ]
[ "## Load necessary librairies\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\ntfk = tf.keras\ntfkl = tfk.layers\ntfd = tfp.distributions\ntfb = tfp.bijectors\n\nimport time\nimport sys\nsys.path.insert(0,'')\nfrom flow import *\nimport utils_vae\n\n# IPU \nfrom tensorflow.compiler.plugin.poplar.ops import gen_ipu_ops\nfrom tensorflow.python import ipu\nfrom tensorflow.python.ipu.scopes import ipu_scope\ncfg = ipu.utils.create_ipu_config()#profiling=True,\n #profile_execution=True,\n #report_directory='fixed_fullModel'\ncfg = ipu.utils.auto_select_ipus(cfg, 1)\nipu.utils.configure_ipu_system(cfg)\n\n\n## Define the normalizing flow\nhidden_dim = [256,256]\nlayers =8\nbijectors = []\n\n# IPU\n# Create an IPU distribution strategy\nstrategy = ipu.ipu_strategy.IPUStrategy()\n#with ipu_scope(\"/device:IPU:0\"):\nwith strategy.scope():\n for i in range(0, layers):\n made = make_network(32, hidden_dim,2)\n bijectors.append(MAF(made))\n bijectors.append(tfb.Permute(permutation=[31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0]))\n \n bijectors = tfb.Chain(bijectors=list(reversed(bijectors[:-1])))\n\n distribution = tfd.TransformedDistribution(\n distribution=tfd.Normal(loc=0., scale=1.),\n bijector=bijectors,\n event_shape=[32]\n )\n\n x_ = tfkl.Input(shape=(32,), dtype=tf.float32)\n log_prob_ = distribution.log_prob(x_)\n model = tfk.Model(x_, log_prob_)\n\n model.compile(optimizer=tf.optimizers.Adam(), loss=lambda _, log_prob: -log_prob)\n print('flow defined')\n\n ## Load weights\n loading_path = '../../nflow_weights/'\n latest = tf.train.latest_checkpoint(loading_path)\n model.load_weights(latest)\n\n ## Define VAE and load weights decoder VAE\n vae_lsst_conv,vae_lsst_utils, encoder_LSST, decoder_LSST, Dkl = utils_vae.load_vae_full('../../vae_weights/weights_mse_noisy_v4.513-0.00.ckpt',6, folder= False)\n\n ### Do inference\n ## Warm-up \n samples = distribution.sample(100)\n out = decoder_LSST(samples)\n print('warm-up over')\n n_gal = 1000\n print(n_gal)\n ## Actual inference\n t0 = time.time()\n samples = distribution.sample(n_gal)\n out = decoder_LSST(samples)\n t1 = time.time()\n\nprint('time for inference:' + str(t1-t0))\n" ]
[ [ "tensorflow.optimizers.Adam", "tensorflow.python.ipu.utils.create_ipu_config", "tensorflow.train.latest_checkpoint", "tensorflow.python.ipu.utils.auto_select_ipus", "tensorflow.python.ipu.ipu_strategy.IPUStrategy", "tensorflow.python.ipu.utils.configure_ipu_system" ] ]
ibm-developer-skills-network/oroir-Build-a-Personal-Movie-Recommender-with-Django
[ "fbc681cdea067c0cee91c158c632f83cff9db936" ]
[ "recommender/movierecommender/management/commands/load_movies.py" ]
[ "import csv\nimport pandas as pd\nfrom django.core.management import BaseCommand\nfrom ...models import Movie\n\n\nclass Command(BaseCommand):\n help = 'Load a movie csv file into the database'\n\n def add_arguments(self, parser):\n parser.add_argument('--path', type=str)\n\n def handle(self, *args, **kwargs):\n print(\"Clean old movie data\")\n Movie.objects.all().delete()\n path = kwargs['path']\n movie_df = pd.read_csv(path)\n for index, row in movie_df.iterrows():\n imdb_id = row[\"imdb_id\"]\n genres = row[\"genres\"]\n release_date = row[\"release_date\"]\n original_language = row[\"original_language\"]\n original_title = row[\"original_title\"]\n overview = row[\"overview\"]\n vote_average = row[\"vote_average\"]\n vote_count = row[\"vote_count\"]\n poster_path = row[\"poster_path\"]\n #print(f\"{imdb_id} {original_title} {genres} {overview} {vote_average} {poster_path}\")\n movie = Movie(imdb_id=imdb_id,\n genres=genres,\n original_title=original_title,\n original_language=original_language,\n release_date=release_date,\n overview=overview,\n vote_average=vote_average,\n vote_count=vote_count,\n poster_path=poster_path)\n movie.save()\n print(f\"{imdb_id} saved...\")\n\n# python manage.py load_movies --path movies.csv" ]
[ [ "pandas.read_csv" ] ]
ContactEngineering/Adhesion
[ "acc46ad9bfe49fec667cb9a116ebde426faa38c4" ]
[ "helpers/Testing_augmented_Lagrangian.py" ]
[ "#\n# Copyright 2020 Antoine Sanner\n# 2020 Lars Pastewka\n# 2015-2016 Till Junge\n#\n# ### MIT license\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n# coding: utf-8\n\n## Testing the Augmented Lagrangian of Adhesion\n\n# The implementation of the augmented Lagrangian in Tools follows closely the description of the `LANCELOT` algorithm described in Bierlaire (2006)\n\n# The function `augmented_lagrangian` has the form of custom minimizer for [scipy.optimize.minimize](http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.optimize.minimize.html)\n\n# In[4]:\n\nimport sys\nimport os\nimport numpy as np\n\nimport scipy.optimize\nsys.path.append(os.path.join(os.getcwd(), \"../PyCo/Tools/\"))\nfrom AugmentedLagrangian import augmented_lagrangian\n\n\n### Book example\n\n# Example 20.5: Minimise the fuction $f(x)$\n# $$\\min_{x\\in\\mathbb{R}^2} 2(x_1^2+x_2^2 -1)-x_1$$\n# under the constraint\n# $$ x_1^2 + x_2^2 = 1$$\n\n# ugly workaround to get a fresh AugmentedLagrangian without module loads\n\n# In[9]:\n\n# fname = \"../PyCo/Tools/AugmentedLagrangian.py\"\n# with open(fname) as filehandle:\n# content = ''.join((line for line in filehandle))\n# exec(content)\n\n\n# In[11]:\n\ndef fun(x):\n return (x[0]**2 + x[1]**2 - 1) - x[0]\ndef constraint(x):\n return x[0]**2 + x[1]**2 - 1\ntol = 1.e-2\nresult = scipy.optimize.minimize(fun, x0=np.array((-1, .1)),\n \t constraints={'type':'eq','fun':constraint},\n\t method=augmented_lagrangian, tol=tol,\n\t options={'multiplier0': np.array((0.)),\n 'disp': True,\n 'store_iterates': 'iterate'})\n\nprint(result)\n\n" ]
[ [ "numpy.array" ] ]
ian-shepherd/bball_sim
[ "119696eda8d1c1c96da4113c3a41659e1472ebc2" ]
[ "bball_sim/app.py" ]
[ "# Packages\r\nimport streamlit as st\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nimport util\r\n\r\n# Configure page\r\nst.set_page_config(page_title='End of Game NBA Simulator',\r\n page_icon='https://raw.githubusercontent.com/papagorgio23/Python101/master/newlogo.png',\r\n layout=\"centered\")\r\n\r\n\r\n# Load data and convert to list of players\r\ncols = ['Player', 'bbref_id']\r\nplayers = pd.read_csv('./player_data.csv', usecols=cols)\r\nplayerList = players['Player'].tolist()\r\n\r\n\r\n# Simulation function\r\ndef baseSimulation(n, t, diff, fouls1, fouls2, ot_prob):\r\n \"\"\"\r\n primary simulation to determine number of games won by each strategy\r\n returns a dataframe of strategy, result (number of won wins), number of sims, and mean point difference\r\n \"\"\"\r\n\r\n # Generate empty lists\r\n simTypeList = []\r\n resultList = []\r\n overtimeList = []\r\n pointDiffList = []\r\n\r\n # Simulation\r\n for i in range(0, n):\r\n\r\n # 2 pt simulation\r\n result, overtime, pointDiff = util.runSim(2, \r\n df1, \r\n df2, \r\n rbPct1, \r\n rbPct2, \r\n timeLeftInitial=t, \r\n pointDiffInitial=diff, \r\n teamFouls1Initial=fouls1, \r\n teamFouls2Initial=fouls2, \r\n overtimeProb=ot_prob)\r\n simTypeList.append('2pt')\r\n resultList.append(result)\r\n overtimeList.append(overtime)\r\n pointDiffList.append(pointDiff)\r\n\r\n # 3 pt simulation\r\n result, overtime, pointDiff = util.runSim(3, \r\n df1, \r\n df2, \r\n rbPct1, \r\n rbPct2, \r\n timeLeftInitial=t, \r\n pointDiffInitial=diff, \r\n teamFouls1Initial=fouls1, \r\n teamFouls2Initial=fouls2, \r\n overtimeProb=ot_prob)\r\n simTypeList.append('3pt')\r\n resultList.append(result)\r\n overtimeList.append(overtime)\r\n pointDiffList.append(pointDiff)\r\n\r\n\r\n # Output dataframe\r\n df = pd.DataFrame(zip(simTypeList, resultList, overtimeList, pointDiffList),\r\n columns=['Strategy', 'Result', 'Overtime', 'Point_diff'])\r\n df = df.groupby(['Strategy'])[['Result']].sum().reset_index()\r\n df['Sims'] = n\r\n\r\n\r\n # Generate plot\r\n # set plot style: grey grid in the background:\r\n sns.set(style=\"darkgrid\")\r\n\r\n # set the figure size\r\n # plt.figure(figsize=(14, 10))\r\n fig = plt.figure(figsize=(12, 8))\r\n\r\n # plot bars\r\n bar1 = sns.barplot(x='Strategy', y='Sims', data=df, estimator=sum, ci=None, color='lightcoral')\r\n bar2 = sns.barplot(x='Strategy', y='Result', data=df, color='dodgerblue')\r\n\r\n # legend\r\n top_bar = mpatches.Patch(color='lightcoral', label='Loss')\r\n bottom_bar = mpatches.Patch(color='dodgerblue', label='Win')\r\n plt.legend(bbox_to_anchor=(1,1), borderaxespad=0, frameon=False, ncol=2, handles=[bottom_bar, top_bar])\r\n\r\n # formatting\r\n plt.ylabel(\"# of Simulations\")\r\n plt.title(\"Result of \" + str(n) + \" Simulations by Strategy\")\r\n\r\n st.pyplot(fig)\r\n\r\n # Print % of sims won\r\n st.write(str(round(df.loc[0,'Result'] / n * 100, 1)) + '% of 2pt strategy similations won')\r\n st.write(str(round(df.loc[1,'Result'] / n * 100, 1)) + '% of 3pt strategy similations won')\r\n\r\n return df\r\n\r\n\r\n# Configure page\r\nst.title(\"End of NBA Game Simulator\")\r\nst.subheader(\r\n \"_Adjust the inputs in the sidebar and click apply to view the results of the simulation_\"\r\n)\r\n\r\n\r\n\r\n# Configure sidebar\r\nbuton1 = st.sidebar.button(\"Run\")\r\n\r\n# game state inputs\r\nn = st.sidebar.number_input(\"number of simulations\", min_value=100, max_value=1000000, value=1000)\r\nt = 
st.sidebar.number_input(\"seconds remaining\", min_value=1, max_value=60, value=30)\r\ndiff = st.sidebar.number_input(\"point differential\", min_value=-10, max_value=0, value=-3)\r\nfouls1 = st.sidebar.number_input(\"fouls committed by leading team\", min_value=0, max_value=10, value=5)\r\nfouls2 = st.sidebar.number_input(\"fouls committed by trailing team\", min_value=0, max_value=10, value=5)\r\not_prob = st.sidebar.number_input(\"overtime win probability (%)\", min_value=0, max_value=100, value=50) / 100\r\n\r\n# trailing team players\r\nst.sidebar.write(\"\")\r\nst.sidebar.write(\"Trailing Team\")\r\nplayer1 = st.sidebar.selectbox(\"player1\", playerList, playerList.index(\"Kemba Walker\\\\walkeke02\"))\r\nplayer2 = st.sidebar.selectbox(\"player2\", playerList, playerList.index(\"Marcus Smart\\\\smartma01\"))\r\nplayer3 = st.sidebar.selectbox(\"player3\", playerList, playerList.index(\"Jaylen Brown\\\\brownja02\"))\r\nplayer4 = st.sidebar.selectbox(\"player4\", playerList, playerList.index(\"Jayson Tatum\\\\tatumja01\"))\r\nplayer5 = st.sidebar.selectbox(\"player5\", playerList, playerList.index(\"Grant Williams\\\\willigr01\"))\r\n\r\n# leading team players\r\nst.sidebar.write(\"Leading Team\")\r\nplayer6 = st.sidebar.selectbox(\"player6\", playerList, playerList.index(\"Ben Simmons\\\\simmobe01\"))\r\nplayer7 = st.sidebar.selectbox(\"player7\", playerList, playerList.index(\"Seth Curry\\\\curryse01\"))\r\nplayer8 = st.sidebar.selectbox(\"player8\", playerList, playerList.index(\"Danny Green\\\\greenda02\"))\r\nplayer9 = st.sidebar.selectbox(\"player9\", playerList, playerList.index(\"Tobias Harris\\\\harrito02\"))\r\nplayer10 = st.sidebar.selectbox(\"player10\", playerList, playerList.index(\"Joel Embiid\\\\embiijo01\"))\r\n\r\n\r\n# Run simulations\r\n# if st.sidebar.button('Apply'):\r\nif buton1:\r\n with st.spinner(\"Running simulations...\"):\r\n team1 = [player1.rsplit('\\\\',1)[1], player2.rsplit('\\\\',1)[1], player3.rsplit('\\\\',1)[1], player4.rsplit('\\\\',1)[1], player5.rsplit('\\\\',1)[1]]\r\n team2 = [player6.rsplit('\\\\',1)[1], player7.rsplit('\\\\',1)[1], player8.rsplit('\\\\',1)[1], player9.rsplit('\\\\',1)[1], player10.rsplit('\\\\',1)[1]]\r\n df1, df2, rbPct1, rbPct2 = util.prepSim(team1, team2)\r\n baseSimulation(n, t, diff, fouls1, fouls2, ot_prob)\r\n\r\n\r\nabout = st.expander('Simulation Info')\r\nwith about:\r\n \"\"\"\r\n This is an end of NBA game simulator based on player statistics for the 2020-2021 NBA season. You can select the same \r\n player to both teams but you cannot put a player on the same team twice. There are also dummy players that act as a \r\n representative player of that position. The simulator assumes the outcome of every possession is a made shot, missed \r\n shot with the potential of a rebound, or intentional foul. It will not account for turnovers or blocks. The time taken \r\n by each possession is based on a normal distribution accounting for what is in the best interest of the team. For example, \r\n the simulation assumes the trailing team will take an average of 4 seconds but if the game is tied, that team will try \r\n and maximize the amount of time taken so that mean is changed to the time remaining - 1.5 seconds. The shooter is also \r\n determined by a composite rating that ranks players by number of that specific shot (free throw, 2 pt, 3 pt) taken per \r\n game and their success rate. Players are then assigned a probability of being the selected shooter. 
Rebounds on the other \r\n hand are determined by a team likelihood that compares the rebounding of the two teams to determine each team's likelihood \r\n of successfully getting a rebound.\r\n \"\"\"" ]
[ [ "matplotlib.pyplot.legend", "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.patches.Patch" ] ]
poltextlab/nyt_hybrid_classification_workflow
[ "3f676938b08f4373be3a83e975ee51dfa5ce6bf5" ]
[ "spark_cluster/04_5_HV_activeLearn/HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1/6100_ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5.py" ]
[ "# import libraries\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.sql.types import *\n\nfrom pyspark.sql.functions import col, count, when\n\nfrom pyspark.ml.classification import LinearSVC\n\nimport pandas as pd\n\n#################################################\n# spark config\n#################################################\nmtaMaster = \"spark://192.168.0.182:7077\"\n\nconf = SparkConf()\nconf.setMaster(mtaMaster)\n\nconf.set(\"spark.executor.memory\", \"24g\")\nconf.set(\"spark.driver.memory\", \"26g\")\nconf.set(\"spark.cores.max\", 96)\nconf.set(\"spark.driver.cores\", 8)\n\nconf.set(\"spark.serializer\", \"org.apache.spark.serializer.KryoSerializer\")\nconf.set(\"spark.kryoserializer.buffer\", \"256m\")\nconf.set(\"spark.kryoserializer.buffer.max\", \"256m\")\n\nconf.set(\"spark.default.parallelism\", 24)\n\nconf.set(\"spark.eventLog.enabled\", \"true\")\nconf.set(\"spark.eventLog.dir\", \"hdfs://192.168.0.182:9000/eventlog\")\nconf.set(\"spark.history.fs.logDirectory\", \"hdfs://192.168.0.182:9000/eventlog\")\n\nconf.set(\"spark.driver.maxResultSize\", \"2g\")\n\nconf.getAll()\n\n#################################################\n# create spark session\n#################################################\nspark = SparkSession.builder.appName('ML2_HV_v4_activeLearn_NYT_sim2_and_sim3_to_sim1_round5').config(conf=conf).getOrCreate()\n\nsc = spark.sparkContext\n\n# check things are working\nprint(sc)\nprint(sc.defaultParallelism)\nprint(\"SPARK CONTEXT IS RUNNING\")\n\n\n#################################################\n# define major topic codes\n#################################################\n\n# major topic codes for loop (NO 23 IN THE NYT CORPUS)\nmajortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 100]\n#majortopic_codes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 100]\n\n\n#################################################\n# loop starts here\n#################################################\n\nfor h in range(3):\n # read table from hdfs\n df_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_round5_start.parquet\").repartition(50)\n\n # check loaded data \n print(df_original.printSchema())\n print(df_original.show())\n df_original.groupBy(\"majortopic\").count().show(30, False)\n\n #################################################\n # prepare to log sample numbers\n #################################################\n\n columns = [\"label\", \"non_label_all\", \"non_label_sample\", \"train_all\"]\n\n df_numbers = pd.DataFrame(index=majortopic_codes, columns=columns)\n\n for i in majortopic_codes:\n #################################################\n # prepare df for svm requirements\n #################################################\n print(\"majortopic is:\", i)\n\n # separate majortopic\n df_original = df_original.withColumn(\"label\", when(df_original[\"majortopic\"] == i, 1).otherwise(0))\n\n # label has to be double for SVM\n df_original = df_original.withColumn('label', df_original.label.cast(DoubleType()))\n\n #################################################\n # separate training and test sets\n #################################################\n\n df_train = df_original.where((col('train_r5') == 1) | (col('train_r2_neg') == i) | (col('train_r3_neg') == i) | (col('train_r4_neg') == i) | (col('train_r5_neg') == i))\n df_test = df_original.where((col('train_r5') == 0) & (col('train_r2_neg') != i) & 
(col('train_r3_neg') != i) & (col('train_r4_neg') != i) & (col('train_r5_neg') != i))\n\n # make training data proportional with regards to label occurrence frequency\n df_train_mtc = df_train.where(col('label') == 1)\n df_train_non_mtc = df_train.where(col('label') == 0)\n\n df_train_count = df_train.count()\n df_train_mtc_count = df_train_mtc.count()\n df_train_non_mtc_count = df_train_non_mtc.count()\n print(\"Rows in training DataFrame with label = \", df_train_mtc_count)\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_count)\n\n if df_train_mtc_count/df_train_non_mtc_count < 0.1:\n if df_train_mtc_count*10 < df_train_count//10:\n sample_num = df_train_count//10\n else: sample_num = df_train_mtc_count*10\n print(\"sample_num = \", sample_num)\n print(\"df_train_non_mtc = \", df_train_non_mtc_count)\n sampling_fraction = sample_num/df_train_non_mtc_count\n print(\"sampling_fraction = \", sampling_fraction)\n df_train_non_mtc = df_train_non_mtc.sample(False, sampling_fraction)\n df_train_non_mtc_sample = df_train_non_mtc.count()\n print(\"Rows in training DataFrame without label = \", df_train_non_mtc_sample)\n df_train = df_train_mtc.union(df_train_non_mtc)\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_sample\n df_numbers[\"train_all\"].loc[i] = df_train_mtc_count + df_train_non_mtc_sample\n else:\n # numbers to logtable\n df_numbers[\"non_label_sample\"].loc[i] = df_train_non_mtc_count\n df_numbers[\"train_all\"].loc[i] = df_train_count\n\n # numbers to logtable\n df_numbers[\"label\"].loc[i] = df_train_mtc_count\n df_numbers[\"non_label_all\"].loc[i] = df_train_non_mtc_count\n print(df_numbers)\n\n # NOTE: this type of copying wouldn't work in python, but does work in pyspark!\n df_train_orig = df_train\n df_test_orig = df_test\n df_loop = 0\n df_train_mtc = 0\n df_train_non_mtc = 0\n\n print(\"Rows in training DataFrame = \", df_train.count())\n print(\"Rows in test DataFrame = \", df_test.count())\n\n\n #################################################\n # SVM\n #################################################\n\n for j in range(3):\n df_train = df_train_orig\n df_test = df_test_orig\n\n # define svm\n lsvc = LinearSVC(featuresCol='features', labelCol='label', maxIter=10, regParam=0.1)\n\n # train the model.\n lsvcModel = lsvc.fit(df_train)\n\n print(\"fit model finished, starting scoring:\", j)\n\n # score the model on test data.\n predictions = lsvcModel.transform(df_test)\n\n df_train = 0\n df_test = 0\n lsvcModel = 0\n\n print(predictions.printSchema())\n print(predictions.show())\n\n df_write = predictions.select(\"doc_id\", \"prediction\")\n\n predictions = 0\n\n df_write = df_write.withColumn('prediction', df_write.prediction.cast(IntegerType()))\n df_write = df_write.withColumn('prediction', df_write.prediction * i)\n new_col_name = 'prediction_{i}'.format(i=i)\n df_write = df_write.withColumnRenamed('prediction', new_col_name)\n\n # write partial result to parquet\n dest_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df_write.write.parquet(dest_name, mode=\"overwrite\")\n\n df_write = 0\n\n print(\"DONE\")\n\n print(\"ALL SVM DONE round5_{h}\".format(h=h+1))\n\n df_numbers.to_csv(\"ML2_HV_v4_activeLearn_NYT_round5_sample{h}_sample_numbers.csv\".format(h=h+1), index=False)\n\n # empty memory\n spark.catalog.clearCache()\n print(\"cache cleared\")\n\n #######################################################\n ### parquet to pandas\n 
#######################################################\n\n for j in range(3):\n # read from parquet format\n for i in majortopic_codes:\n source_name = \"hdfs://192.168.0.182:9000/input/NYT_prediction_mtc{i}_{j}.parquet\".format(i=i, j=j)\n df = spark.read.parquet(source_name).repartition(50)\n if i == 1:\n df_results = df\n else:\n df_results = df_results.join(df, 'doc_id', 'inner')\n\n df = df_results\n df_results = 0\n\n # convert prediction results to pandas df\n df = df.toPandas()\n\n df.to_csv(\"ML2_HV_v4_activeLearn_NYT_round5_sample{h}_svm{j}.csv\".format(h=h+1,j=j), index=False)\n\n\n#########################################################################\n# create results and leftovers tables\n#########################################################################\n\n# all of the following happen in pandas outside the spark context\nfor i in range(3):\n for j in range(3):\n df = pd.read_csv(\"ML2_HV_v4_activeLearn_NYT_round5_sample{i}_svm{j}.csv\".format(i=i+1, j=j))\n df = df.sort_values(by=['doc_id'])\n df = df.reset_index(drop=True)\n #print(df.head())\n if i == 0 and j == 0:\n df_results = df\n else:\n df_lemma = df_results.iloc[:,1:].add(df.iloc[:,1:])\n df_results = pd.concat([df_results[['doc_id']], df_lemma], axis=1)\n #print(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(i)\n\ndf_results[\"max_value\"] = df_results.iloc[:,1:].max(axis = 1, numeric_only = True)\ndf_results[\"how_many_9votes\"] = df_results.iloc[:,:-1].isin([9]).sum(1)\n\nprint(df_results.shape)\ndf_results = df_results.loc[df_results[\"max_value\"]==9]\nprint(df_results.shape)\n# first get table of multiple nine votes for active learning\ndf_activeLearn = df_results.loc[df_results[\"how_many_9votes\"]>1]\n# then get all simple verdicts\ndf_results = df_results.loc[df_results[\"how_many_9votes\"]==1]\nprint(df_results.shape)\n\n# prepare table for active learning\n# first get the full result table for further analysis later\ndf_activeLearn.to_csv(\"ML2_v4_activeLearn_NYT_r5_activeLearn_raw.csv\", index=False)\n\n# since this is a simulation a dummy value will suffice here\ndf_activeLearn[\"verdict\"] = \"dummy_value\"\ndf_activeLearn = df_activeLearn[[\"doc_id\", \"verdict\"]]\n\n# prepare table of single verdicts\ndf_results = df_results.drop(['max_value', 'how_many_9votes'], axis=1)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]].floordiv(9)\n\nprint(df_results.head())\n\nfor i in majortopic_codes:\n df_results[[\"prediction_{i}\".format(i=i)]] = df_results[[\"prediction_{i}\".format(i=i)]]*i\n\n\ndf_results[\"verdict\"] = df_results.iloc[:,1:].sum(1)\n\ndf_results = df_results[[\"doc_id\", \"verdict\"]]\n\n# now we move back to the spark context!!\n# for that we need to move the pandas df into a spark df\ndf = spark.createDataFrame(df_results)\n# if there are no elements selected for active learning trying to move the empty pandas df into the\n# spark context will throw an error\nif df_activeLearn.empty:\n print(\"no elements selected for active learning\")\n df_al = pd.DataFrame({'col1': [1]})\n df_al = spark.createDataFrame(df_al)\nelse:\n df_al = spark.createDataFrame(df_activeLearn)\n\n# load df_original\ndf_original = spark.read.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_round5_start.parquet\").repartition(50)\n\n# create results table\ndf_results = 
df_original.join(df, \"doc_id\", \"inner\")\nif len(df_al.columns) == 1:\n df_results_al = df_al\nelse:\n df_results_al = df_original.join(df_al, \"doc_id\", \"inner\")\n\n# create table of non-classified and training elements\nids_drop = df.select(\"doc_id\")\ndf_original = df_original.join(ids_drop, \"doc_id\", \"left_anti\")\n# once more for those selected for active learning\nif len(df_al.columns) == 1:\n print(\"no elements selected for active learning\")\nelse:\n ids_drop = df_al.select(\"doc_id\")\n df_original = df_original.join(ids_drop, \"doc_id\", \"left_anti\")\n\n# write to parquet for use in human validation script\ndf_original.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_train_and_remaining_NOTclassified.parquet\", mode=\"overwrite\")\ndf_results.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_classified.parquet\", mode=\"overwrite\")\ndf_results_al.write.parquet(\"hdfs://192.168.0.182:9000/input/ML2_HV_v4_activeLearn_NYT_r5_activeLearn.parquet\", mode=\"overwrite\")\n\n# convert tables to pandas df and write to csv\ndf_original = df_original.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\ndf_results = df_results.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\nif len(df_al.columns) != 1:\n df_results_al = df_results_al.drop(\"text\", \"words\", \"raw_features\", \"features\").toPandas()\n\ndf_original.to_csv(\"ML2_HV_v4_activeLearn_NYT_r5_train_and_remaining_NOTclassified.csv\", index=False)\ndf_results.to_csv(\"ML2_HV_v4_activeLearn_NYT_r5_classified.csv\", index=False)\nif len(df_al.columns) != 1:\n df_results_al.to_csv(\"ML2_HV_v4_activeLearn_NYT_r5_activeLearn.csv\", index=False)\n\nprint(\"df_original: \", df_original.shape[0])\nprint(\"df_results: \", df_results.shape[0])\nif len(df_al.columns) != 1:\n print(\"df_results_activeLearn: \", df_results_al.shape[0])\nelse:\n print(\"df_results_activeLearn: 0\")\n\nsc.stop()\nspark.stop()\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
wolf-zchen/CarND-capstone
[ "b6b768bfd01f03a5256c2db4b84f9d7a42149de2" ]
[ "ros/src/waypoint_updater/waypoint_updater.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nimport rospy\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\nfrom scipy.spatial import KDTree\n\nimport math\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\n\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\n\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\n\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\n\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPS = 30 # Number of waypoints we will publish. You can change this number\nMAX_DECEL = 1\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb,queue_size = 1)\n rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb, queue_size = 1)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb, queue_size = 1)\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n \n\n self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n\n # TODO: Add other member variables you need below\n self.pose = None\n self.base_waypoints = None\n self.waypoints_2d = None\n self.waypoint_tree = None\n self.stopline_wp_idx = -1\n \n self.loop()\n \n #rospy.spin()\n\n def loop(self):\n rate = rospy.Rate(50)\n while not rospy.is_shutdown():\n if self.pose and self.base_waypoints:\n #Get closest waypoint\n closest_waypoint_idx = self.get_closest_waypoint_idx()\n self.publish_waypoints(closest_waypoint_idx)\n rate.sleep()\n\n def get_closest_waypoint_idx(self):\n x = self.pose.pose.position.x\n y = self.pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x,y],1)[1] \n \n #check if closet is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coords\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x,y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect -cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n return closest_idx\n\n def publish_waypoints(self,closest_idx):\n #lane = Lane()\n #lane.header = self.base_waypoints.header\n #lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]\n final_lane = self.generate_lane()\n self.final_waypoints_pub.publish(final_lane)\n\n def generate_lane(self):\n lane = Lane()\n closest_idx = self.get_closest_waypoint_idx()\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]\n\n if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_idx):\n lane.waypoints = base_waypoints\n else:\n lane.waypoints = self.decelerate_waypoints(base_waypoints,closest_idx)\n\n return lane\n\n def decelerate_waypoints(self,waypoints,closest_idx):\n temp = []\n for i, wp in enumerate(waypoints):\n p = Waypoint()\n p.pose = wp.pose\n stop_idx = max(self.stopline_wp_idx - closest_idx - 3, 0)\n dist = self.distance(waypoints, i, stop_idx)\n vel = math.sqrt(2 * MAX_DECEL * 
dist)\n if vel < 1.0:\n vel = 0\n\n p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)\n temp.append(p)\n return temp\n\n def pose_cb(self, msg):\n # TODO: Implement\n self.pose = msg\n\n def waypoints_cb(self, waypoints):\n # TODO: Implement\n self.base_waypoints = waypoints\n if not self.waypoints_2d:\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n self.waypoint_tree = KDTree(self.waypoints_2d)\n\n def traffic_cb(self, msg):\n # TODO: Callback for /traffic_waypoint message. Implement\n self.stopline_wp_idx = msg.data\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n def get_waypoint_velocity(self, waypoint):\n return waypoint.twist.twist.linear.x\n\n def set_waypoint_velocity(self, waypoints, waypoint, velocity):\n waypoints[waypoint].twist.twist.linear.x = velocity\n\n def distance(self, waypoints, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')\n" ]
[ [ "numpy.array", "numpy.dot", "scipy.spatial.KDTree" ] ]
undeadyequ/espnet
[ "8c3f85ce695153abcb9cf365180b1d7554ad565e" ]
[ "espnet/nets/pytorch_backend/e2e_vc_transformer.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Nagoya University (Wen-Chin Huang)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Voice Transformer Network (Transformer-VC) related modules.\"\"\"\n\nimport logging\n\nimport torch\nimport torch.nn.functional as F\n\nfrom espnet.nets.pytorch_backend.e2e_asr_transformer import subsequent_mask\nfrom espnet.nets.pytorch_backend.e2e_tts_tacotron2 import (\n Tacotron2Loss as TransformerLoss, # noqa: H301\n)\nfrom espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask\nfrom espnet.nets.pytorch_backend.tacotron2.decoder import Postnet\nfrom espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as DecoderPrenet\nfrom espnet.nets.pytorch_backend.tacotron2.encoder import Encoder as EncoderPrenet\nfrom espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention\nfrom espnet.nets.pytorch_backend.transformer.decoder import Decoder\nfrom espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding\nfrom espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding\nfrom espnet.nets.pytorch_backend.transformer.encoder import Encoder\nfrom espnet.nets.pytorch_backend.transformer.initializer import initialize\nfrom espnet.nets.tts_interface import TTSInterface\nfrom espnet.utils.cli_utils import strtobool\nfrom espnet.utils.fill_missing_args import fill_missing_args\nfrom espnet.nets.pytorch_backend.e2e_tts_transformer import (\n GuidedMultiHeadAttentionLoss, # noqa: H301\n TTSPlot, # noqa: H301\n)\n\n\nclass Transformer(TTSInterface, torch.nn.Module):\n \"\"\"VC Transformer module.\n\n This is a module of the Voice Transformer Network\n (a.k.a. VTN or Transformer-VC) described in\n `Voice Transformer Network: Sequence-to-Sequence\n Voice Conversion Using Transformer with\n Text-to-Speech Pretraining`_,\n which convert the sequence of acoustic features\n into the sequence of acoustic features.\n\n .. 
_`Voice Transformer Network: Sequence-to-Sequence\n Voice Conversion Using Transformer with\n Text-to-Speech Pretraining`:\n https://arxiv.org/pdf/1912.06813.pdf\n\n \"\"\"\n\n @staticmethod\n def add_arguments(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n group = parser.add_argument_group(\"transformer model setting\")\n # network structure related\n group.add_argument(\n \"--eprenet-conv-layers\",\n default=0,\n type=int,\n help=\"Number of encoder prenet convolution layers\",\n )\n group.add_argument(\n \"--eprenet-conv-chans\",\n default=0,\n type=int,\n help=\"Number of encoder prenet convolution channels\",\n )\n group.add_argument(\n \"--eprenet-conv-filts\",\n default=0,\n type=int,\n help=\"Filter size of encoder prenet convolution\",\n )\n group.add_argument(\n \"--transformer-input-layer\",\n default=\"linear\",\n type=str,\n help=\"Type of input layer (linear or conv2d)\",\n )\n group.add_argument(\n \"--dprenet-layers\",\n default=2,\n type=int,\n help=\"Number of decoder prenet layers\",\n )\n group.add_argument(\n \"--dprenet-units\",\n default=256,\n type=int,\n help=\"Number of decoder prenet hidden units\",\n )\n group.add_argument(\n \"--elayers\", default=3, type=int, help=\"Number of encoder layers\"\n )\n group.add_argument(\n \"--eunits\", default=1536, type=int, help=\"Number of encoder hidden units\"\n )\n group.add_argument(\n \"--adim\",\n default=384,\n type=int,\n help=\"Number of attention transformation dimensions\",\n )\n group.add_argument(\n \"--aheads\",\n default=4,\n type=int,\n help=\"Number of heads for multi head attention\",\n )\n group.add_argument(\n \"--dlayers\", default=3, type=int, help=\"Number of decoder layers\"\n )\n group.add_argument(\n \"--dunits\", default=1536, type=int, help=\"Number of decoder hidden units\"\n )\n group.add_argument(\n \"--positionwise-layer-type\",\n default=\"linear\",\n type=str,\n choices=[\"linear\", \"conv1d\", \"conv1d-linear\"],\n help=\"Positionwise layer type.\",\n )\n group.add_argument(\n \"--positionwise-conv-kernel-size\",\n default=1,\n type=int,\n help=\"Kernel size of positionwise conv1d layer\",\n )\n group.add_argument(\n \"--postnet-layers\", default=5, type=int, help=\"Number of postnet layers\"\n )\n group.add_argument(\n \"--postnet-chans\", default=256, type=int, help=\"Number of postnet channels\"\n )\n group.add_argument(\n \"--postnet-filts\", default=5, type=int, help=\"Filter size of postnet\"\n )\n group.add_argument(\n \"--use-scaled-pos-enc\",\n default=True,\n type=strtobool,\n help=\"Use trainable scaled positional encoding\"\n \"instead of the fixed scale one.\",\n )\n group.add_argument(\n \"--use-batch-norm\",\n default=True,\n type=strtobool,\n help=\"Whether to use batch normalization\",\n )\n group.add_argument(\n \"--encoder-normalize-before\",\n default=False,\n type=strtobool,\n help=\"Whether to apply layer norm before encoder block\",\n )\n group.add_argument(\n \"--decoder-normalize-before\",\n default=False,\n type=strtobool,\n help=\"Whether to apply layer norm before decoder block\",\n )\n group.add_argument(\n \"--encoder-concat-after\",\n default=False,\n type=strtobool,\n help=\"Whether to concatenate attention layer's input and output in encoder\",\n )\n group.add_argument(\n \"--decoder-concat-after\",\n default=False,\n type=strtobool,\n help=\"Whether to concatenate attention layer's input and output in decoder\",\n )\n group.add_argument(\n \"--reduction-factor\",\n default=1,\n type=int,\n help=\"Reduction factor (for decoder)\",\n )\n 
group.add_argument(\n \"--encoder-reduction-factor\",\n default=1,\n type=int,\n help=\"Reduction factor (for encoder)\",\n )\n group.add_argument(\n \"--spk-embed-dim\",\n default=None,\n type=int,\n help=\"Number of speaker embedding dimensions\",\n )\n group.add_argument(\n \"--spk-embed-integration-type\",\n type=str,\n default=\"add\",\n choices=[\"add\", \"concat\"],\n help=\"How to integrate speaker embedding\",\n )\n # training related\n group.add_argument(\n \"--transformer-init\",\n type=str,\n default=\"pytorch\",\n choices=[\n \"pytorch\",\n \"xavier_uniform\",\n \"xavier_normal\",\n \"kaiming_uniform\",\n \"kaiming_normal\",\n ],\n help=\"How to initialize transformer parameters\",\n )\n group.add_argument(\n \"--initial-encoder-alpha\",\n type=float,\n default=1.0,\n help=\"Initial alpha value in encoder's ScaledPositionalEncoding\",\n )\n group.add_argument(\n \"--initial-decoder-alpha\",\n type=float,\n default=1.0,\n help=\"Initial alpha value in decoder's ScaledPositionalEncoding\",\n )\n group.add_argument(\n \"--transformer-lr\",\n default=1.0,\n type=float,\n help=\"Initial value of learning rate\",\n )\n group.add_argument(\n \"--transformer-warmup-steps\",\n default=4000,\n type=int,\n help=\"Optimizer warmup steps\",\n )\n group.add_argument(\n \"--transformer-enc-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder except for attention\",\n )\n group.add_argument(\n \"--transformer-enc-positional-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder positional encoding\",\n )\n group.add_argument(\n \"--transformer-enc-attn-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder self-attention\",\n )\n group.add_argument(\n \"--transformer-dec-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer decoder \"\n \"except for attention and pos encoding\",\n )\n group.add_argument(\n \"--transformer-dec-positional-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer decoder positional encoding\",\n )\n group.add_argument(\n \"--transformer-dec-attn-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer decoder self-attention\",\n )\n group.add_argument(\n \"--transformer-enc-dec-attn-dropout-rate\",\n default=0.1,\n type=float,\n help=\"Dropout rate for transformer encoder-decoder attention\",\n )\n group.add_argument(\n \"--eprenet-dropout-rate\",\n default=0.5,\n type=float,\n help=\"Dropout rate in encoder prenet\",\n )\n group.add_argument(\n \"--dprenet-dropout-rate\",\n default=0.5,\n type=float,\n help=\"Dropout rate in decoder prenet\",\n )\n group.add_argument(\n \"--postnet-dropout-rate\",\n default=0.5,\n type=float,\n help=\"Dropout rate in postnet\",\n )\n group.add_argument(\n \"--pretrained-model\", default=None, type=str, help=\"Pretrained model path\"\n )\n\n # loss related\n group.add_argument(\n \"--use-masking\",\n default=True,\n type=strtobool,\n help=\"Whether to use masking in calculation of loss\",\n )\n group.add_argument(\n \"--use-weighted-masking\",\n default=False,\n type=strtobool,\n help=\"Whether to use weighted masking in calculation of loss\",\n )\n group.add_argument(\n \"--loss-type\",\n default=\"L1\",\n choices=[\"L1\", \"L2\", \"L1+L2\"],\n help=\"How to calc loss\",\n )\n group.add_argument(\n \"--bce-pos-weight\",\n default=5.0,\n type=float,\n help=\"Positive sample weight in BCE calculation \"\n \"(only for use-masking=True)\",\n )\n 
group.add_argument(\n            \"--use-guided-attn-loss\",\n            default=False,\n            type=strtobool,\n            help=\"Whether to use guided attention loss\",\n        )\n        group.add_argument(\n            \"--guided-attn-loss-sigma\",\n            default=0.4,\n            type=float,\n            help=\"Sigma in guided attention loss\",\n        )\n        group.add_argument(\n            \"--guided-attn-loss-lambda\",\n            default=1.0,\n            type=float,\n            help=\"Lambda in guided attention loss\",\n        )\n        group.add_argument(\n            \"--num-heads-applied-guided-attn\",\n            default=2,\n            type=int,\n            help=\"Number of heads in each layer to be applied guided attention loss. \"\n            \"If set -1, all of the heads will be applied.\",\n        )\n        group.add_argument(\n            \"--num-layers-applied-guided-attn\",\n            default=2,\n            type=int,\n            help=\"Number of layers to be applied guided attention loss. \"\n            \"If set -1, all of the layers will be applied.\",\n        )\n        group.add_argument(\n            \"--modules-applied-guided-attn\",\n            type=str,\n            nargs=\"+\",\n            default=[\"encoder-decoder\"],\n            help=\"Module name list to be applied guided attention loss\",\n        )\n        return parser\n\n    @property\n    def attention_plot_class(self):\n        \"\"\"Return plot class for attention weight plot.\"\"\"\n        return TTSPlot\n\n    def __init__(self, idim, odim, args=None):\n        \"\"\"Initialize Transformer-VC module.\n\n        Args:\n            idim (int): Dimension of the inputs.\n            odim (int): Dimension of the outputs.\n            args (Namespace, optional):\n                - eprenet_conv_layers (int):\n                    Number of encoder prenet convolution layers.\n                - eprenet_conv_chans (int):\n                    Number of encoder prenet convolution channels.\n                - eprenet_conv_filts (int):\n                    Filter size of encoder prenet convolution.\n                - transformer_input_layer (str): Input layer before the encoder.\n                - dprenet_layers (int): Number of decoder prenet layers.\n                - dprenet_units (int): Number of decoder prenet hidden units.\n                - elayers (int): Number of encoder layers.\n                - eunits (int): Number of encoder hidden units.\n                - adim (int): Number of attention transformation dimensions.\n                - aheads (int): Number of heads for multi head attention.\n                - dlayers (int): Number of decoder layers.\n                - dunits (int): Number of decoder hidden units.\n                - postnet_layers (int): Number of postnet layers.\n                - postnet_chans (int): Number of postnet channels.\n                - postnet_filts (int): Filter size of postnet.\n                - use_scaled_pos_enc (bool):\n                    Whether to use trainable scaled positional encoding.\n                - use_batch_norm (bool):\n                    Whether to use batch normalization in encoder prenet.\n                - encoder_normalize_before (bool):\n                    Whether to perform layer normalization before encoder block.\n                - decoder_normalize_before (bool):\n                    Whether to perform layer normalization before decoder block.\n                - encoder_concat_after (bool): Whether to concatenate\n                    attention layer's input and output in encoder.\n                - decoder_concat_after (bool): Whether to concatenate\n                    attention layer's input and output in decoder.\n                - reduction_factor (int): Reduction factor (for decoder).\n                - encoder_reduction_factor (int): Reduction factor (for encoder).\n                - spk_embed_dim (int): Number of speaker embedding dimensions.\n                - spk_embed_integration_type: How to integrate speaker embedding.\n                - transformer_init (float): How to initialize transformer parameters.\n                - transformer_lr (float): Initial value of learning rate.\n                - transformer_warmup_steps (int): Optimizer warmup steps.\n                - transformer_enc_dropout_rate (float):\n                    Dropout rate in encoder except attention & positional encoding.\n                - transformer_enc_positional_dropout_rate (float):\n                    Dropout rate after encoder positional encoding.\n                - transformer_enc_attn_dropout_rate (float):\n                    Dropout rate in encoder self-attention module.\n                - transformer_dec_dropout_rate (float):\n                    Dropout rate in decoder except attention & positional encoding.\n                - transformer_dec_positional_dropout_rate (float):\n                    Dropout rate after decoder positional encoding.\n                - transformer_dec_attn_dropout_rate (float):\n                    Dropout rate in decoder self-attention module.\n                - transformer_enc_dec_attn_dropout_rate (float):\n                    Dropout rate in encoder-decoder attention module.\n                - eprenet_dropout_rate (float): Dropout rate in encoder prenet.\n                - dprenet_dropout_rate (float): Dropout rate in decoder prenet.\n                - postnet_dropout_rate (float): Dropout rate in postnet.\n                - use_masking (bool):\n                    Whether to apply masking for padded part in loss calculation.\n                - use_weighted_masking (bool):\n                    Whether to apply weighted masking in loss calculation.\n                - bce_pos_weight (float): Positive sample weight in BCE calculation\n                    (only for use_masking=true).\n                - loss_type (str): How to calculate loss.\n                - use_guided_attn_loss (bool): Whether to use guided attention loss.\n                - num_heads_applied_guided_attn (int):\n                    Number of heads in each layer to apply guided attention loss.\n                - num_layers_applied_guided_attn (int):\n                    Number of layers to apply guided attention loss.\n                - modules_applied_guided_attn (list):\n                    List of module names to apply guided attention loss.\n                - guided_attn_loss_sigma (float): Sigma in guided attention loss.\n                - guided_attn_loss_lambda (float): Lambda in guided attention loss.\n\n        \"\"\"\n        # initialize base classes\n        TTSInterface.__init__(self)\n        torch.nn.Module.__init__(self)\n\n        # fill missing arguments\n        args = fill_missing_args(args, self.add_arguments)\n\n        # store hyperparameters\n        self.idim = idim\n        self.odim = odim\n        self.spk_embed_dim = args.spk_embed_dim\n        if self.spk_embed_dim is not None:\n            self.spk_embed_integration_type = args.spk_embed_integration_type\n        self.use_scaled_pos_enc = args.use_scaled_pos_enc\n        self.reduction_factor = args.reduction_factor\n        self.encoder_reduction_factor = args.encoder_reduction_factor\n        self.transformer_input_layer = args.transformer_input_layer\n        self.loss_type = args.loss_type\n        self.use_guided_attn_loss = args.use_guided_attn_loss\n        if self.use_guided_attn_loss:\n            if args.num_layers_applied_guided_attn == -1:\n                self.num_layers_applied_guided_attn = args.elayers\n            else:\n                self.num_layers_applied_guided_attn = (\n                    args.num_layers_applied_guided_attn\n                )\n            if args.num_heads_applied_guided_attn == -1:\n                self.num_heads_applied_guided_attn = args.aheads\n            else:\n                self.num_heads_applied_guided_attn = args.num_heads_applied_guided_attn\n            self.modules_applied_guided_attn = args.modules_applied_guided_attn\n\n        # use idx 0 as padding idx\n        padding_idx = 0\n\n        # get positional encoding class\n        pos_enc_class = (\n            ScaledPositionalEncoding if self.use_scaled_pos_enc else PositionalEncoding\n        )\n\n        # define transformer encoder\n        if args.eprenet_conv_layers != 0:\n            # encoder prenet\n            encoder_input_layer = torch.nn.Sequential(\n                EncoderPrenet(\n                    idim=idim,\n                    elayers=0,\n                    econv_layers=args.eprenet_conv_layers,\n                    econv_chans=args.eprenet_conv_chans,\n                    econv_filts=args.eprenet_conv_filts,\n                    use_batch_norm=args.use_batch_norm,\n                    dropout_rate=args.eprenet_dropout_rate,\n                    padding_idx=padding_idx,\n                    input_layer=torch.nn.Linear(\n                        idim * args.encoder_reduction_factor, idim\n                    ),\n                ),\n                torch.nn.Linear(args.eprenet_conv_chans, args.adim),\n            )\n        elif args.transformer_input_layer == \"linear\":\n            encoder_input_layer = torch.nn.Linear(\n                idim * args.encoder_reduction_factor, args.adim\n            )\n        else:\n            
encoder_input_layer = args.transformer_input_layer\n self.encoder = Encoder(\n idim=idim,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.eunits,\n num_blocks=args.elayers,\n input_layer=encoder_input_layer,\n dropout_rate=args.transformer_enc_dropout_rate,\n positional_dropout_rate=args.transformer_enc_positional_dropout_rate,\n attention_dropout_rate=args.transformer_enc_attn_dropout_rate,\n pos_enc_class=pos_enc_class,\n normalize_before=args.encoder_normalize_before,\n concat_after=args.encoder_concat_after,\n positionwise_layer_type=args.positionwise_layer_type,\n positionwise_conv_kernel_size=args.positionwise_conv_kernel_size,\n )\n\n # define projection layer\n if self.spk_embed_dim is not None:\n if self.spk_embed_integration_type == \"add\":\n self.projection = torch.nn.Linear(self.spk_embed_dim, args.adim)\n else:\n self.projection = torch.nn.Linear(\n args.adim + self.spk_embed_dim, args.adim\n )\n\n # define transformer decoder\n if args.dprenet_layers != 0:\n # decoder prenet\n decoder_input_layer = torch.nn.Sequential(\n DecoderPrenet(\n idim=odim,\n n_layers=args.dprenet_layers,\n n_units=args.dprenet_units,\n dropout_rate=args.dprenet_dropout_rate,\n ),\n torch.nn.Linear(args.dprenet_units, args.adim),\n )\n else:\n decoder_input_layer = \"linear\"\n self.decoder = Decoder(\n odim=-1,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.dunits,\n num_blocks=args.dlayers,\n dropout_rate=args.transformer_dec_dropout_rate,\n positional_dropout_rate=args.transformer_dec_positional_dropout_rate,\n self_attention_dropout_rate=args.transformer_dec_attn_dropout_rate,\n src_attention_dropout_rate=args.transformer_enc_dec_attn_dropout_rate,\n input_layer=decoder_input_layer,\n use_output_layer=False,\n pos_enc_class=pos_enc_class,\n normalize_before=args.decoder_normalize_before,\n concat_after=args.decoder_concat_after,\n )\n\n # define final projection\n self.feat_out = torch.nn.Linear(args.adim, odim * args.reduction_factor)\n self.prob_out = torch.nn.Linear(args.adim, args.reduction_factor)\n\n # define postnet\n self.postnet = (\n None\n if args.postnet_layers == 0\n else Postnet(\n idim=idim,\n odim=odim,\n n_layers=args.postnet_layers,\n n_chans=args.postnet_chans,\n n_filts=args.postnet_filts,\n use_batch_norm=args.use_batch_norm,\n dropout_rate=args.postnet_dropout_rate,\n )\n )\n\n # define loss function\n self.criterion = TransformerLoss(\n use_masking=args.use_masking,\n use_weighted_masking=args.use_weighted_masking,\n bce_pos_weight=args.bce_pos_weight,\n )\n if self.use_guided_attn_loss:\n self.attn_criterion = GuidedMultiHeadAttentionLoss(\n sigma=args.guided_attn_loss_sigma, alpha=args.guided_attn_loss_lambda,\n )\n\n # initialize parameters\n self._reset_parameters(\n init_type=args.transformer_init,\n init_enc_alpha=args.initial_encoder_alpha,\n init_dec_alpha=args.initial_decoder_alpha,\n )\n\n # load pretrained model\n if args.pretrained_model is not None:\n self.load_pretrained_model(args.pretrained_model)\n\n def _reset_parameters(self, init_type, init_enc_alpha=1.0, init_dec_alpha=1.0):\n # initialize parameters\n initialize(self, init_type)\n\n # initialize alpha in scaled positional encoding\n if self.use_scaled_pos_enc:\n self.encoder.embed[-1].alpha.data = torch.tensor(init_enc_alpha)\n self.decoder.embed[-1].alpha.data = torch.tensor(init_dec_alpha)\n\n def _add_first_frame_and_remove_last_frame(self, ys):\n ys_in = torch.cat(\n [ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1\n )\n 
return ys_in\n\n    def forward(self, xs, ilens, ys, labels, olens, spembs=None, *args, **kwargs):\n        \"\"\"Calculate forward propagation.\n\n        Args:\n            xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).\n            ilens (LongTensor): Batch of lengths of each input batch (B,).\n            ys (Tensor): Batch of padded target features (B, Lmax, odim).\n            labels (Tensor): Batch of padded stop token labels (B, Lmax).\n            olens (LongTensor): Batch of the lengths of each target (B,).\n            spembs (Tensor, optional): Batch of speaker embedding vectors\n                (B, spk_embed_dim).\n\n        Returns:\n            Tensor: Loss value.\n\n        \"\"\"\n        # remove unnecessary padded part (for multi-gpus)\n        max_ilen = max(ilens)\n        max_olen = max(olens)\n        if max_ilen != xs.shape[1]:\n            xs = xs[:, :max_ilen]\n        if max_olen != ys.shape[1]:\n            ys = ys[:, :max_olen]\n            labels = labels[:, :max_olen]\n\n        # thin out input frames for reduction factor\n        # (B, Lmax, idim) -> (B, Lmax // r, idim * r)\n        if self.encoder_reduction_factor > 1:\n            B, Lmax, idim = xs.shape\n            if Lmax % self.encoder_reduction_factor != 0:\n                xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]\n            xs_ds = xs.contiguous().view(\n                B,\n                int(Lmax / self.encoder_reduction_factor),\n                idim * self.encoder_reduction_factor,\n            )\n            ilens_ds = ilens.new(\n                [ilen // self.encoder_reduction_factor for ilen in ilens]\n            )\n        else:\n            xs_ds, ilens_ds = xs, ilens\n\n        # forward encoder\n        x_masks = self._source_mask(ilens_ds)\n        hs, hs_masks = self.encoder(xs_ds, x_masks)\n\n        # integrate speaker embedding\n        if self.spk_embed_dim is not None:\n            hs_int = self._integrate_with_spk_embed(hs, spembs)\n        else:\n            hs_int = hs\n\n        # thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)\n        if self.reduction_factor > 1:\n            ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]\n            olens_in = olens.new([olen // self.reduction_factor for olen in olens])\n        else:\n            ys_in, olens_in = ys, olens\n\n        # add first zero frame and remove last frame for auto-regressive\n        ys_in = self._add_first_frame_and_remove_last_frame(ys_in)\n\n        # if conv2d, modify mask. 
Use ceiling division here\n        if \"conv2d\" in self.transformer_input_layer:\n            ilens_ds_st = ilens_ds.new(\n                [((ilen - 2 + 1) // 2 - 2 + 1) // 2 for ilen in ilens_ds]\n            )\n        else:\n            ilens_ds_st = ilens_ds\n\n        # forward decoder\n        y_masks = self._target_mask(olens_in)\n        zs, _ = self.decoder(ys_in, y_masks, hs_int, hs_masks)\n        # (B, Lmax//r, odim * r) -> (B, Lmax//r * r, odim)\n        before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)\n        # (B, Lmax//r, r) -> (B, Lmax//r * r)\n        logits = self.prob_out(zs).view(zs.size(0), -1)\n\n        # postnet -> (B, Lmax//r * r, odim)\n        if self.postnet is None:\n            after_outs = before_outs\n        else:\n            after_outs = before_outs + self.postnet(\n                before_outs.transpose(1, 2)\n            ).transpose(1, 2)\n\n        # modify mod part of ground truth\n        if self.reduction_factor > 1:\n            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])\n            max_olen = max(olens)\n            ys = ys[:, :max_olen]\n            labels = labels[:, :max_olen]\n            labels[:, -1] = 1.0 # make sure at least one frame has 1\n\n        # calculate loss values\n        l1_loss, l2_loss, bce_loss = self.criterion(\n            after_outs, before_outs, logits, ys, labels, olens\n        )\n        if self.loss_type == \"L1\":\n            loss = l1_loss + bce_loss\n        elif self.loss_type == \"L2\":\n            loss = l2_loss + bce_loss\n        elif self.loss_type == \"L1+L2\":\n            loss = l1_loss + l2_loss + bce_loss\n        else:\n            raise ValueError(\"unknown --loss-type \" + self.loss_type)\n        report_keys = [\n            {\"l1_loss\": l1_loss.item()},\n            {\"l2_loss\": l2_loss.item()},\n            {\"bce_loss\": bce_loss.item()},\n            {\"loss\": loss.item()},\n        ]\n\n        # calculate guided attention loss\n        if self.use_guided_attn_loss:\n            # calculate for encoder\n            if \"encoder\" in self.modules_applied_guided_attn:\n                att_ws = []\n                for idx, layer_idx in enumerate(\n                    reversed(range(len(self.encoder.encoders)))\n                ):\n                    att_ws += [\n                        self.encoder.encoders[layer_idx].self_attn.attn[\n                            :, : self.num_heads_applied_guided_attn\n                        ]\n                    ]\n                    if idx + 1 == self.num_layers_applied_guided_attn:\n                        break\n                att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_in, T_in)\n                enc_attn_loss = self.attn_criterion(\n                    att_ws, ilens_ds_st, ilens_ds_st\n                ) # TODO(unilight): is changing to ilens_ds_st right?\n                loss = loss + enc_attn_loss\n                report_keys += [{\"enc_attn_loss\": enc_attn_loss.item()}]\n            # calculate for decoder\n            if \"decoder\" in self.modules_applied_guided_attn:\n                att_ws = []\n                for idx, layer_idx in enumerate(\n                    reversed(range(len(self.decoder.decoders)))\n                ):\n                    att_ws += [\n                        self.decoder.decoders[layer_idx].self_attn.attn[\n                            :, : self.num_heads_applied_guided_attn\n                        ]\n                    ]\n                    if idx + 1 == self.num_layers_applied_guided_attn:\n                        break\n                att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_out)\n                dec_attn_loss = self.attn_criterion(att_ws, olens_in, olens_in)\n                loss = loss + dec_attn_loss\n                report_keys += [{\"dec_attn_loss\": dec_attn_loss.item()}]\n            # calculate for encoder-decoder\n            if \"encoder-decoder\" in self.modules_applied_guided_attn:\n                att_ws = []\n                for idx, layer_idx in enumerate(\n                    reversed(range(len(self.decoder.decoders)))\n                ):\n                    att_ws += [\n                        self.decoder.decoders[layer_idx].src_attn.attn[\n                            :, : self.num_heads_applied_guided_attn\n                        ]\n                    ]\n                    if idx + 1 == self.num_layers_applied_guided_attn:\n                        break\n                att_ws = torch.cat(att_ws, dim=1) # (B, H*L, T_out, T_in)\n                enc_dec_attn_loss = self.attn_criterion(\n                    att_ws, ilens_ds_st, olens_in\n                ) # TODO(unilight): is changing to ilens_ds_st right?\n                loss = loss + enc_dec_attn_loss\n                report_keys += [{\"enc_dec_attn_loss\": enc_dec_attn_loss.item()}]\n\n        # report extra information\n        if self.use_scaled_pos_enc:\n            report_keys += [\n                {\"encoder_alpha\": self.encoder.embed[-1].alpha.data.item()},\n                {\"decoder_alpha\": self.decoder.embed[-1].alpha.data.item()},\n            ]\n        self.reporter.report(report_keys)\n\n        return loss\n\n    def inference(self, x, inference_args, spemb=None, *args, **kwargs):\n        \"\"\"Generate the sequence of features given the sequences of acoustic features.\n\n        Args:\n            x (Tensor): Input sequence of acoustic features (T, idim).\n            inference_args (Namespace):\n                - threshold (float): Threshold in inference.\n                - minlenratio (float): Minimum length ratio in inference.\n                - maxlenratio (float): Maximum length ratio in inference.\n            spemb (Tensor, optional): Speaker embedding vector (spk_embed_dim).\n\n        Returns:\n            Tensor: Output sequence of features (L, odim).\n            Tensor: Output sequence of stop probabilities (L,).\n            Tensor: Encoder-decoder (source) attention weights (#layers, #heads, L, T).\n\n        \"\"\"\n        # get options\n        threshold = inference_args.threshold\n        minlenratio = inference_args.minlenratio\n        maxlenratio = inference_args.maxlenratio\n        use_att_constraint = getattr(\n            inference_args, \"use_att_constraint\", False\n        ) # keep compatibility\n        if use_att_constraint:\n            logging.warning(\n                \"Attention constraint is not yet supported in Transformer. Not enabled.\"\n            )\n\n        # thin out input frames for reduction factor\n        # (B, Lmax, idim) -> (B, Lmax // r, idim * r)\n        if self.encoder_reduction_factor > 1:\n            Lmax, idim = x.shape\n            if Lmax % self.encoder_reduction_factor != 0:\n                x = x[: -(Lmax % self.encoder_reduction_factor), :]\n            x_ds = x.contiguous().view(\n                int(Lmax / self.encoder_reduction_factor),\n                idim * self.encoder_reduction_factor,\n            )\n        else:\n            x_ds = x\n\n        # forward encoder\n        x_ds = x_ds.unsqueeze(0)\n        hs, _ = self.encoder(x_ds, None)\n\n        # integrate speaker embedding\n        if self.spk_embed_dim is not None:\n            spembs = spemb.unsqueeze(0)\n            hs = self._integrate_with_spk_embed(hs, spembs)\n\n        # set limits of length\n        maxlen = int(hs.size(1) * maxlenratio / self.reduction_factor)\n        minlen = int(hs.size(1) * minlenratio / self.reduction_factor)\n\n        # initialize\n        idx = 0\n        ys = hs.new_zeros(1, 1, self.odim)\n        outs, probs = [], []\n\n        # forward decoder step-by-step\n        z_cache = self.decoder.init_state(x)\n        while True:\n            # update index\n            idx += 1\n\n            # calculate output and stop prob at idx-th step\n            y_masks = subsequent_mask(idx).unsqueeze(0).to(x.device)\n            z, z_cache = self.decoder.forward_one_step(\n                ys, y_masks, hs, cache=z_cache\n            ) # (B, adim)\n            outs += [\n                self.feat_out(z).view(self.reduction_factor, self.odim)\n            ] # [(r, odim), ...]\n            probs += [torch.sigmoid(self.prob_out(z))[0]] # [(r), ...]\n\n            # update next inputs\n            ys = torch.cat(\n                (ys, outs[-1][-1].view(1, 1, self.odim)), dim=1\n            ) # (1, idx + 1, odim)\n\n            # get attention weights\n            att_ws_ = []\n            for name, m in self.named_modules():\n                if isinstance(m, MultiHeadedAttention) and \"src\" in name:\n                    att_ws_ += [m.attn[0, :, -1].unsqueeze(1)] # [(#heads, 1, T),...]\n            if idx == 1:\n                att_ws = att_ws_\n            else:\n                # [(#heads, l, T), ...]\n                att_ws = [\n                    torch.cat([att_w, att_w_], dim=1)\n                    for att_w, att_w_ in zip(att_ws, att_ws_)\n                ]\n\n            # check whether to finish generation\n            if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:\n                # check minimum length\n                if idx < minlen:\n                    continue\n                outs = (\n                    torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)\n                ) # (L, odim) -> (1, L, odim) -> (1, odim, L)\n                if self.postnet is not None:\n                    outs = outs + self.postnet(outs) # (1, odim, L)\n                outs = outs.transpose(2, 1).squeeze(0) # (L, 
odim)\n                probs = torch.cat(probs, dim=0)\n                break\n\n        # concatenate attention weights -> (#layers, #heads, L, T)\n        att_ws = torch.stack(att_ws, dim=0)\n\n        return outs, probs, att_ws\n\n    def calculate_all_attentions(\n        self,\n        xs,\n        ilens,\n        ys,\n        olens,\n        spembs=None,\n        skip_output=False,\n        keep_tensor=False,\n        *args,\n        **kwargs\n    ):\n        \"\"\"Calculate all of the attention weights.\n\n        Args:\n            xs (Tensor): Batch of padded acoustic features (B, Tmax, idim).\n            ilens (LongTensor): Batch of lengths of each input batch (B,).\n            ys (Tensor): Batch of padded target features (B, Lmax, odim).\n            olens (LongTensor): Batch of the lengths of each target (B,).\n            spembs (Tensor, optional): Batch of speaker embedding vectors\n                (B, spk_embed_dim).\n            skip_output (bool, optional): Whether to skip calculating the final output.\n            keep_tensor (bool, optional): Whether to keep the original tensor.\n\n        Returns:\n            dict: Dict of attention weights and outputs.\n\n        \"\"\"\n        with torch.no_grad():\n            # thin out input frames for reduction factor\n            # (B, Lmax, idim) -> (B, Lmax // r, idim * r)\n            if self.encoder_reduction_factor > 1:\n                B, Lmax, idim = xs.shape\n                if Lmax % self.encoder_reduction_factor != 0:\n                    xs = xs[:, : -(Lmax % self.encoder_reduction_factor), :]\n                xs_ds = xs.contiguous().view(\n                    B,\n                    int(Lmax / self.encoder_reduction_factor),\n                    idim * self.encoder_reduction_factor,\n                )\n                ilens_ds = ilens.new(\n                    [ilen // self.encoder_reduction_factor for ilen in ilens]\n                )\n            else:\n                xs_ds, ilens_ds = xs, ilens\n\n            # forward encoder\n            x_masks = self._source_mask(ilens_ds)\n            hs, hs_masks = self.encoder(xs_ds, x_masks)\n\n            # integrate speaker embedding\n            if self.spk_embed_dim is not None:\n                hs = self._integrate_with_spk_embed(hs, spembs)\n\n            # thin out frames for reduction factor\n            # (B, Lmax, odim) -> (B, Lmax//r, odim)\n            if self.reduction_factor > 1:\n                ys_in = ys[:, self.reduction_factor - 1 :: self.reduction_factor]\n                olens_in = olens.new([olen // self.reduction_factor for olen in olens])\n            else:\n                ys_in, olens_in = ys, olens\n\n            # add first zero frame and remove last frame for auto-regressive\n            ys_in = self._add_first_frame_and_remove_last_frame(ys_in)\n\n            # forward decoder\n            y_masks = self._target_mask(olens_in)\n            zs, _ = self.decoder(ys_in, y_masks, hs, hs_masks)\n\n            # calculate final outputs\n            if not skip_output:\n                before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)\n                if self.postnet is None:\n                    after_outs = before_outs\n                else:\n                    after_outs = before_outs + self.postnet(\n                        before_outs.transpose(1, 2)\n                    ).transpose(1, 2)\n\n        # modify mod part of output lengths due to reduction factor > 1\n        if self.reduction_factor > 1:\n            olens = olens.new([olen - olen % self.reduction_factor for olen in olens])\n\n        # store into dict\n        att_ws_dict = dict()\n        if keep_tensor:\n            for name, m in self.named_modules():\n                if isinstance(m, MultiHeadedAttention):\n                    att_ws_dict[name] = m.attn\n            if not skip_output:\n                att_ws_dict[\"before_postnet_fbank\"] = before_outs\n                att_ws_dict[\"after_postnet_fbank\"] = after_outs\n        else:\n            for name, m in self.named_modules():\n                if isinstance(m, MultiHeadedAttention):\n                    attn = m.attn.cpu().numpy()\n                    if \"encoder\" in name:\n                        attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]\n                    elif \"decoder\" in name:\n                        if \"src\" in name:\n                            attn = [\n                                a[:, :ol, :il]\n                                for a, il, ol in zip(\n                                    attn, ilens.tolist(), olens_in.tolist()\n                                )\n                            ]\n                        elif \"self\" in name:\n                            attn = [\n                                a[:, :l, :l] for a, l in zip(attn, olens_in.tolist())\n                            ]\n                        else:\n                            logging.warning(\"unknown attention module: \" + name)\n                    else:\n                        logging.warning(\"unknown attention module: \" + name)\n                    att_ws_dict[name] = attn\n            if not skip_output:\n                before_outs = before_outs.cpu().numpy()\n                after_outs = after_outs.cpu().numpy()\n                att_ws_dict[\"before_postnet_fbank\"] = [\n                    m[:l].T for m, l in zip(before_outs, olens.tolist())\n                ]\n                att_ws_dict[\"after_postnet_fbank\"] = [\n                    m[:l].T for m, l in zip(after_outs, olens.tolist())\n                ]\n\n        return att_ws_dict\n\n    def _integrate_with_spk_embed(self, hs, spembs):\n        \"\"\"Integrate speaker embedding with hidden states.\n\n        Args:\n            hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).\n            spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).\n\n        Returns:\n            Tensor: Batch of integrated hidden state sequences (B, Tmax, adim).\n\n        \"\"\"\n        if self.spk_embed_integration_type == \"add\":\n            # apply projection and then add to hidden states\n            spembs = self.projection(F.normalize(spembs))\n            hs = hs + spembs.unsqueeze(1)\n        elif self.spk_embed_integration_type == \"concat\":\n            # concat hidden states with spk embeds and then apply projection\n            spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)\n            hs = self.projection(torch.cat([hs, spembs], dim=-1))\n        else:\n            raise NotImplementedError(\"support only add or concat.\")\n\n        return hs\n\n    def _source_mask(self, ilens):\n        \"\"\"Make masks for self-attention.\n\n        Args:\n            ilens (LongTensor or List): Batch of lengths (B,).\n\n        Returns:\n            Tensor: Mask tensor for self-attention.\n                dtype=torch.uint8 in PyTorch 1.2-\n                dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n        Examples:\n            >>> ilens = [5, 3]\n            >>> self._source_mask(ilens)\n            tensor([[[1, 1, 1, 1, 1]],\n                    [[1, 1, 1, 0, 0]]], dtype=torch.uint8)\n\n        \"\"\"\n        x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)\n        return x_masks.unsqueeze(-2)\n\n    def _target_mask(self, olens):\n        \"\"\"Make masks for masked self-attention.\n\n        Args:\n            olens (LongTensor or List): Batch of lengths (B,).\n\n        Returns:\n            Tensor: Mask tensor for masked self-attention.\n                dtype=torch.uint8 in PyTorch 1.2-\n                dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n        Examples:\n            >>> olens = [5, 3]\n            >>> self._target_mask(olens)\n            tensor([[[1, 0, 0, 0, 0],\n                     [1, 1, 0, 0, 0],\n                     [1, 1, 1, 0, 0],\n                     [1, 1, 1, 1, 0],\n                     [1, 1, 1, 1, 1]],\n                    [[1, 0, 0, 0, 0],\n                     [1, 1, 0, 0, 0],\n                     [1, 1, 1, 0, 0],\n                     [1, 1, 1, 0, 0],\n                     [1, 1, 1, 0, 0]]], dtype=torch.uint8)\n\n        \"\"\"\n        y_masks = make_non_pad_mask(olens).to(next(self.parameters()).device)\n        s_masks = subsequent_mask(y_masks.size(-1), device=y_masks.device).unsqueeze(0)\n        return y_masks.unsqueeze(-2) & s_masks\n\n    @property\n    def base_plot_keys(self):\n        \"\"\"Return base key names to plot during training.\n\n        Keys should match what `chainer.reporter` reports.\n        If you add the key `loss`, the reporter will report `main/loss`\n        and `validation/main/loss` values.\n        Also, `loss.png` will be created as a figure visualizing `main/loss`\n        and `validation/main/loss` values.\n\n        Returns:\n            list: List of strings which are base keys to plot during training.\n\n        \"\"\"\n        plot_keys = [\"loss\", \"l1_loss\", \"l2_loss\", \"bce_loss\"]\n        if self.use_scaled_pos_enc:\n            plot_keys += [\"encoder_alpha\", \"decoder_alpha\"]\n        if self.use_guided_attn_loss:\n            if \"encoder\" in self.modules_applied_guided_attn:\n                plot_keys += [\"enc_attn_loss\"]\n            if \"decoder\" in self.modules_applied_guided_attn:\n                plot_keys += [\"dec_attn_loss\"]\n            if \"encoder-decoder\" in self.modules_applied_guided_attn:\n                plot_keys += [\"enc_dec_attn_loss\"]\n\n        return plot_keys\n" ]
[ [ "torch.stack", "torch.nn.Linear", "torch.nn.Module.__init__", "torch.nn.functional.normalize", "torch.no_grad", "torch.tensor", "torch.cat" ] ]
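Both `forward` and `inference` in the record above rely on the same frame-thinning trick for the encoder reduction factor: trim the frames that do not divide evenly by r, then reshape (B, Lmax, idim) into (B, Lmax // r, idim * r). A minimal, self-contained sketch of that reshape; the values of B, Lmax, idim, and r here are illustrative toys, not taken from the record:

import torch

B, Lmax, idim, r = 2, 10, 4, 3          # toy sizes; r is the encoder reduction factor
xs = torch.randn(B, Lmax, idim)
if Lmax % r != 0:
    xs = xs[:, : -(Lmax % r), :]        # drop trailing frames that do not fill a group
xs_ds = xs.contiguous().view(B, xs.size(1) // r, idim * r)
print(xs_ds.shape)                      # torch.Size([2, 3, 12])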
mpsilfve/fairseq
[ "eb228ee74c6bc9803eb7dbd398d8cda16c55ccd2" ]
[ "fairseq/optim/adam.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\nimport math\nfrom collections.abc import Collection\nfrom dataclasses import dataclass, field\nfrom typing import List\n\nimport torch\nimport torch.distributed as dist\nimport torch.optim\nfrom fairseq.dataclass import FairseqDataclass\nfrom fairseq.optim import FairseqOptimizer, register_optimizer\nfrom fairseq.optim.fused_adam import get_fused_adam_class\nfrom omegaconf import II, DictConfig\n\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass FairseqAdamConfig(FairseqDataclass):\n adam_betas: str = field(\n default=\"(0.9, 0.999)\", metadata={\"help\": \"betas for Adam optimizer\"}\n )\n adam_eps: float = field(\n default=1e-8, metadata={\"help\": \"epsilon for Adam optimizer\"}\n )\n weight_decay: float = field(default=0.0, metadata={\"help\": \"weight decay\"})\n use_old_adam: bool = field(\n default=False, metadata={\"help\": \"Use fairseq.optim.adam.Adam\"}\n )\n # TODO common vars below in parent\n tpu: bool = II(\"common.tpu\")\n lr: List[float] = II(\"optimization.lr\")\n\n\n@register_optimizer(\"adam\", dataclass=FairseqAdamConfig)\nclass FairseqAdam(FairseqOptimizer):\n \"\"\"Adam optimizer for fairseq.\n\n Important note: this optimizer corresponds to the \"AdamW\" variant of\n Adam in its weight decay behavior. As such, it is most closely\n analogous to torch.optim.AdamW from PyTorch.\n \"\"\"\n\n def __init__(self, cfg: DictConfig, params):\n super().__init__(cfg)\n fused_adam_cls = get_fused_adam_class()\n use_fused_adam = (\n not getattr(cfg, \"use_old_adam\", False)\n and fused_adam_cls is not None\n and torch.cuda.is_available()\n )\n if getattr(cfg, \"tpu\", False):\n # on TPUs we use the Adam defined here, since it\n # automatically casts gradients to FP32\n self._optimizer = Adam(params, **self.optimizer_config)\n elif use_fused_adam:\n logger.info(\"using FusedAdam\")\n self._optimizer = fused_adam_cls(params, **self.optimizer_config)\n else:\n self._optimizer = Adam(params, **self.optimizer_config)\n\n @property\n def optimizer_config(self):\n \"\"\"\n Return a kwarg dictionary that will be used to override optimizer\n args stored in checkpoints. 
This allows us to load a checkpoint and\n resume training using a different set of optimizer args, e.g., with a\n different learning rate.\n \"\"\"\n return {\n \"lr\": self.cfg.lr[0]\n if isinstance(self.cfg.lr, Collection)\n else self.cfg.lr,\n \"betas\": eval(self.cfg.adam_betas),\n \"eps\": self.cfg.adam_eps,\n \"weight_decay\": self.cfg.weight_decay,\n }\n\n def average_params(self):\n \"\"\"Reduce Params is only used during BMUF distributed training.\"\"\"\n state_dict = self.optimizer.state_dict()\n total_gpus = float(dist.get_world_size())\n\n for _, value in state_dict[\"state\"].items():\n value[\"exp_avg\"] /= total_gpus\n value[\"exp_avg_sq\"] /= total_gpus\n dist.all_reduce(value[\"exp_avg\"], op=dist.ReduceOp.SUM)\n dist.all_reduce(value[\"exp_avg_sq\"], op=dist.ReduceOp.SUM)\n\n\nclass Adam(torch.optim.Optimizer):\n r\"\"\"Implements Adam algorithm.\n\n This implementation is modified from torch.optim.Adam based on:\n `Fixed Weight Decay Regularization in Adam`\n (see https://arxiv.org/abs/1711.05101)\n\n It has been proposed in `Adam: A Method for Stochastic Optimization`_.\n\n Args:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1e-3)\n betas (Tuple[float, float], optional): coefficients used for computing\n running averages of gradient and its square (default: (0.9, 0.999))\n eps (float, optional): term added to the denominator to improve\n numerical stability (default: 1e-8)\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n amsgrad (boolean, optional): whether to use the AMSGrad variant of this\n algorithm from the paper `On the Convergence of Adam and Beyond`_\n\n .. _Adam\\: A Method for Stochastic Optimization:\n https://arxiv.org/abs/1412.6980\n .. _On the Convergence of Adam and Beyond:\n https://openreview.net/forum?id=ryQu7f-RZ\n \"\"\"\n\n def __init__(\n self,\n params,\n lr=1e-3,\n betas=(0.9, 0.999),\n eps=1e-8,\n weight_decay=0,\n amsgrad=False,\n ):\n defaults = dict(\n lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad\n )\n super(Adam, self).__init__(params, defaults)\n\n @property\n def supports_memory_efficient_fp16(self):\n return True\n\n @property\n def supports_flat_params(self):\n return True\n\n def step(self, closure=None):\n \"\"\"Performs a single optimization step.\n\n Args:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n \"\"\"\n loss = None\n if closure is not None:\n loss = closure()\n\n for group in self.param_groups:\n for p in group[\"params\"]:\n if p.grad is None:\n continue\n grad = p.grad.data\n if grad.dtype in {torch.float16, torch.bfloat16}:\n grad = grad.float()\n if grad.is_sparse:\n raise RuntimeError(\n \"Adam does not support sparse gradients, please consider SparseAdam instead\"\n )\n amsgrad = group.get(\"amsgrad\", False)\n\n p_data_fp32 = p.data\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p_data_fp32 = p_data_fp32.float()\n\n state = self.state[p]\n\n # State initialization\n if len(state) == 0:\n state[\"step\"] = 0\n # Exponential moving average of gradient values\n state[\"exp_avg\"] = torch.zeros_like(p_data_fp32)\n # Exponential moving average of squared gradient values\n state[\"exp_avg_sq\"] = torch.zeros_like(p_data_fp32)\n if amsgrad:\n # Maintains max of all exp. moving avg. of sq. grad. 
values\n state[\"max_exp_avg_sq\"] = torch.zeros_like(p_data_fp32)\n else:\n state[\"exp_avg\"] = state[\"exp_avg\"].to(p_data_fp32)\n state[\"exp_avg_sq\"] = state[\"exp_avg_sq\"].to(p_data_fp32)\n if amsgrad:\n state[\"max_exp_avg_sq\"] = state[\"max_exp_avg_sq\"].to(\n p_data_fp32\n )\n\n exp_avg, exp_avg_sq = state[\"exp_avg\"], state[\"exp_avg_sq\"]\n if amsgrad:\n max_exp_avg_sq = state[\"max_exp_avg_sq\"]\n beta1, beta2 = group[\"betas\"]\n\n state[\"step\"] += 1\n\n # Decay the first and second moment running average coefficient\n exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)\n exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)\n if amsgrad:\n # Maintains the maximum of all 2nd moment running avg. till now\n torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)\n # Use the max. for normalizing running avg. of gradient\n denom = max_exp_avg_sq.sqrt().add_(group[\"eps\"])\n else:\n denom = exp_avg_sq.sqrt().add_(group[\"eps\"])\n\n bias_correction1 = 1 - beta1 ** state[\"step\"]\n bias_correction2 = 1 - beta2 ** state[\"step\"]\n step_size = group[\"lr\"] * math.sqrt(bias_correction2) / bias_correction1\n\n if group[\"weight_decay\"] != 0:\n p_data_fp32.add_(\n p_data_fp32, alpha=-group[\"weight_decay\"] * group[\"lr\"]\n )\n\n p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)\n\n if p.data.dtype in {torch.float16, torch.bfloat16}:\n p.data.copy_(p_data_fp32)\n\n return loss\n" ]
[ [ "torch.distributed.get_world_size", "torch.zeros_like", "torch.cuda.is_available", "torch.distributed.all_reduce", "torch.max" ] ]
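The Adam class in the record above applies weight decay directly to the parameter, scaled by the learning rate, instead of folding it into the gradient; that is the AdamW-style behaviour its docstring describes. A minimal sketch of one such update on toy tensors, mirroring the step logic (values are illustrative, not fairseq defaults):

import math
import torch

p = torch.ones(3)                          # parameter
g = torch.full((3,), 0.5)                  # gradient
exp_avg, exp_avg_sq = torch.zeros(3), torch.zeros(3)
lr, (beta1, beta2), eps, wd, step = 1e-3, (0.9, 0.999), 1e-8, 0.01, 1

exp_avg.mul_(beta1).add_(g, alpha=1 - beta1)            # first-moment estimate
exp_avg_sq.mul_(beta2).addcmul_(g, g, value=1 - beta2)  # second-moment estimate
denom = exp_avg_sq.sqrt().add_(eps)
step_size = lr * math.sqrt(1 - beta2 ** step) / (1 - beta1 ** step)
p.add_(p, alpha=-wd * lr)                  # decoupled (AdamW-style) weight decay
p.addcdiv_(exp_avg, denom, value=-step_size)
print(p)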
sdjohnson-astro/redshifting
[ "6073123bf3ea6e48de410d99521e418abc980c99" ]
[ "cubs_compare_spec1D.py" ]
[ "#!/usr/bin/env python\nimport argparse\nfrom astropy.table import Table\nimport numpy as np\n\n# Set up the command-line argument parser\nparser = argparse.ArgumentParser(description='Compare two versions of spec1D files from CUBS IMACS or LDSS3')\nparser.add_argument('-d1', metavar='directory 1', type=str, help='Parent directory 1', required=True)\nparser.add_argument('-d2', metavar='directory 2', type=str, help='Parent directory 2', required=True)\nparser.add_argument('-m', metavar='maskname', type=str, help='mask name', required=True)\n\nargs = parser.parse_args()\n\n\n# Read the object table for this mask and add a column for the comparison metric\nmask = Table.read('{}/{}_spec1D/{}_objects.fits'.format(args.d1, args.m, args.m))\nmask['maxabsDflux'] = 0.0\n\nfor obj in mask:\n\n    try:\n\n        filename1 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d1, args.m, args.m, obj['row'], obj['id'])\n        spec1 = Table.read(filename1)\n\n        filename2 = '{}/{}_spec1D/{}_{}_{}.fits'.format(args.d2, args.m, args.m, obj['row'], obj['id'])\n        spec2 = Table.read(filename2)\n\n        # Maximum absolute flux difference between the two reductions of this object\n        print(np.max(np.abs(spec1['flux'] - spec2['flux'])))\n        obj['maxabsDflux'] = np.max(np.abs(spec1['flux'] - spec2['flux']))\n\n    except FileNotFoundError:\n\n        print('file not found')\n\nprint(mask)\n\nmaxabsDiff = np.max(mask['maxabsDflux'])\n\nif maxabsDiff > 0.0:\n\n    print('Differences found!!!!!!!!!!!')\n\nelse:\n\n    print('No difference -- ok')" ]
[ [ "numpy.max", "numpy.abs" ] ]
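The script in this record reduces each object comparison to a single number, the maximum absolute flux difference. A toy check of that metric, with synthetic arrays standing in for the spec1D flux columns:

import numpy as np

flux1 = np.array([1.0, 2.0, 3.0])
flux2 = np.array([1.0, 2.5, 3.0])
max_abs_diff = np.max(np.abs(flux1 - flux2))
print(max_abs_diff)  # 0.5, which would take the 'Differences found' branch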
gelijergensen/PermutationImportance
[ "7a09a407e42745c223055e0597c5226ff64b2f3c" ]
[ "PermutationImportance/abstract_runner.py" ]
[ "\"\"\"The general algorithm for all of the data-based variable importance methods\nis the same, regardless of whether the method is Sequential Selection or \nPermutation Importance or something else. This is represented in the \n``abstract_variable_importance`` function. All of the different methods we \nprovide use this function under the hood and the only difference between them is\nthe ``selection_strategy`` object, which is detailed in \n:mod:`PermutationImportance.selection_strategies`. Typically, you will not need \nto use this method but can instead use one of the methods imported directly into \nthe top package of **PermutationImportance**.\n\nIf you wish to implement your own variable importance method, you will need to\ndevise your own ``selection_strategy``. We recommend using\n:mod:`PermutationImportance.selection_strategies` as a template for implementing \nyour own variable importance method.\"\"\"\n\nimport numpy as np\nimport multiprocessing as mp\n\nfrom .data_verification import verify_data, determine_variable_names\nfrom .multiprocessing_utils import pool_imap_unordered\nfrom .result import ImportanceResult\nfrom .scoring_strategies import verify_scoring_strategy\nfrom .utils import add_ranks_to_dict, get_data_subset\n\n\ndef abstract_variable_importance(training_data, scoring_data, scoring_fn, scoring_strategy, selection_strategy, variable_names=None, nimportant_vars=None, method=None, njobs=1):\n    \"\"\"Performs an abstract variable importance over data given a particular\n    set of functions for scoring, determining optimal variables, and selecting\n    data\n\n    :param training_data: a 2-tuple ``(inputs, outputs)`` for training in the\n        ``scoring_fn``\n    :param scoring_data: a 2-tuple ``(inputs, outputs)`` for scoring in the\n        ``scoring_fn``\n    :param scoring_fn: a function to be used for scoring. Should be of the form\n        ``(training_data, scoring_data) -> some_value``\n    :param scoring_strategy: a function to be used for determining optimal\n        variables. Should be of the form ``([some_value]) -> index``\n    :param selection_strategy: an object which, when called as\n        ``selection_strategy(training_data, scoring_data, num_vars, important_vars)``,\n        returns an iterable of triples ``(variable, training_data, scoring_data)``.\n        Typically a\n        :class:`PermutationImportance.selection_strategies.SelectionStrategy`\n    :param variable_names: an optional list for variable names. If not given,\n        will use names of columns of data (if pandas dataframe) or column\n        indices\n    :param nimportant_vars: number of variables to compute importance for.\n        Defaults to all variables\n    :param method: a string for the name of the method used. Defaults to the\n        name of the ``selection_strategy`` if not given\n    :param njobs: an integer for the number of threads to use. If negative, will\n        use ``num_cpus + njobs``. 
Defaults to 1\n :returns: :class:`PermutationImportance.result.ImportanceResult` object \n which contains the results for each run\n \"\"\"\n\n training_data = verify_data(training_data)\n scoring_data = verify_data(scoring_data)\n scoring_strategy = verify_scoring_strategy(scoring_strategy)\n variable_names = determine_variable_names(scoring_data, variable_names)\n nimportant_vars = len(\n variable_names) if nimportant_vars is None else nimportant_vars\n method = getattr(selection_strategy, \"name\", getattr(\n selection_strategy, \"__name__\")) if method is None else method\n njobs = mp.cpu_count() + njobs if njobs <= 0 else njobs\n\n important_vars = list()\n num_vars = len(variable_names)\n\n # Compute the original score over all the data\n original_score = scoring_fn(training_data, scoring_data)\n result_obj = ImportanceResult(method, variable_names, original_score)\n for _ in range(nimportant_vars):\n selection_iter = selection_strategy(\n training_data, scoring_data, num_vars, important_vars)\n if njobs == 1:\n result = _singlethread_iteration(\n selection_iter, scoring_fn)\n else:\n result = _multithread_iteration(\n selection_iter, scoring_fn, njobs)\n next_result = add_ranks_to_dict(\n result, variable_names, scoring_strategy)\n best_var = min(\n next_result.keys(), key=lambda key: next_result[key][0])\n best_index = np.flatnonzero(variable_names == best_var)[0]\n result_obj.add_new_results(\n next_result, next_important_variable=best_var)\n important_vars.append(best_index)\n\n return result_obj\n\n\ndef _singlethread_iteration(selection_iterator, scoring_fn):\n \"\"\"Handles a single pass of the abstract variable importance algorithm, \n assuming a single worker thread\n\n :param selection_iterator: an iterator which yields triples\n ``(variable, training_data, scoring_data)``. Typically a \n :class:`PermutationImportance.selection_strategies.SelectionStrategy`\n :param scoring_fn: a function to be used for scoring. Should be of the form\n ``(training_data, scoring_data) -> float``\n :returns: a dict of ``{var: score}``\n \"\"\"\n result = dict()\n for var, training_data, scoring_data in selection_iterator:\n score = scoring_fn(training_data, scoring_data)\n result[var] = score\n return result\n\n\ndef _multithread_iteration(selection_iterator, scoring_fn, njobs):\n \"\"\"Handles a single pass of the abstract variable importance algorithm using\n multithreading\n\n :param selection_iterator: an iterator which yields triples\n ``(variable, training_data, scoring_data)``. Typically a \n :class:`PermutationImportance.selection_strategies.SelectionStrategy`\n :param scoring_fn: a function to be used for scoring. Should be of the form\n ``(training_data, scoring_data) -> float``\n :param num_jobs: number of processes to use\n :returns: a dict of ``{var: score}``\n \"\"\"\n result = dict()\n for index, score in pool_imap_unordered(scoring_fn, selection_iterator, njobs):\n result[index] = score\n return result\n" ]
[ [ "numpy.flatnonzero" ] ]
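The docstring in this record pins down the ``scoring_fn`` contract as ``(training_data, scoring_data) -> some_value``. A hedged sketch of one conforming function; the mean-predictor "model" and MSE score are illustrative choices, not part of the package:

import numpy as np

def example_scoring_fn(training_data, scoring_data):
    # training_data and scoring_data are 2-tuples of (inputs, outputs)
    _, train_y = training_data
    _, score_y = scoring_data
    # trivial stand-in model: predict the training-set mean, score by MSE
    prediction = np.full(len(score_y), np.mean(train_y))
    return float(np.mean((score_y - prediction) ** 2))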
ben15021999/fairseq_rl
[ "89f3c1123052927f67c008f01f3ffa4383f90150" ]
[ "fairseq/tasks/online_backtranslation.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport contextlib\nimport json\nimport logging\nimport math\nimport os\nfrom argparse import Namespace\nfrom collections import OrderedDict, defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Sequence, Tuple\nfrom argparse import ArgumentError\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport fairseq\nfrom fairseq import metrics, options, utils\nfrom fairseq.data import (\n FairseqDataset,\n LanguagePairDataset,\n NoisingDataset,\n PrependTokenDataset,\n RoundRobinZipDatasets,\n TransformEosLangPairDataset,\n data_utils,\n encoders,\n)\nfrom fairseq.sequence_generator_rl import SequenceGenerator\nfrom fairseq.tasks import register_task\nfrom fairseq.tasks.translation import TranslationTask, load_langpair_dataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass PiecewiseLinearFn:\n \"\"\"Piecewise linear function. Can be configured with a string.\"\"\"\n\n def __init__(self, pieces: Sequence[Tuple[int, float]]):\n assert pieces == sorted(\n pieces\n ), f\"PiecewiseLinearFn configuration should be sorted, received: {pieces}\"\n\n self.pieces = pieces\n\n def __call__(self, x: int) -> float:\n for i, (x_a, y_a) in enumerate(self.pieces[:-1]):\n x_b, y_b = self.pieces[i + 1]\n if x_a <= x <= x_b:\n return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)\n\n return self.pieces[-1][1]\n\n @staticmethod\n def from_string(configuration: str) -> \"PiecewiseLinearFn\":\n \"\"\"\n Parse the configuration of lambda coefficient (for scheduling).\n x = \"3\" # lambda will be a constant equal to x\n x = \"0:1,1000:0\" # lambda will start from 1 and linearly decrease\n # to 0 during the first 1000 iterations\n x = \"0:0,1000:0,2000:1\" # lambda will be equal to 0 for the first 1000\n # iterations, then will linearly increase to 1 until iteration 2000\n \"\"\"\n if isinstance(configuration, float):\n return PiecewiseLinearFn([(0, configuration)])\n\n try:\n parts = configuration.split(\",\")\n if len(parts) == 1:\n v = float(configuration)\n return PiecewiseLinearFn([(0, v)])\n\n split = [s.split(\":\") for s in parts]\n pieces = [(int(t), float(v)) for t, v in split]\n return PiecewiseLinearFn(pieces)\n except Exception:\n raise ValueError(\n f\"Invalid PiecewiseLinearFn configuration: {configuration!r}\"\n )\n\n @staticmethod\n def one() -> \"PiecewiseLinearFn\":\n return PiecewiseLinearFn([(0, 1.0)])\n\n\n@register_task(\"online_backtranslation\")\nclass OnlineBackTranslationTask(TranslationTask):\n @staticmethod\n def add_args(parser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n # fmt: off\n # Generic translation args\n parser.add_argument('data', help='colon separated path to data directories list, \\\n will be iterated upon during epochs in round-robin manner; \\\n however, valid and test data are always in the first directory to \\\n avoid the need for repeating them in all directories')\n parser.add_argument('--mono-langs', metavar='MONO_LANGS',\n help='monolingual languages for training')\n parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',\n help='language pairs for validation')\n parser.add_argument('--load-alignments', action='store_true',\n help='load the binarized alignments')\n parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',\n help='pad the source on the left')\n 
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',\n                            help='pad the target on the left')\n        parser.add_argument('--upsample-primary', default=1, type=int,\n                            help='amount to upsample primary dataset')\n        try:\n            parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',\n                                help='max number of tokens in the source sequence')\n            parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',\n                                help='max number of tokens in the target sequence')\n        except ArgumentError:\n            # this might have already been defined. Once we transition this to hydra it should be fine to add it here.\n            pass\n        parser.add_argument('--truncate-source', action='store_true', default=False,\n                            help='truncate source to max-source-positions')\n        parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',\n                            help='if >0, then bucket source and target lengths into N '\n                                 'buckets and pad accordingly; this is useful on TPUs '\n                                 'to minimize the number of compilations')\n\n        # Denoising args\n        parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',\n                            help='maximum word shuffle distance for denoising autoencoding data generation')\n        parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',\n                            help='word dropout probability for denoising autoencoding data generation')\n        parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',\n                            help='word blanking probability for denoising autoencoding data generation')\n\n        # Backtranslation args\n        parser.add_argument('--lambda-bt', default=\"1.0\", type=str, metavar='N',\n                            help='back-translation weight')\n        parser.add_argument('--lambda-dae', default=\"1.0\", type=str, metavar='N',\n                            help='denoising auto-encoder weight')\n\n        # Evaluation args\n        parser.add_argument('--generate-one-by-one', action='store_true',\n                            help='generate one sentence at a time for backtranslation')\n\n        parser.add_argument('--eval-bleu', action='store_true',\n                            help='evaluation with BLEU scores')\n        parser.add_argument('--eval-bleu-detok', type=str, default=\"space\",\n                            help='detokenize before computing BLEU (e.g., \"moses\"); '\n                                 'required if using --eval-bleu; use \"space\" to '\n                                 'disable detokenization; see fairseq.data.encoders '\n                                 'for other options')\n        parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',\n                            help='args for building the tokenizer, if needed')\n        parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,\n                            help='compute tokenized BLEU instead of sacrebleu')\n        parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,\n                            help='remove BPE before computing BLEU')\n        parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',\n                            help='generation args for BLEU scoring, '\n                                 'e.g., \\'{\"beam\": 4, \"lenpen\": 0.6}\\'')\n        parser.add_argument('--eval-bleu-print-samples', action='store_true',\n                            help='print sample generations during validation')\n        # fmt: on\n\n    def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):\n        super().__init__(args, common_dict, common_dict)\n        self.common_dict = common_dict\n        self.mono_langs = mono_langs\n        self.valid_lang_pairs = valid_lang_pairs\n\n        self.SHOW_SAMPLES_INTERVAL = 1000\n        # Start by showing samples\n        self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL\n        self.SHOW_SAMPLES_NUMBER = 5\n        self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)\n        self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)\n\n        self.args = args\n        self.data 
= utils.split_paths(self.args.data)\n if len(self.data) == 1:\n shards = list(Path(self.data[0]).glob(\"shard*\"))\n if len(shards) > 0:\n # keep this as strings, since it can also be a manifold path\n old_data = self.data\n self.data = [str(shard) for shard in shards]\n logging.warning(f\"Expanded data directory {old_data} to {self.data}\")\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n \"\"\"Setup the task (e.g., load dictionaries).\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n args.left_pad_source = options.eval_bool(args.left_pad_source)\n args.left_pad_target = options.eval_bool(args.left_pad_target)\n\n paths = utils.split_paths(args.data)\n assert len(paths) > 0\n assert args.mono_langs is not None\n\n mono_langs = args.mono_langs.split(\",\")\n valid_lang_pairs = args.valid_lang_pairs.split(\",\")\n\n # load dictionary\n dict_path = os.path.join(paths[0], \"dict.txt\")\n common_dict = cls.load_dictionary(dict_path)\n\n return cls(args, common_dict, mono_langs, valid_lang_pairs)\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:\n \"\"\"Load a given dataset split.\n\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n if split == \"train\":\n data_path = self.data[(epoch - 1) % len(self.data)]\n dataset = self.load_train_dataset(data_path)\n else:\n # valid/test should always be the same.\n dataset = self.load_translation_dataset(split, self.data[0])\n\n self.datasets[split] = dataset\n return dataset\n\n def load_train_dataset(self, data_path: str) -> FairseqDataset:\n \"\"\"The training dataset is made of backtranslation dataset and denoising dataset.\"\"\"\n data = []\n for lang in self.mono_langs:\n train_path = os.path.join(data_path, lang, \"train\")\n # TODO: could we do the BT using denoise sample ?\n # this would half the data loading work\n data.append((f\"{lang}-BT\", self.load_bt_dataset(train_path, lang)))\n data.append(\n (f\"{lang}-DENOISE\", self.load_denoise_dataset(train_path, lang))\n )\n\n return RoundRobinZipDatasets(OrderedDict(data))\n\n def _langpair_dataset(\n self, src: FairseqDataset, tgt: FairseqDataset\n ) -> LanguagePairDataset:\n return LanguagePairDataset(\n src,\n src.sizes,\n self.dictionary,\n tgt=tgt,\n tgt_sizes=tgt.sizes,\n tgt_dict=self.dictionary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n # TODO: should we shuffle ? 
we are already sorting batch by sizes so ?\n # shuffle=True,\n )\n\n def _prepend_lang_bos_to_target(\n self, dataset: LanguagePairDataset, lang: str\n ) -> LanguagePairDataset:\n bos = _lang_token_index(self.dictionary, lang)\n return TransformEosLangPairDataset(\n dataset,\n src_eos=self.dictionary.eos(),\n new_src_eos=self.dictionary.eos(),\n tgt_bos=self.dictionary.eos(),\n new_tgt_bos=bos,\n )\n\n def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:\n \"\"\"The BT dataset is generated with (tgt, tgt) pairs.\n The actual translation to a (generated_src, tgt) pair\n is done on the fly during training.\n \"\"\"\n mono_dataset = data_utils.load_indexed_dataset(\n data_path, self.common_dict, self.args.dataset_impl\n )\n assert mono_dataset is not None, f\"No dataset found for {lang}\"\n\n mono_dataset_src = PrependTokenDataset(\n mono_dataset, _lang_token_index(self.dictionary, lang)\n )\n\n mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)\n logger.info(\n f\"mono_lang = {lang} \"\n f\"lang token index = {_lang_token_index(self.dictionary, lang)} \"\n f\"lang token = {_lang_token(lang)}\"\n )\n\n mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)\n return mono_dataset_bt\n\n def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:\n \"\"\"Classic denoising dataset\"\"\"\n dataset = data_utils.load_indexed_dataset(\n data_path, self.common_dict, self.args.dataset_impl\n )\n noisy_dataset = NoisingDataset(\n dataset,\n self.dictionary,\n seed=1,\n max_word_shuffle_distance=self.args.max_word_shuffle_distance,\n word_dropout_prob=self.args.word_dropout_prob,\n word_blanking_prob=self.args.word_blanking_prob,\n )\n noisy_dataset = PrependTokenDataset(\n noisy_dataset, _lang_token_index(self.dictionary, lang)\n )\n\n clean_dataset = data_utils.load_indexed_dataset(\n data_path, self.common_dict, self.args.dataset_impl\n )\n denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)\n denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)\n return denoising_dataset\n\n def load_translation_dataset(\n self, split: str, data_path: str, combine: bool = False\n ):\n # only judging with one language pair for the moment,\n # since ConcatDataset doesn't work as expected\n assert len(self.valid_lang_pairs) == 1, \"For now...\"\n valid_lang_pair = self.valid_lang_pairs[0]\n src, tgt = valid_lang_pair.split(\"-\")\n\n # use the same function than TranslationTask\n src_tgt_dt = load_langpair_dataset(\n data_path,\n split,\n src,\n self.common_dict,\n tgt,\n self.common_dict,\n combine=combine,\n dataset_impl=self.args.dataset_impl,\n upsample_primary=self.args.upsample_primary,\n left_pad_source=self.args.left_pad_source,\n left_pad_target=self.args.left_pad_target,\n max_source_positions=self.args.max_source_positions,\n max_target_positions=self.args.max_target_positions,\n load_alignments=self.args.load_alignments,\n truncate_source=self.args.truncate_source,\n num_buckets=self.args.num_batch_buckets,\n shuffle=(split != \"test\"),\n prepend_bos_src=_lang_token_index(self.dictionary, src),\n )\n\n src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)\n src_tgt_eos_dt.args = self.args\n return src_tgt_eos_dt\n\n def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):\n raise NotImplementedError\n\n def build_model(self, args, from_checkpoint=False):\n # torch.autograd.set_detect_anomaly(True)\n model = super().build_model(args, 
 def build_model(self, args, from_checkpoint=False):\n # torch.autograd.set_detect_anomaly(True)\n model = super().build_model(args, from_checkpoint)\n\n add_special_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)\n\n self.sequence_generators = {}\n for mono_lang in self.mono_langs:\n self.sequence_generators[mono_lang] = SequenceGenerator(\n [model],\n tgt_dict=self.dictionary,\n beam_size=1,\n max_len_a=1.3,\n max_len_b=5,\n min_len=5,\n # keep 1 to be able to prepend bos\n max_len=model.max_decoder_positions() - 1,\n )\n\n if getattr(args, \"eval_bleu\", False):\n assert getattr(args, \"eval_bleu_detok\", None) is not None, (\n \"--eval-bleu-detok is required if using --eval-bleu; \"\n \"try --eval-bleu-detok=moses (or --eval-bleu-detok=space \"\n \"to disable detokenization, e.g., when using sentencepiece)\"\n )\n detok_args = json.loads(getattr(args, \"eval_bleu_detok_args\", \"{}\") or \"{}\")\n self.tokenizer = encoders.build_tokenizer(\n Namespace(\n tokenizer=getattr(args, \"eval_bleu_detok\", None), **detok_args\n )\n )\n\n gen_args = json.loads(getattr(args, \"eval_bleu_args\", \"{}\") or \"{}\")\n self.bleu_sequence_generator = self.build_generator(\n [model], Namespace(**gen_args)\n )\n\n return model\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.args.max_source_positions, self.args.max_target_positions)\n\n @property\n def dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.common_dict\n\n def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):\n self._show_samples_ctr += 1\n if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:\n return\n self._show_samples_ctr = 0\n\n ln = smp[\"net_input\"][\"src_tokens\"].shape[0]\n\n logger.info(\n f\"(r:{self.args.distributed_rank}) : \"\n f\"{other_lang} ---> {mono_lang} \"\n f\"({other_lang} was generated by back-translation.) {ln} samples\"\n )\n\n for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):\n src_tokens = smp[\"net_input\"][\"src_tokens\"][i]\n tgt_tokens = smp[\"target\"][i]\n\n src_str = self.dictionary.string(src_tokens, \"sentencepiece\")\n tgt_str = self.dictionary.string(tgt_tokens, \"sentencepiece\")\n logger.info(\n f\"\\n{i}\\t\\t[{other_lang} generated] {src_str}\\n\"\n f\"\\t\\t[{mono_lang} original ] {tgt_str}\\n\"\n f\"\\t\\t[ src tokens] {src_tokens}\\n\"\n )\n\n
 def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:\n \"\"\"\n * WARNING: smp is modified in place.\n * At the start of this function, `smp` has the same input and target:\n |--------------------------------------------------------|\n | smp['net_input']['src_tokens'] | smp['target'] |\n | (from data) __en__ hello world | __en__ hello world |\n |--------------------------------------------------------|\n\n * We call generator.generate(smp, bos_token = token(\"ro\")),\n and copy the result as input\n * At the end, `smp` has the translation to other language.\n |--------------------------------------------------------|\n | smp['net_input']['src_tokens'] | smp['target'] |\n | (generated) __ro__ salut lume | __en__ hello world |\n |--------------------------------------------------------|\n\n \"\"\"\n bos_token = _lang_token_index(self.dictionary, other_lang)\n generated = self.sequence_generators[orig_lang].generate(\n models=[], sample=smp, bos_token=bos_token\n )\n\n max_length = max([gn[0][\"tokens\"].size(0) for gn in generated])\n net_input = smp[\"net_input\"]\n n_src_tokens = torch.empty(\n size=(len(generated), max_length + 1), dtype=net_input[\"src_tokens\"].dtype\n )\n n_src_lengths = torch.empty(\n len(generated), dtype=net_input[\"src_lengths\"].dtype\n )\n\n for i, gn in enumerate(generated):\n tokens = gn[0][\"tokens\"]\n tokens_size = tokens.size(0)\n padding_needed = max_length - tokens_size\n tokens = torch.cat([tokens.new([bos_token]), tokens])\n tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())\n n_src_tokens[i] = tokens\n n_src_lengths[i] = tokens_size + 1\n\n device = net_input[\"src_tokens\"].device\n # This seems to be important\n del net_input[\"src_tokens\"]\n del net_input[\"src_lengths\"]\n net_input[\"src_tokens\"] = n_src_tokens.to(device)\n net_input[\"src_lengths\"] = n_src_lengths.to(device)\n\n def generate(self, smp, model):\n model.eval()\n orig_lang = (\n self.dictionary[smp[\"net_input\"][\"src_tokens\"][0][0]]\n .replace(\" \", \"\")\n .replace(\"_\", \"\")\n )\n bos_token = smp[\"net_input\"][\"prev_output_tokens\"][0][0]\n with torch.no_grad():\n generated = self.sequence_generators[orig_lang].generate(\n models=[model], sample=smp, bos_token=bos_token\n )\n return generated\n\n def get_other_lang(self, lang):\n # TODO: allow more complex mapping\n if lang != self.mono_langs[0]:\n return self.mono_langs[0]\n if len(self.mono_langs) == 2:\n return self.mono_langs[1]\n return self.mono_langs[np.random.randint(1, len(self.mono_langs))]\n\n
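 # Each sub-task's loss is scaled by its schedule (lambda_bt / lambda_dae);\n # a weight of exactly 0 skips that sub-task's forward and backward passes.\n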
 def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False\n ):\n\n model.train()\n model.set_num_updates(update_num)\n\n agg_loss, agg_sample_size = 0.0, 0.0\n agg_logging_output: Dict[str, float] = defaultdict(float)\n\n dataset_keys = self.datasets[\"train\"].datasets.keys()\n\n weights = {\n \"BT\": self.lambda_bt(update_num),\n \"DENOISE\": self.lambda_dae(update_num),\n }\n log_keys = {\"BT\": \"bt_\", \"DENOISE\": \"dae_\"}\n\n for dataset_key in dataset_keys:\n smp = sample[dataset_key]\n mono_lang, task_subtype = dataset_key.split(\"-\")\n if weights[task_subtype] == 0:\n continue\n\n if task_subtype == \"BT\":\n with torch.autograd.profiler.record_function(\"backtranslation\"):\n model.eval()\n # TODO: Could we translate to several languages at once?\n # This would allow us to share encoder_out and maximize GPU usage.\n other_lang = self.get_other_lang(mono_lang)\n self.backtranslate_sample(smp, mono_lang, other_lang)\n self.display_samples_once_in_a_while(smp, mono_lang, other_lang)\n model.train()\n\n # Like in FairseqTask.train_step\n with torch.autograd.profiler.record_function(\"forward\"):\n loss, sample_size, logging_output = criterion(model, smp)\n loss *= weights[task_subtype]\n if ignore_grad:\n loss *= 0\n with torch.autograd.profiler.record_function(\"backward\"):\n optimizer.backward(loss)\n\n agg_loss += loss.item()\n agg_sample_size += sample_size\n for k in logging_output:\n agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]\n agg_logging_output[k] += logging_output[k]\n\n return agg_loss, agg_sample_size, agg_logging_output\n\n def get_bos_token_from_sample(self, sample):\n net_input = sample[\"net_input\"]\n source_lang_token_id = torch.unique(net_input[\"src_tokens\"][:, 0]).item()\n source_lang_token = self.dictionary[source_lang_token_id].replace(\"_\", \"\")\n target_lang_token_id = _lang_token_index(\n self.dictionary, self.get_other_lang(source_lang_token)\n )\n\n return target_lang_token_id\n\n def reduce_metrics(self, logging_outputs, criterion):\n super().reduce_metrics(logging_outputs, criterion)\n bt_sample_size = sum(x.get(\"bt_sample_size\", 0) for x in logging_outputs)\n if bt_sample_size:\n bt_loss_sum = sum(x.get(\"bt_loss\", 0) for x in logging_outputs)\n bt_loss_sum *= 1 / bt_sample_size / math.log(2)\n metrics.log_scalar(\"bt_loss\", bt_loss_sum, bt_sample_size, round=3)\n\n bt_nll_loss_sum = sum(x.get(\"bt_nll_loss\", 0) for x in logging_outputs)\n bt_ntokens = sum(x.get(\"bt_ntokens\", 0) for x in logging_outputs)\n bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)\n metrics.log_scalar(\"bt_nll_loss\", bt_nll_loss_sum, bt_ntokens, round=3)\n metrics.log_derived(\n \"bt_ppl\", lambda meters: utils.get_perplexity(meters[\"bt_nll_loss\"].avg)\n )\n\n dae_sample_size = sum(x.get(\"dae_sample_size\", 0) for x in logging_outputs)\n if dae_sample_size:\n dae_loss_sum = sum(x.get(\"dae_loss\", 0) for x in logging_outputs)\n dae_loss_sum *= 1 / dae_sample_size / math.log(2)\n metrics.log_scalar(\"dae_loss\", dae_loss_sum, dae_sample_size, round=3)\n\n dae_nll_loss_sum = sum(x.get(\"dae_nll_loss\", 0) for x in logging_outputs)\n dae_ntokens = sum(x.get(\"dae_ntokens\", 0) for x in logging_outputs)\n dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)\n metrics.log_scalar(\"dae_nll_loss\", dae_nll_loss_sum, dae_ntokens, round=3)\n metrics.log_derived(\n \"dae_ppl\",\n lambda meters: utils.get_perplexity(meters[\"dae_nll_loss\"].avg),\n )\n\n\n
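# Hypothetical usage sketch for extend_embedding (names and shapes are\n# illustrative, not part of the original code):\n# emb = torch.nn.Embedding(100, 512)\n# extend_embedding(emb, new_vocab_size=104, copy_from_token_id=0)\n# assert emb.weight.shape == (104, 512) # 4 new rows, copied from token 0\n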
\"out_features\"):\n emb.out_features = new_vocab_size\n\n if getattr(emb, \"bias\", None) is None:\n return\n\n # Fix the bias.\n # Bias shape can be different from the previous vocab size\n # if the weight matrix was shared and alread extended but not the bias.\n (old_vocab_size,) = emb.bias.shape\n assert new_vocab_size >= old_vocab_size\n if new_vocab_size > old_vocab_size:\n old_bias = emb.bias.data\n new_bias = torch.zeros(\n (new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device\n )\n new_bias[:old_vocab_size] = old_bias\n emb.bias.data = new_bias\n\n\ndef add_secial_tokens_to_dict_and_model(\n dictionary: \"fairseq.data.Dictionary\",\n model: nn.Module,\n mono_langs: Sequence[str],\n) -> None:\n embs = model.encoder.embed_tokens\n vocab_size, embedding_dim = embs.weight.shape\n\n # The model may or may not have a '<mask>' embedding yet\n assert (\n len(dictionary) <= vocab_size <= len(dictionary) + 1\n ), f\"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})\"\n # TODO: we should reuse the pretrained model dict which already has <mask>\n dictionary.add_symbol(\"<mask>\")\n\n for lang in mono_langs:\n lang_token = _lang_token(lang)\n dictionary.add_symbol(lang_token)\n logger.info(\n f\"dictionary: {len(dictionary)} -> {vocab_size} tokens \"\n f\"after adding {len(mono_langs)} lang tokens.\"\n )\n\n if len(dictionary) <= vocab_size:\n return\n\n extend_embedding(embs, len(dictionary), dictionary.bos())\n dec_embs = model.decoder.embed_tokens\n extend_embedding(dec_embs, len(dictionary), dictionary.bos())\n lm_head = model.decoder.output_projection\n extend_embedding(lm_head, len(dictionary), dictionary.bos())\n assert lm_head.weight.shape == (len(dictionary), embedding_dim)\n\n\ndef _lang_token(lang: str) -> str:\n return f\"__{lang}__\"\n\n\ndef _lang_token_index(dictionary, lang: str) -> int:\n return dictionary.index(_lang_token(lang))\n\n\[email protected]\ndef assert_weights_have_changed(model: nn.Module):\n def checksum(model: nn.Module) -> float:\n return sum(p.sum().item() for p in model.parameters())\n\n initial_checksum = checksum(model)\n yield model\n final_checksum = checksum(model)\n logger.info(\n f\"initial_checksum={initial_checksum} -> final_checksum={final_checksum}\"\n )\n assert initial_checksum != final_checksum, \"Model hasn't changed !\"\n" ]
[ [ "torch.zeros", "torch.no_grad", "torch.autograd.profiler.record_function", "torch.unique" ] ]
ZhaoChuyang/dgreid
[ "ee1d7af74b796f2f194307ab023e43ecc3d3d525" ]
[ "reid/models/resnet_mldg_smm.py" ]
[ "from __future__ import absolute_import\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.nn import init\nimport torchvision\nfrom collections import OrderedDict\n\nfrom ..models.layers.adain import SMMBlock\n\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152', 'resnet50_mldg_smm']\n\n\nclass ResNet(nn.Module):\n __factory = {\n 18: torchvision.models.resnet18,\n 34: torchvision.models.resnet34,\n 50: torchvision.models.resnet50,\n 101: torchvision.models.resnet101,\n 152: torchvision.models.resnet152,\n }\n\n def __init__(self, depth, pretrained=True, cut_at_pooling=False,\n num_features=0, norm=False, dropout=0, num_classes=None):\n super(ResNet, self).__init__()\n self.pretrained = pretrained\n self.depth = depth\n self.cut_at_pooling = cut_at_pooling\n\n # Construct base (pretrained) resnet\n if depth not in ResNet.__factory:\n raise KeyError(\"Unsupported depth:\", depth)\n resnet = ResNet.__factory[depth](pretrained=pretrained)\n resnet.layer4[0].conv2.stride = (1,1)\n resnet.layer4[0].downsample[0].stride = (1,1)\n # self.base = nn.Sequential(\n # resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool,\n # resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4)\n\n self.conv = nn.Sequential(OrderedDict([\n ('conv1', resnet.conv1),\n ('bn1', resnet.bn1),\n ('relu', resnet.relu),\n ('maxpool', resnet.maxpool)]))\n\n self.layer1 = resnet.layer1\n self.layer2 = resnet.layer2\n self.layer3 = resnet.layer3\n self.layer4 = resnet.layer4\n\n self.gap = nn.AdaptiveAvgPool2d(1)\n\n self.smm_block = SMMBlock(1, rand=False, learnable=False)\n\n if not self.cut_at_pooling:\n self.num_features = num_features\n self.norm = norm\n self.dropout = dropout\n self.has_embedding = num_features > 0\n self.num_classes = num_classes\n\n out_planes = resnet.fc.in_features\n\n # Append new layers\n if self.has_embedding:\n self.feat = nn.Linear(out_planes, self.num_features)\n self.feat_bn = nn.BatchNorm1d(self.num_features)\n init.kaiming_normal_(self.feat.weight, mode='fan_out')\n init.constant_(self.feat.bias, 0)\n else:\n # Change the num_features to CNN output channels\n self.num_features = out_planes\n self.feat_bn = nn.BatchNorm1d(self.num_features)\n self.feat_bn.bias.requires_grad_(False)\n if self.dropout > 0:\n self.drop = nn.Dropout(self.dropout)\n\n self.classifier = nn.Linear(self.num_features, self.num_classes, bias=False)\n init.normal_(self.classifier.weight, std=0.001)\n\n init.constant_(self.feat_bn.weight, 1)\n init.constant_(self.feat_bn.bias, 0)\n\n if not pretrained:\n self.reset_params()\n\n def forward(self, x, meta_train=True, output_prob=False, return_featuremaps=False):\n if self.training:\n num_domains = len(x)\n x = torch.cat(x, dim=0)\n\n x = self.conv(x)\n\n # NOTE: change to 'if self.training and meta_train:'\n if meta_train:\n mixed_x, _ = self.smm_block(x)\n if return_featuremaps:\n return [x, mixed_x]\n x = mixed_x\n\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.gap(x)\n x = x.view(x.size(0), -1)\n\n if self.cut_at_pooling:\n return x\n\n if self.has_embedding:\n bn_x = self.feat_bn(self.feat(x))\n else:\n bn_x = self.feat_bn(x)\n\n if self.training is False and output_prob is False:\n bn_x = F.normalize(bn_x)\n return bn_x\n\n if self.norm:\n norm_bn_x = F.normalize(bn_x)\n elif self.has_embedding:\n bn_x = F.relu(bn_x)\n\n if self.dropout > 0:\n bn_x = self.drop(bn_x)\n\n prob = self.classifier(bn_x)\n\n # prob, mixed_prob = 
torch.chunk(prob, 2, dim=0)\n prob = torch.chunk(prob, num_domains, dim=0)\n # mixed_prob = torch.chunk(mixed_prob, num_domains, dim=0)\n # x, mixed_x = torch.chunk(x, 2, dim=0)\n x = torch.chunk(x, num_domains, dim=0)\n # mixed_x = torch.chunk(mixed_x, num_domains, dim=0)\n\n return prob, x\n\n def reset_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n def get_params(self):\n for param in self.parameters():\n if param.requires_grad:\n yield param\n\n # def train(self, mode=True):\n # \"\"\"\n # Override the default train() to freeze the BN parameters\n # \"\"\"\n # super().train(mode)\n # self.freeze_bn()\n #\n # def freeze_bn(self):\n # for m in self.modules():\n # if isinstance(m, nn.BatchNorm1d):\n # m.eval()\n # if isinstance(m, nn.BatchNorm2d):\n # m.eval()\n\n\ndef resnet18(**kwargs):\n return ResNet(18, **kwargs)\n\n\ndef resnet34(**kwargs):\n return ResNet(34, **kwargs)\n\n\ndef resnet50(**kwargs):\n return ResNet(50, **kwargs)\n\n\ndef resnet101(**kwargs):\n return ResNet(101, **kwargs)\n\n\ndef resnet152(**kwargs):\n return ResNet(152, **kwargs)\n\n\ndef resnet50_mde(**kwargs):\n return ResNet(50, **kwargs)\n\n\ndef resnet50_mldg_smm(**kwargs):\n return ResNet(50, **kwargs)\n\n" ]
[ [ "torch.nn.init.kaiming_normal_", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.BatchNorm1d", "torch.chunk", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.normalize", "torch.nn.init.normal_", "torch.nn.functional.relu", "torch.cat", "torch.nn.Dropout" ] ]
zhanghaohit/incubator-tvm
[ "ee0af843f3c5a3429e888079afb5f30789bd9bee" ]
[ "tests/python/relay/test_op_level1.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nimport numpy as np\nimport pytest\nimport tvm\nimport scipy\nfrom tvm import relay\nfrom tvm.relay import transform\nfrom tvm.relay.testing import ctx_list\nimport topi.testing\nfrom tvm.contrib.nvcc import have_fp16\n\ndef run_infer_type(expr):\n mod = relay.Module.from_expr(expr)\n mod = transform.InferType()(mod)\n entry = mod[\"main\"]\n return entry if isinstance(expr, relay.Function) else entry.body\n\ndef sigmoid(x):\n one = np.ones_like(x)\n return one / (one + np.exp(-x))\n\ndef relu(x):\n x_copy = np.copy(x)\n np.maximum(x_copy, 0, x_copy)\n return x_copy\n\ndef rsqrt(x):\n one = np.ones_like(x)\n return one / np.sqrt(x)\n\ndef test_unary_op():\n def check_single_op(opfunc, ref, dtype):\n shape = (10, 4)\n dtype = dtype\n tp = relay.TensorType(shape)\n x = relay.var(\"x\", tp, dtype=dtype)\n y = opfunc(x)\n # test printer\n assert (\"{}(%x)\".format(y.op.name)) in y.astext()\n # test type inference\n yy = run_infer_type(y)\n assert yy.checked_type == tp\n\n if ref is not None:\n data = np.random.rand(*shape).astype(dtype)\n ref_res = ref(data)\n func = relay.Function([x], y)\n for target, ctx in ctx_list():\n # use graph by execuor default for testing, as we need\n # create function explicitly to avoid constant-folding.\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\n\n for opfunc, ref in [(tvm.relay.log, np.log),\n (tvm.relay.exp, np.exp),\n (tvm.relay.erf, scipy.special.erf),\n (tvm.relay.sqrt, np.sqrt),\n (tvm.relay.rsqrt, rsqrt),\n (tvm.relay.sigmoid, sigmoid),\n (tvm.relay.tanh, np.tanh),\n (relay.nn.relu, relu),\n (tvm.relay.cos, np.cos),\n (tvm.relay.sin, np.sin),\n (tvm.relay.atan, np.arctan)]:\n for dtype in ['float16', 'float32']:\n check_single_op(opfunc, ref, dtype)\n\n\ndef test_binary_op():\n def inst(vars, sh):\n return [vars.get(s, s) for s in sh]\n\n def check_binary_op(opfunc, ref, dtype):\n # TODO(@jroesch): this piece of code improperly uses type variables.\n n = tvm.var(\"n\")\n s1 = (5, n, 5)\n s2 = (n, 1)\n t1 = relay.TensorType(s1)\n t2 = relay.TensorType(s2)\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n # test printer\n assert (\"{}(%x, %y)\".format(z.op.name)) in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == t1\n\n if ref is not None:\n t1 = relay.TensorType((5, 10, 5))\n t2 = relay.TensorType((5, 10, 5))\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n x_data = np.random.rand(5, 10, 5).astype(dtype)\n y_data = 
def test_binary_op():\n def inst(vars, sh):\n return [vars.get(s, s) for s in sh]\n\n def check_binary_op(opfunc, ref, dtype):\n # TODO(@jroesch): this piece of code improperly uses type variables.\n n = tvm.var(\"n\")\n s1 = (5, n, 5)\n s2 = (n, 1)\n t1 = relay.TensorType(s1)\n t2 = relay.TensorType(s2)\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n # test printer\n assert (\"{}(%x, %y)\".format(z.op.name)) in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == t1\n\n if ref is not None:\n t1 = relay.TensorType((5, 10, 5))\n t2 = relay.TensorType((5, 10, 5))\n x = relay.var(\"x\", t1, dtype=dtype)\n y = relay.var(\"y\", t2, dtype=dtype)\n z = opfunc(x, y)\n x_data = np.random.rand(5, 10, 5).astype(dtype)\n y_data = np.random.rand(5, 10, 5).astype(dtype)\n ref_res = ref(x_data, y_data)\n func = relay.Function([x, y], z)\n\n for target, ctx in ctx_list():\n # use the graph executor by default for testing, as we need to\n # create the function explicitly to avoid constant folding.\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n\n for opfunc, ref in [(relay.add, np.add),\n (relay.subtract, np.subtract),\n (relay.multiply, np.multiply),\n (relay.divide, np.divide),\n (relay.floor_divide, np.floor_divide),\n (relay.floor_mod, np.fmod)]:\n for dtype in ['float16', 'float32']:\n check_binary_op(opfunc, ref, dtype)\n\n\ndef test_expand_dims():\n # based on topi test\n def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):\n x = relay.Var(\"x\", relay.TensorType(dshape, dtype))\n func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))\n for target, ctx in ctx_list():\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n data = np.random.uniform(size=dshape).astype(dtype)\n ref_res = data.reshape(oshape)\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)\n for dtype in ['float16', 'float32']:\n verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)\n verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)\n\n\ndef test_bias_add():\n for dtype in ['float16', 'float32']:\n xshape=(10, 2, 3, 4)\n bshape=(2,)\n rtol = 1e-2 if dtype == 'float16' else 1e-5\n x = relay.var(\"x\", shape=xshape, dtype=dtype)\n bias = relay.var(\"bias\", dtype=dtype)\n z = relay.nn.bias_add(x, bias)\n zz = run_infer_type(z)\n assert \"axis=\" not in zz.astext()\n assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)\n\n func = relay.Function([x, bias], z)\n x_data = np.random.uniform(size=xshape).astype(dtype)\n y_data = np.random.uniform(size=bshape).astype(dtype)\n ref_res = x_data + y_data.reshape((2, 1, 1))\n for target, ctx in ctx_list():\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data, y_data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=rtol)\n\n\ndef test_expand_dims_infer_type():\n for dtype in ['float16', 'float32']:\n n, t, d = tvm.size_var(\"n\"), tvm.size_var(\"t\"), 100\n x = relay.var(\"x\", shape=(n, t, d), dtype=dtype)\n y = relay.expand_dims(x, axis=2)\n assert \"axis=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, t, 1, 100), dtype)\n\n\ndef test_softmax():\n for dtype in ['float16', 'float32']:\n # Softmax accuracy for float16 is poor\n if dtype == 'float16':\n return\n shape = (10, 4)\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n y = relay.nn.softmax(x, axis=1)\n assert \"nn.softmax\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=shape).astype(dtype)\n ref_res = topi.testing.softmax_python(x_data)\n for target, ctx in ctx_list():\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n\n
def test_log_softmax():\n for dtype in ['float16', 'float32']:\n # Softmax accuracy for float16 is poor\n if dtype == 'float16':\n return\n shape = (10, 4)\n x = relay.var(\"x\", shape=shape, dtype=dtype)\n y = relay.nn.log_softmax(x, axis=1)\n assert \"nn.log_softmax\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType(shape, dtype)\n func = relay.Function([x], y)\n x_data = np.random.uniform(size=shape).astype(dtype)\n ref_res = topi.testing.log_softmax_python(x_data)\n for target, ctx in ctx_list():\n intrp = relay.create_executor(\"graph\", ctx=ctx, target=target)\n op_res = intrp.evaluate(func)(x_data)\n np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5)\n\n\ndef test_concatenate():\n for dtype in ['float16', 'float32']:\n n, t, d = tvm.size_var(\"n\"), tvm.size_var(\"t\"), 100\n x = relay.var(\"x\", shape=(n, t, d))\n y = relay.var(\"y\", shape=(n, t, d))\n z = relay.concatenate((x, y), axis=-1)\n assert \"axis=\" in z.astext()\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t, 200))\n\n x = relay.exp(x)\n z = relay.concatenate((x, y), axis=2)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t, 200))\n\n z = relay.concatenate((x, y), axis=1)\n zz = run_infer_type(z)\n assert zz.checked_type == relay.TensorType((n, t + t, 100))\n\n # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError)\n try:\n x = relay.var('p1', shape=(2, 5))\n y = relay.var('p2', shape=(2, 3))\n c = relay.concatenate([x, y], axis=0)\n func = relay.Function([x, y], c)\n zz = run_infer_type(func)\n except tvm._ffi.base.TVMError:\n pass\n else:\n assert False\n\n x = relay.var(\"x\", shape=(10, 5), dtype=dtype)\n y = relay.var(\"y\", shape=(10, 5), dtype=dtype)\n t = relay.var(\"z\", shape=(), dtype=dtype)\n z = relay.concatenate((x, y), axis=1)\n z = relay.add(z, t)\n # Check result.\n func = relay.Function([x, y, t], z)\n x_data = np.random.rand(10, 5).astype(dtype)\n y_data = np.random.rand(10, 5).astype(dtype)\n t_data = np.random.uniform(size=()).astype(dtype)\n ref_res = np.concatenate((x_data, y_data), axis=1) + t_data\n\n for target, ctx in ctx_list():\n if dtype == 'float16' and target == 'cuda' and not have_fp16(tvm.gpu(0).compute_version):\n continue\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data, y_data, t_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=0.01)\n op_res2 = intrp2.evaluate(func)(x_data, y_data, t_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=0.01)\n\ndef test_dropout():\n for dtype in ['float16', 'float32']:\n n, t, d = tvm.size_var(\"n\"), tvm.size_var(\"t\"), tvm.size_var(\"d\")\n input_ty = relay.TensorType((n, t, d), dtype)\n x = relay.var(\"x\", input_ty)\n y = relay.nn.dropout(x, rate=0.75)\n assert \"rate=\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == input_ty\n\n\n
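# relay.nn.batch_norm returns a 3-tuple (normalized output, running mean,\n# running variance), hence the TupleType of three tensors asserted below.\n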
def test_batch_norm():\n for dtype in ['float16', 'float32']:\n # beta and gamma ignored\n data = relay.var(\"data\", relay.TensorType((3, 2, 1), dtype))\n beta = relay.var(\"beta\", relay.TensorType((2,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((2,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((2,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((2,), dtype))\n y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,\n center=False, scale=False)\n yy = run_infer_type(y.astuple())\n assert \"center=\" in yy.astext()\n assert yy.checked_type == relay.ty.TupleType(tvm.convert([\n relay.TensorType((3, 2, 1), dtype),\n relay.TensorType((2,), dtype),\n relay.TensorType((2,), dtype)\n ]))\n\n beta = relay.var(\"beta\", relay.TensorType((3,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((3,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((3,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((3,), dtype))\n\n y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,\n axis=0, center=False, scale=False)\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == relay.ty.TupleType(tvm.convert([\n relay.ty.TensorType((3, 2, 1), dtype),\n relay.ty.TensorType((3,), dtype),\n relay.ty.TensorType((3,), dtype)\n ]))\n\n # axis=-1\n data = relay.var(\"data\", relay.TensorType((1, 2, 3), dtype))\n beta = relay.var(\"beta\", relay.TensorType((3,), dtype))\n gamma = relay.var(\"gamma\", relay.TensorType((3,), dtype))\n moving_mean = relay.var(\"moving_mean\", relay.TensorType((3,), dtype))\n moving_var = relay.var(\"moving_var\", relay.TensorType((3,), dtype))\n y = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var,\n axis=-1, center=False, scale=False)\n yy = run_infer_type(y.astuple())\n assert yy.checked_type == relay.ty.TupleType(tvm.convert([\n relay.ty.TensorType((1, 2, 3), dtype),\n relay.ty.TensorType((3,), dtype),\n relay.ty.TensorType((3,), dtype)\n ]))\n\[email protected]\ndef test_dense_type_check():\n dtype = 'float16'\n n, c, h, w = 2, 2, 2, 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n # it should fail since it does not match with m(2)\n mismatch_w = 3\n w = relay.var(\"w\", relay.TensorType((2, mismatch_w), dtype))\n y = relay.nn.dense(x, w)\n yy = run_infer_type(y)\n\ndef test_dense():\n for dtype in ['float16', 'float32']:\n # Dense accuracy for float16 is poor\n if dtype == 'float16':\n return\n n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.TensorType((2, w), dtype))\n y = relay.nn.dense(x, w, units=2)\n assert \"units=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n wh, ww = tvm.size_var(\"wh\"), tvm.size_var(\"ww\")\n w = relay.var(\"w\", relay.TensorType((ww, wh), dtype))\n y = relay.nn.dense(x, w)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)\n\n n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), 2\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), dtype))\n w = relay.var(\"w\", relay.IncompleteType())\n y = relay.nn.dense(x, w, units=2)\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)\n\n x = relay.var(\"x\", shape=(10, 5), dtype=dtype)\n w = relay.var(\"w\", shape=(2, 5), dtype=dtype)\n z = relay.nn.dense(x, w)\n\n # Check result.\n func = relay.Function([x, w], z)\n x_data = np.random.rand(10, 5).astype(dtype)\n w_data = np.random.rand(2, 5).astype(dtype)\n
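 # nn.dense computes X @ W.T, so the NumPy reference transposes the weight.\n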
 ref_res = np.dot(x_data, w_data.T)\n\n for target, ctx in ctx_list():\n intrp1 = relay.create_executor(\"graph\", ctx=ctx, target=target)\n intrp2 = relay.create_executor(\"debug\", ctx=ctx, target=target)\n op_res1 = intrp1.evaluate(func)(x_data, w_data)\n tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5)\n op_res2 = intrp2.evaluate(func)(x_data, w_data)\n tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5)\n\n\ndef test_dense_dtype():\n data_dtype = 'uint8'\n weight_dtype = 'int8'\n out_dtype = 'uint8'\n n, c, h, w = tvm.size_var(\"n\"), tvm.size_var(\"c\"), tvm.size_var(\"h\"), tvm.size_var(\"w\")\n x = relay.var(\"x\", relay.TensorType((n, c, h, w), data_dtype))\n w = relay.var(\"w\", relay.TensorType((2, w), weight_dtype))\n y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype)\n assert \"units=2\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((n, c, h, 2), out_dtype)\n assert run_infer_type(yy.args[0]).checked_type.dtype == 'uint8'\n assert run_infer_type(yy.args[1]).checked_type.dtype == 'int8'\n\n\ndef test_bitserial_dense():\n m, k = tvm.size_var(\"m\"), tvm.size_var(\"k\")\n x = relay.var(\"x\", relay.TensorType((m, k), \"int16\"))\n w = relay.var(\"w\", relay.TensorType((k, 32), \"int16\"))\n y = relay.nn.bitserial_dense(x, w, units=32)\n assert \"units=32\" in y.astext()\n yy = run_infer_type(y)\n assert yy.checked_type == relay.TensorType((m, 32), \"int16\")\n\n\nif __name__ == \"__main__\":\n test_concatenate()\n test_bias_add()\n test_unary_op()\n test_binary_op()\n test_expand_dims_infer_type()\n test_expand_dims()\n test_softmax()\n test_log_softmax()\n test_dropout()\n test_batch_norm()\n test_dense()\n test_bitserial_dense()\n test_dense_dtype()\n" ]
[ [ "numpy.sqrt", "numpy.random.uniform", "numpy.concatenate", "numpy.ones_like", "numpy.copy", "numpy.exp", "numpy.random.rand", "numpy.maximum", "numpy.dot" ] ]