repo_name (string, lengths 8–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) |
---|---|---|---|---|
FusionBolt/nncase | [
"61e41170faed249303295d184f611f27cfefce9d"
] | [
"tests/importer/onnx/basic/test_matmul.py"
] | [
"# Copyright 2019-2021 Canaan Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=invalid-name, unused-argument, import-outside-toplevel\n\nimport pytest\nimport torch\nfrom test_runner import OnnxTestRunner\n\n\ndef _make_module(shape):\n\n class MatmulModule(torch.nn.Module):\n def __init__(self):\n super(MatmulModule, self).__init__()\n self.y = torch.randn(*shape)\n\n def forward(self, x):\n x = torch.matmul(x, self.y)\n\n return x\n\n return MatmulModule()\n\n\nin_shapes = [\n [[1, 2], [2, 1]],\n [[3, 4], [4, 5]]\n]\n\n\[email protected]('in_shape', in_shapes)\ndef test_matmul(in_shape, request):\n module = _make_module(in_shape[1])\n\n runner = OnnxTestRunner(request.node.name)\n model_file = runner.from_torch(module, in_shape[0])\n runner.run(model_file)\n\n\nif __name__ == \"__main__\":\n pytest.main(['-vv', 'test_matmul.py'])\n"
] | [
[
"torch.randn",
"torch.matmul"
]
] |
ayush94582/pifu_surreal | [
"f370165481361991146fb80a0757be38a0763961"
] | [
"lib/model/HGPIFuNet.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .BasePIFuNet import BasePIFuNet\nfrom .SurfaceClassifier import SurfaceClassifier\nfrom .DepthNormalizer import DepthNormalizer\nfrom .HGFilters import *\nfrom ..net_util import init_net\n\n\nclass HGPIFuNet(BasePIFuNet):\n '''\n HG PIFu network uses Hourglass stacks as the image filter.\n It does the following:\n 1. Compute image feature stacks and store it in self.im_feat_list\n self.im_feat_list[-1] is the last stack (output stack)\n 2. Calculate calibration\n 3. If training, it index on every intermediate stacks,\n If testing, it index on the last stack.\n 4. Classification.\n 5. During training, error is calculated on all stacks.\n '''\n\n def __init__(self,\n opt,\n projection_mode='orthogonal',\n error_term=nn.MSELoss(),\n ):\n super(HGPIFuNet, self).__init__(\n projection_mode=projection_mode,\n error_term=error_term)\n\n self.name = 'hgpifu'\n\n self.opt = opt\n self.num_views = self.opt.num_views\n\n self.image_filter = HGFilter(opt)\n\n self.surface_classifier = SurfaceClassifier(\n filter_channels=self.opt.mlp_dim,\n num_views=self.opt.num_views,\n no_residual=self.opt.no_residual,\n last_op=nn.Sigmoid())\n\n self.normalizer = DepthNormalizer(opt)\n\n # This is a list of [B x Feat_i x H x W] features\n self.im_feat_list = []\n self.tmpx = None\n self.normx = None\n\n self.intermediate_preds_list = []\n\n init_net(self)\n\n def filter(self, images):\n '''\n Filter the input images\n store all intermediate features.\n :param images: [B, C, H, W] input images\n '''\n self.im_feat_list, self.tmpx, self.normx = self.image_filter(images)\n # If it is not in training, only produce the last im_feat\n if not self.training:\n self.im_feat_list = [self.im_feat_list[-1]]\n\n def query(self, points, calibs, transforms=None, labels=None):\n '''\n Given 3D points, query the network predictions for each point.\n Image features should be pre-computed before this call.\n store all intermediate features.\n query() function may behave differently during training/testing.\n :param points: [B, 3, N] world space coordinates of points\n :param calibs: [B, 3, 4] calibration matrices for each image\n :param transforms: Optional [B, 2, 3] image space coordinate transforms\n :param labels: Optional [B, Res, N] gt labeling\n :return: [B, Res, N] predictions for each point\n '''\n if labels is not None:\n self.labels = labels\n\n xyz = self.projection(points, calibs, transforms)\n xy = xyz[:, :2, :]\n z = xyz[:, 2:3, :]\n\n in_img = (xy[:, 0] >= -1.0) & (xy[:, 0] <= 1.0) & (xy[:, 1] >= -1.0) & (xy[:, 1] <= 1.0)\n\n z_feat = self.normalizer(z, calibs=calibs)\n\n if self.opt.skip_hourglass:\n tmpx_local_feature = self.index(self.tmpx, xy)\n\n self.intermediate_preds_list = []\n\n for im_feat in self.im_feat_list:\n # [B, Feat_i + z, N]\n im_feat = im_feat.reshape((-1, self.opt.temporalSize, 256, 128, 128)).reshape((-1, self.opt.temporalSize * 256, 128, 128))\n point_local_feat_list = [self.index(im_feat, xy), z_feat]\n\n if self.opt.skip_hourglass:\n point_local_feat_list.append(tmpx_local_feature)\n\n point_local_feat = torch.cat(point_local_feat_list, 1)\n\n # out of image plane is always set to 0\n pred = in_img[:,None].float() * self.surface_classifier(point_local_feat)\n self.intermediate_preds_list.append(pred)\n\n self.preds = self.intermediate_preds_list[-1]\n\n def get_im_feat(self):\n '''\n Get the image filter\n :return: [B, C_feat, H, W] image feature after filtering\n '''\n return self.im_feat_list[-1]\n\n def 
get_error(self):\n '''\n Hourglass has its own intermediate supervision scheme\n '''\n error = 0\n for preds in self.intermediate_preds_list:\n error += self.error_term(preds, self.labels)\n error /= len(self.intermediate_preds_list)\n \n return error\n\n def forward(self, images, points, calibs, transforms=None, labels=None):\n # Get image feature\n self.filter(images)\n\n # Phase 2: point query\n self.query(points=points, calibs=calibs, transforms=transforms, labels=labels)\n\n # get the prediction\n res = self.get_preds()\n \n # get the error\n error = self.get_error()\n\n return res, error\n"
] | [
[
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.MSELoss"
]
] |
nntrongnghia/learn-recsys | [
"43505c2663255d10e900f4cb98553eb5058e0a09"
] | [
"caser.py"
] | [
"from argparse import ArgumentParser\nfrom turtle import forward\nfrom typing import List\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom torchmetrics import RetrievalHitRate\n\nfrom lit_data import LitDataModule\nfrom lit_model import LitModel\nfrom ml100k import ML100KSequence\nfrom utils import bpr_loss\n\n\nclass Caser(nn.Module):\n def __init__(self, embedding_dims, num_users, num_items,\n L=5, num_hfilters=16, num_vfilters=4,\n dropout=0.05, **kwargs):\n super().__init__()\n self.P = nn.Embedding(num_users, embedding_dims)\n self.Q = nn.Embedding(num_items, embedding_dims)\n\n self.num_hfilters = num_hfilters\n self.num_vfilters = num_vfilters\n # Vertical convolution\n self.conv_v = nn.Conv2d(1, num_vfilters, (L, 1))\n # Horizontal convolutions\n self.conv_h = nn.ModuleList([\n nn.Sequential(\n nn.Conv2d(1, num_hfilters, (h, embedding_dims)),\n nn.ReLU(),\n nn.AdaptiveMaxPool2d((1, 1)))\n for h in range(1, L+1)])\n # Fully-connected layer\n self.fc = nn.Sequential(\n nn.Dropout(dropout),\n nn.Linear(\n num_vfilters*embedding_dims + num_hfilters*L,\n embedding_dims),\n nn.ReLU())\n self.Q_out = nn.Embedding(num_items, 2*embedding_dims)\n self.b_out = nn.Embedding(num_items, 1)\n\n def forward(self, user_id, seq, item_id):\n item_emb = self.Q(seq).unsqueeze(1)\n user_emb = self.P(user_id)\n\n v = self.conv_v(item_emb)\n h = torch.cat([filt(item_emb) for filt in self.conv_h], axis=-2)\n x = self.fc(torch.cat([v.flatten(1), h.flatten(1)], -1))\n x = torch.cat([x, user_emb], -1)\n logit = (self.Q_out(item_id)*x).sum(-1) + self.b_out(item_id).squeeze()\n return logit\n\n\nclass LitCaser(pl.LightningModule):\n def __init__(self, lr=0.002, hitrate_cutout=10, **kwargs):\n super().__init__()\n self.save_hyperparameters()\n self.model = Caser(**kwargs)\n self.lr = lr\n self.hitrate = RetrievalHitRate(k=hitrate_cutout)\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), self.lr, weight_decay=1e-5)\n\n def forward(self, user_id, seq, item_id):\n return self.model(user_id, seq, item_id)\n\n def training_step(self, batch, batch_idx):\n user_id, seq, pos_item, neg_item = batch\n pos_logit = self(user_id, seq, pos_item)\n neg_logit = self(user_id, seq, neg_item)\n loss = bpr_loss(pos_logit, neg_logit)\n return loss\n\n def validation_step(self, batch, batch_idx):\n user_id, seq, item_id, is_pos = batch\n logit = self(user_id, seq, item_id)\n score = torch.sigmoid(logit).reshape(-1,)\n self.hitrate.update(score, is_pos, user_id)\n return\n\n def training_epoch_end(self, outputs):\n avg_loss = torch.stack([x[\"loss\"] for x in outputs]).mean()\n self.logger.experiment.add_scalar(\n \"train/loss\", avg_loss, self.current_epoch)\n\n def validation_epoch_end(self, outputs):\n self.logger.experiment.add_scalar(\n f\"val/hit_rate@{self.hitrate.k}\",\n self.hitrate.compute(),\n self.current_epoch)\n self.hitrate.reset()\n\n\ndef main(args):\n data = LitDataModule(\n ML100KSequence(seq_len=args.seq_len),\n batch_size=args.batch_size)\n data.setup()\n model = LitCaser(\n num_users=data.num_users, num_items=data.num_items,\n embedding_dims=args.embedding_dims,\n seq_len=args.seq_len)\n\n logger = TensorBoardLogger(\"lightning_logs\",\n name=f\"Caser_{args.embedding_dims}_L{args.seq_len}\")\n trainer = pl.Trainer.from_argparse_args(args, logger=logger)\n trainer.fit(model, data)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n 
parser.add_argument(\"--embedding_dims\", type=int, default=10)\n parser.add_argument(\"--seq_len\", type=int, default=5)\n parser.add_argument(\"--batch_size\", type=int, default=1024)\n pl.Trainer.add_argparse_args(parser)\n args = parser.parse_args()\n main(args)\n"
] | [
[
"torch.stack",
"torch.nn.Linear",
"torch.nn.AdaptiveMaxPool2d",
"torch.nn.Embedding",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.sigmoid",
"torch.cat",
"torch.nn.Dropout"
]
] |
Adityaojas/Pose-Estimation-Clean | [
"9096512da1e42189b5ed98b61e304e93526a0be4"
] | [
"open_pose.py"
] | [
"# Imprt Libraries\nfrom scipy.spatial import distance as dist\nimport numpy as np\nimport pandas as pd\nimport progressbar\nimport cv2\n\n# Necessary Paths\nprotoFile = \"openpose-master/models/pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt\"\nweightsFile = \"openpose-master/models/pose_iter_160000.caffemodel\"\n\nvideo_path = 'samples/ex_11.mov'\ncsv_path = 'output/out_11.csv'\n\n# Load the model and the weights\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\n\n# Store the input video specifics\ncap = cv2.VideoCapture(video_path)\nn_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\nfps = int(cap.get(cv2.CAP_PROP_FPS))\nok, frame = cap.read()\n(frameHeight, frameWidth) = frame.shape[:2]\nh = 500\nw = int((h/frameHeight) * frameWidth)\n\n# Dimensions for inputing into the model\ninHeight = 368\ninWidth = 368\n\n# Set up the progressbar\nwidgets = [\"--[INFO]-- Analyzing Video: \", progressbar.Percentage(), \" \",\n progressbar.Bar(), \" \", progressbar.ETA()]\npbar = progressbar.ProgressBar(maxval = n_frames,\n widgets=widgets).start()\np = 0\n\ndata = []\nprevious_x, previous_y = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]\n\n# Define the output\nout_path = 'outputs/out_11.mp4'\noutput = cv2.VideoWriter(out_path, 0, fps, (w, h))\n\nfourcc = cv2.VideoWriter_fourcc(*'MP4V')\nwriter = None\n(f_h, f_w) = (h, w)\nzeros = None\n\n# There are 15 points in the skeleton\npairs = [[0,1], # head\n [1,2],[1,5], # sholders\n [2,3],[3,4],[5,6],[6,7], # arms\n [1,14],[14,11],[14,8], # hips\n [8,9],[9,10],[11,12],[12,13]] # legs\n\n# probability threshold fro prediction of the coordinates\nthresh = 0.1 \n\ncircle_color, line_color = (0,255,255), (0,255,0)\n\n# Start the iteration\nwhile True:\n ok, frame = cap.read()\n\n if ok != True:\n break\n \n frame = cv2.resize(frame, (w, h), cv2.INTER_AREA) \n frame_copy = np.copy(frame)\n \n # Input the frame into the model\n inpBlob = cv2.dnn.blobFromImage(frame_copy, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False)\n net.setInput(inpBlob)\n output = net.forward()\n \n H = output.shape[2]\n W = output.shape[3]\n \n points = []\n x_data, y_data = [], []\n \n # Iterate through the returned output and store the data\n for i in range(15):\n probMap = output[0, i, :, :]\n minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)\n x = (w * point[0]) / W\n y = (h * point[1]) / H\n \n if prob > thresh:\n points.append((int(x), int(y)))\n x_data.append(x)\n y_data.append(y)\n else :\n points.append((0, 0))\n x_data.append(previous_x[i])\n y_data.append(previous_y[i])\n \n for i in range(len(points)):\n cv2.circle(frame_copy, (points[i][0], points[i][1]), 2, circle_color, -1)\n \n for pair in pairs:\n partA = pair[0]\n partB = pair[1]\n cv2.line(frame_copy, points[partA], points[partB], line_color, 1, lineType=cv2.LINE_AA)\n \n if writer is None:\n writer = cv2.VideoWriter(out_path, fourcc, fps,\n (f_w, f_h), True)\n zeros = np.zeros((f_h, f_w), dtype=\"uint8\")\n \n writer.write(cv2.resize(frame_copy,(f_w, f_h)))\n \n cv2.imshow('frame' ,frame_copy)\n \n data.append(x_data + y_data)\n previous_x, previous_y = x_data, y_data\n \n p += 1\n pbar.update(p)\n \n key = cv2.waitKey(1) & 0xFF\n \n if key == ord(\"q\"):\n break\n\n# Save the output data from the video in CSV format\ndf = pd.DataFrame(data)\ndf.to_csv(csv_path, index = False)\nprint('save complete')\n\npbar.finish()\ncap.release()\ncv2.destroyAllWindows()"
] | [
[
"numpy.zeros",
"pandas.DataFrame",
"numpy.copy"
]
] |
wrutek/cnn_playground | [
"3ed013927838767d594e401dc8bbffc7ee363af2"
] | [
"dataset.py"
] | [
"from tensorflow.contrib.keras import datasets\nimport numpy as np\n\n\nclass MnistDataset:\n def __init__(self):\n (self.train_images, self.train_labels), (self.test_images, self.test_labels) = datasets.mnist.load_data()\n print('Dataset loaded')\n\n self.train_images = self.train_images/255.0 # type: np.ndarray\n self.test_images = self.test_images/255.0 # type: np.ndarray\n self.train_images = self.train_images.reshape((-1, 28, 28, 1))\n self.test_images = self.test_images.reshape((-1, 28, 28, 1))\n\n self.train_labels = self.train_labels.astype(np.int) # type: np.ndarray\n self.test_labels = self.test_labels.astype(np.int) # type: np.ndarray\n"
] | [
[
"tensorflow.contrib.keras.datasets.mnist.load_data"
]
] |
vincentschut/isce2 | [
"1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c"
] | [
"components/isceobj/TopsProc/runOverlapIfg.py"
] | [
"#\n# Author: Piyush Agram\n# Copyright 2016\n#\n\n\nimport isceobj\nimport stdproc\nfrom stdproc.stdproc import crossmul\nimport numpy as np\nfrom isceobj.Util.Poly2D import Poly2D\nimport argparse\nimport os\nimport copy\nfrom isceobj.Sensor.TOPS import createTOPSSwathSLCProduct\nfrom .runBurstIfg import adjustValidLineSample\n\n\ndef takeLooks(inimg, alks, rlks):\n '''\n Take looks.\n '''\n\n from mroipac.looks.Looks import Looks\n\n spl = os.path.splitext(inimg.filename)\n ext = '.{0}alks_{1}rlks'.format(alks, rlks)\n outfile = spl[0] + ext + spl[1]\n\n\n lkObj = Looks()\n lkObj.setDownLooks(alks)\n lkObj.setAcrossLooks(rlks)\n lkObj.setInputImage(inimg)\n lkObj.setOutputFilename(outfile)\n lkObj.looks()\n\n return outfile\n\ndef loadVirtualArray(fname):\n from osgeo import gdal\n\n ds = gdal.Open(fname, gdal.GA_ReadOnly)\n data = ds.GetRasterBand(1).ReadAsArray()\n\n ds = None\n return data\n\ndef multiply(masname, slvname, outname, rngname, fact, referenceFrame,\n flatten=True, alks=3, rlks=7, virtual=True):\n\n\n masImg = isceobj.createSlcImage()\n masImg.load( masname + '.xml')\n\n width = masImg.getWidth()\n length = masImg.getLength()\n\n\n if not virtual:\n reference = np.memmap(masname, dtype=np.complex64, mode='r', shape=(length,width))\n secondary = np.memmap(slvname, dtype=np.complex64, mode='r', shape=(length, width))\n else:\n reference = loadVirtualArray(masname + '.vrt')\n secondary = loadVirtualArray(slvname + '.vrt')\n \n if os.path.exists(rngname):\n rng2 = np.memmap(rngname, dtype=np.float32, mode='r', shape=(length,width))\n else:\n print('No range offsets provided')\n rng2 = np.zeros((length,width))\n\n cJ = np.complex64(-1j)\n \n #Zero out anytging outside the valid region:\n ifg = np.memmap(outname, dtype=np.complex64, mode='w+', shape=(length,width))\n firstS = referenceFrame.firstValidSample\n lastS = referenceFrame.firstValidSample + referenceFrame.numValidSamples -1\n firstL = referenceFrame.firstValidLine\n lastL = referenceFrame.firstValidLine + referenceFrame.numValidLines - 1\n for kk in range(firstL,lastL + 1):\n ifg[kk,firstS:lastS + 1] = reference[kk,firstS:lastS + 1] * np.conj(secondary[kk,firstS:lastS + 1])\n if flatten:\n phs = np.exp(cJ*fact*rng2[kk,firstS:lastS + 1])\n ifg[kk,firstS:lastS + 1] *= phs\n\n ####\n reference=None\n secondary=None\n ifg = None\n\n objInt = isceobj.createIntImage()\n objInt.setFilename(outname)\n objInt.setWidth(width)\n objInt.setLength(length)\n objInt.setAccessMode('READ')\n objInt.renderHdr()\n\n try:\n outfile = takeLooks(objInt, alks, rlks)\n print('Output: ', outfile)\n except:\n raise Exception('Failed to multilook ifgs')\n\n return objInt\n\n\ndef runOverlapIfg(self):\n '''Create overlap interferograms.\n '''\n\n virtual = self.useVirtualFiles\n if not self.doESD:\n return \n\n\n swathList = self._insar.getValidSwathList(self.swaths)\n\n for swath in swathList:\n\n if self._insar.numberOfCommonBursts[swath-1] < 2:\n print('Skipping overlap ifg for swath IW{0}'.format(swath))\n continue\n\n minBurst = self._insar.commonBurstStartReferenceIndex[swath-1]\n maxBurst = minBurst + self._insar.numberOfCommonBursts[swath-1]\n\n nBurst = maxBurst - minBurst\n\n ifgdir = os.path.join( self._insar.coarseIfgDirname, self._insar.overlapsSubDirname, 'IW{0}'.format(swath))\n os.makedirs(ifgdir, exist_ok=True)\n\n ####All indexing is w.r.t stack reference for overlaps\n maxBurst = maxBurst - 1\n \n\n ####Load relevant products\n topReference = self._insar.loadProduct(os.path.join(self._insar.referenceSlcOverlapProduct, 
'top_IW{0}.xml'.format(swath)))\n botReference = self._insar.loadProduct(os.path.join(self._insar.referenceSlcOverlapProduct, 'bottom_IW{0}.xml'.format(swath)))\n\n topCoreg = self._insar.loadProduct( os.path.join(self._insar.coregOverlapProduct,'top_IW{0}.xml'.format(swath)))\n botCoreg = self._insar.loadProduct( os.path.join(self._insar.coregOverlapProduct, 'bottom_IW{0}.xml'.format(swath)))\n\n coregdir = os.path.join(self._insar.coarseOffsetsDirname, self._insar.overlapsSubDirname, 'IW{0}'.format(swath))\n\n topIfg = createTOPSSwathSLCProduct()\n topIfg.configure()\n\n botIfg = createTOPSSwathSLCProduct()\n botIfg.configure()\n\n for ii in range(minBurst, maxBurst):\n \n jj = ii - minBurst\n\n ####Process the top bursts\n reference = topReference.bursts[jj] \n secondary = topCoreg.bursts[jj]\n\n referencename = reference.image.filename\n secondaryname = secondary.image.filename\n rdict = {'rangeOff' : os.path.join(coregdir, 'range_top_%02d_%02d.off'%(ii+1,ii+2)),\n 'azimuthOff': os.path.join(coregdir, 'azimuth_top_%02d_%02d.off'%(ii+1,ii+2))}\n \n \n adjustValidLineSample(reference,secondary)\n \n intname = os.path.join(ifgdir, '%s_top_%02d_%02d.int'%('burst',ii+1,ii+2))\n fact = 4 * np.pi * secondary.rangePixelSize / secondary.radarWavelength\n intimage = multiply(referencename, secondaryname, intname,\n rdict['rangeOff'], fact, reference, flatten=True,\n alks = self.numberAzimuthLooks, rlks=self.numberRangeLooks)\n\n burst = reference.clone()\n burst.image = intimage\n topIfg.bursts.append(burst)\n\n\n\n ####Process the bottom bursts\n reference = botReference.bursts[jj]\n secondary = botCoreg.bursts[jj]\n\n\n referencename = reference.image.filename\n secondaryname = secondary.image.filename\n rdict = {'rangeOff' : os.path.join(coregdir, 'range_bot_%02d_%02d.off'%(ii+1,ii+2)),\n 'azimuthOff': os.path.join(coregdir, 'azimuth_bot_%02d_%02d.off'%(ii+1,ii+2))}\n\n adjustValidLineSample(reference,secondary)\n intname = os.path.join(ifgdir, '%s_bot_%02d_%02d.int'%('burst',ii+1,ii+2))\n fact = 4 * np.pi * secondary.rangePixelSize / secondary.radarWavelength\n intimage = multiply(referencename, secondaryname, intname,\n rdict['rangeOff'], fact, reference, flatten=True,\n alks = self.numberAzimuthLooks, rlks=self.numberRangeLooks,\n virtual=virtual)\n\n burst = reference.clone()\n burst.image = intimage\n botIfg.bursts.append(burst)\n\n\n topIfg.numberOfBursts = len(topIfg.bursts)\n botIfg.numberOfBursts = len(botIfg.bursts)\n\n self._insar.saveProduct(topIfg, os.path.join(self._insar.coarseIfgOverlapProduct, 'top_IW{0}.xml'.format(swath)))\n self._insar.saveProduct(botIfg, os.path.join(self._insar.coarseIfgOverlapProduct, 'bottom_IW{0}.xml'.format(swath)))\n"
] | [
[
"numpy.complex64",
"numpy.zeros",
"numpy.conj",
"numpy.exp",
"numpy.memmap"
]
] |
kyspencer/GAMMA-PC-A-Greedy-Memetic-Algorithm-for-Storing-Cooling-Objects | [
"3462ff8cc555ad646b59909c661ca58b21294a7b"
] | [
"SampleScripts/tests/tests_moma.py"
] | [
"# test_moma.py\n# This file tests moma_dynamic.py for errors.\n# Author: Kristina Yancey Spencer\n\nimport unittest\nfrom mock import Mock\nimport binpacking_dynamic as bp\nimport coolcookies\nimport h5py\nimport mooproblem\nimport moma_dynamic as moma\nimport numpy as np\nimport solutions_dynamic as sols\nfrom random import choice, sample\n\n\[email protected]('too much output')\nclass LocalSearchTests(unittest.TestCase):\n\n def setUp(self):\n storedchrom = np.load('tests/chrom0.npz')\n chrom = storedchrom[storedchrom.files[0]]\n self.n = len(chrom)\n storedtfill = np.load('tests/tfill0.npz')\n tfill = storedtfill[storedtfill.files[0]]\n cookies = coolcookies.makeobjects(self.n, 100, 'tests/Cookies1000.txt')\n bpp = bp.BPP(self.n, 24, 300, cookies)\n self.moop = mooproblem.MOCookieProblem(self.n, 24, 300, 8, cookies)\n self.gen = moma.Generation(self.n, 100, 200, cookies, bpp, self.moop)\n self.m = sols.MultiSol(0, chrom, tfill, bpp)\n self.m = self.gen.moop.calcfeasibility(self.m)\n\n def test_paretols(self):\n neighbors = self.gen.paretols(self.m, 25, retrieve=True)\n for n in range(len(neighbors)):\n self.assertNotEqual(self.m.getid(), neighbors[n].getid())\n self.assertFalse(np.all(np.equal(self.m.getx(),\n neighbors[n].getx())))\n\n def test_itemswap(self):\n solution = self.gen.itemswap(self.m)\n self.moop.calcfeasibility(solution)\n\n def test_partswap(self):\n solution = self.gen.partswap(self.m)\n x = solution.getx()\n boxitems = np.sum(x, axis=1)\n for i in range(self.n):\n self.assertLessEqual(boxitems[i], self.moop.boxcap,\n msg='Over capacity error')\n\n def test_splitbin(self):\n solution = self.gen.splitbin(self.m)\n self.moop.calcfeasibility(solution)\n x = solution.getx()\n itemspresent = np.sum(x, axis=0)\n for j in range(self.n):\n self.assertEqual(itemspresent[j], 1, msg='No replacement error')\n\nclass GenerationTests(unittest.TestCase):\n\n def setUp(self):\n n = 24\n cookies = coolcookies.makeobjects(n, 6, 'tests/Cookies24.txt')\n moop = mooproblem.MOCookieProblem(n, 8, 15, 2, cookies)\n self.bpp = bp.BPP(n, 8, 15, cookies)\n self.gen = moma.Generation(n, 5, 10, cookies, self.bpp, moop)\n\n def test_initialp(self):\n self.gen.initialp('tests/seed.txt')\n self.assertEqual(len(self.gen.newgenes), 5)\n self.assertEqual(len(self.gen.newgenes[0][0]), 24)\n\n def test_initialtfill(self):\n tfill = self.gen.initialtfill()\n self.assertEqual(len(tfill), 24)\n\n def test_getpointtoswap(self):\n # Test for success\n vl1 = [7, 8, 10, 11]\n vl2 = [2, 5, 6, 9]\n p1, p2 = self.gen.getpointtoswap(vl1, 1263.0, vl2, 1437.0)\n self.assertTrue(p1 < 4 and p2 < 4)\n # Test for failure\n vl0 = [0, 1, 3, 4]\n fail = self.gen.getpointtoswap(vl0, 1090.0, vl1, 1437.0)\n self.assertFalse(fail)\n\n def test_findstartforswap(self):\n bool1 = [True, False, True, True, True]\n bool2 = [False, False, True, False]\n bool3 = [True, True, True]\n start1 = self.gen.findstartforswap(bool1)\n start2 = self.gen.findstartforswap(bool2)\n start3 = self.gen.findstartforswap(bool3)\n self.assertEqual(start1, 2)\n self.assertEqual(start2, len(bool2))\n self.assertEqual(start3, 1)\n\n def test_getrandsecondbin(self):\n vlrep = [[0, 1, 3, 4], [7, 8, 10, 11], [2, 5, 6, 9],\n [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23]]\n tfill = np.zeros(24, dtype=np.float)\n tfill[:5] = [1090.3, 1263.9, 1437.5, 1950.0, 2550.0]\n i2 = self.gen.getrandsecondbin(0, vlrep, tfill, range(5))\n self.assertTrue(i2 in [1, 2, 3, 4])\n\n def test_getseedvalue(self):\n self.assertEqual(self.gen.getseedvalue('tests/seed.txt'), 
3572)\n\n def test_fittruncation(self):\n # Create 20 solutions with cd-values\n self.gen.archive = {}\n for m in range(20):\n tfill = self.gen.initialtfill()\n newsol = sols.MultiSol(m, sample(range(24), 24), tfill, self.bpp)\n newsol.updatefitvals([10, 50, 900])\n newsol.updaterank(choice([1, 2]))\n newsol.updatecd(choice(range(10)) / 10)\n self.gen.archive[m] = newsol\n approxset = [m for k, m in self.gen.archive.items() if m.getrank() == 1]\n keys = [k for k, m in self.gen.archive.items() if m.getrank() == 1]\n self.gen.fittruncation(keys, approxset, 5)\n newapproxset = [m for k, m in self.gen.archive.items() if m.getrank() == 1]\n self.assertEqual(len(newapproxset), 5)\n\n def test_reduce(self):\n # Create 10 solutions\n for m in range(10):\n tfill = self.gen.initialtfill()\n newsol = sols.MultiSol(m, sample(range(24), 24), tfill, self.bpp)\n newsol.updaterank(round(m / 2))\n self.gen.archive[m] = newsol\n # Test for normal reduction\n self.gen.reduce(6)\n self.assertEqual(len(self.gen.archive), 6)\n # Recreate 10 solutions with different crowding distance values\n for m in range(6):\n self.gen.archive[m].updaterank(round(m / 5))\n self.gen.archive[m].updatecd(m)\n for m in range(4):\n tfill = self.gen.initialtfill()\n newsol = sols.MultiSol(m, sample(range(24), 24), tfill, self.bpp)\n newsol.updaterank(round((m + 6) / 5))\n newsol.updatecd(m + 6)\n self.gen.archive[m + 6] = newsol\n # Test for cd reduction\n self.gen.reduce(6)\n self.assertEqual(len(self.gen.archive), 6)\n\n def test_approx(self):\n # Create 100 solutions\n self.gen.archive = {}\n ids = sample(range(1000), 100)\n for m in range(100):\n tfill = self.gen.initialtfill()\n newsol = sols.MultiSol(m, sample(range(24), 24), tfill, self.bpp)\n newsol = self.gen.moop.calcfeasibility(newsol)\n newsol = bp.coordarrays(newsol)\n fits = self.gen.moop.calcfits(newsol)\n newsol.updatefitvals(fits)\n self.gen.archive[newsol.index] = newsol\n ndset = self.gen.finalapproxset()\n self.assertNotEqual(len(ndset), 0)\n\n\n# class OutputTests(unittest.TestCase):\n#\n# def test_savexys(self):\n# ndset = []\n# mock = Mock()\n# mock.getindex.return_value = 1\n# mock.getx.return_value = np.matrix([[1, 0, 0, 0, 0],\n# [0, 1, 0, 0, 0],\n# [0, 0, 1, 0, 0],\n# [0, 0, 0, 1, 0],\n# [0, 0, 0, 0, 1]])\n# mock.gety.return_value = np.ones(5)\n# for m in range(10):\n# ndset.append(mock)\n# nsgaii.savexys(ndset, 'tests/')\n# h5f = h5py.File('tests/xymatrices.h5', 'r')\n# gname = h5f.keys()\n# self.assertEqual(gname[0], u'xmatrices')\n# xitems = h5f[gname[0]].items()\n# yitems = h5f[gname[1]].items()\n# self.assertEqual(len(xitems), len(yitems))\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.load",
"numpy.zeros",
"numpy.sum"
]
] |
ojotoxy/TensorFlow-Image-Classifier-using-Inception-v3-deep-learning | [
"b73bfc379e8ac3080d4dc9e756833c5a30e5de4f"
] | [
"train.py"
] | [
"#!/usr/bin/env python3\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Simple transfer learning with an Inception v3 architecture model.\n\nWith support for TensorBoard.\n\nThis example shows how to take a Inception v3 architecture model trained on\nImageNet images, and train a new top layer that can recognize other classes of\nimages.\n\nThe top layer receives as input a 2048-dimensional vector for each image. We\ntrain a softmax layer on top of this representation. Assuming the softmax layer\ncontains N labels, this corresponds to learning N + 2048*N model parameters\ncorresponding to the learned biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. Once your images are\nprepared, you can run the training with a command like this:\n\n\n```bash\nbazel build tensorflow/examples/image_retraining:retrain && \\\nbazel-bin/tensorflow/examples/image_retraining/retrain \\\n --image_dir ~/flower_photos\n```\n\nOr, if you have a pip installation of tensorflow, `retrain.py` can be run\nwithout bazel:\n\n```bash\npython tensorflow/examples/image_retraining/retrain.py \\\n --image_dir ~/flower_photos\n```\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the label_image sample code.\n\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nfrom datetime import datetime\nimport hashlib\nimport os.path\nimport random\nimport re\nimport struct\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.util import compat\n\nFLAGS = None\n\n# These are all parameters that are tied to the particular model architecture\n# we're using for Inception v3. These include things like tensor names and their\n# sizes. 
If you want to adapt this script to work with another model, you will\n# need to update these to reflect the values in the network you're using.\n# pylint: disable=line-too-long\nDATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n# pylint: enable=line-too-long\nBOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'\nBOTTLENECK_TENSOR_SIZE = 2048\nMODEL_INPUT_WIDTH = 299\nMODEL_INPUT_HEIGHT = 299\nMODEL_INPUT_DEPTH = 3\nJPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\nRESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'\nMAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1 # ~134M\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n A dictionary containing an entry for each label subfolder, with images split\n into training, testing, and validation sets within each label.\n \"\"\"\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in gfile.Walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n if is_root_dir:\n is_root_dir = False\n continue\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(sub_dir)\n if dir_name == image_dir:\n continue\n print(\"Looking for images in '\" + dir_name + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' + extension)\n file_list.extend(gfile.Glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:\n print('WARNING: Folder {} has more than {} images. Some images will '\n 'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. 
For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()\n percentage_hash = ((int(hash_name_hashed, 16) %\n (MAX_NUM_IMAGES_PER_CLASS + 1)) *\n (100.0 / MAX_NUM_IMAGES_PER_CLASS))\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category):\n \"\"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n \"\"\"\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '.txt'\n\n\ndef create_inception_graph():\n \"\"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\n\n Returns:\n Graph holding the trained Inception network, and various tensors we'll be\n manipulating.\n \"\"\"\n with tf.Graph().as_default() as graph:\n model_filename = os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb')\n with gfile.FastGFile(model_filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (\n tf.import_graph_def(graph_def, name='', return_elements=[\n BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,\n RESIZED_INPUT_TENSOR_NAME]))\n return graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: String of raw JPEG data.\n image_data_tensor: Input data layer in the graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n bottleneck_values = sess.run(\n bottleneck_tensor,\n {image_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef maybe_download_and_extract():\n \"\"\"Download and extract model tar file.\n\n If the pretrained model we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a directory.\n \"\"\"\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL,\n filepath,\n _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef write_list_of_floats_to_file(list_of_floats, file_path):\n \"\"\"Writes a given list of floats to a binary file.\n\n Args:\n list_of_floats: List of floats we want to write to a file.\n file_path: Path to a file where list of floats will be stored.\n\n \"\"\"\n\n s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)\n with open(file_path, 'wb') as f:\n f.write(s)\n\n\ndef read_list_of_floats_from_file(file_path):\n \"\"\"Reads list of floats from a given file.\n\n Args:\n file_path: Path to a file where list of floats was stored.\n Returns:\n Array of bottleneck values (list of 
floats).\n\n \"\"\"\n\n with open(file_path, 'rb') as f:\n s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())\n return list(s)\n\n\nbottleneck_path_2_bottleneck_values = {}\n\n\ndef create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Create a single bottleneck file.\"\"\"\n print('Creating bottleneck at ' + bottleneck_path)\n image_path = get_image_path(image_lists, label_name, index,\n image_dir, category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n image_data = gfile.FastGFile(image_path, 'rb').read()\n try:\n bottleneck_values = run_bottleneck_on_image(\n sess, image_data, jpeg_data_tensor, bottleneck_tensor)\n except:\n raise RuntimeError('Error during processing file %s' % image_path)\n\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with open(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\n category, bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Retrieves or calculates bottleneck values for an image.\n\n If a cached version of the bottleneck data exists on-disk, return that,\n otherwise calculate the data and save it to disk for future use.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of which set to pull images from - training, testing,\n or validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\n bottleneck_tensor: The output tensor for the bottleneck values.\n\n Returns:\n Numpy array of values produced by the bottleneck layer for the image.\n \"\"\"\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index,\n bottleneck_dir, category)\n if not os.path.exists(bottleneck_path):\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n did_hit_error = False\n try:\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n except ValueError:\n print('Invalid float found, recreating bottleneck')\n did_hit_error = True\n if did_hit_error:\n create_bottleneck_file(bottleneck_path, image_lists, label_name, index,\n image_dir, category, sess, jpeg_data_tensor,\n bottleneck_tensor)\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n # Allow exceptions to propagate here, since they shouldn't happen after a\n # fresh creation\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n return bottleneck_values\n\n\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor):\n \"\"\"Ensures all the training, testing, and validation 
bottlenecks are cached.\n\n Because we're likely to read the same image multiple times (if there are no\n distortions applied during training) it can speed things up a lot if we\n calculate the bottleneck layer values once for each image during\n preprocessing, and then just read those cached values repeatedly during\n training. Here we go through all the images we've found, calculate those\n values, and save them off.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n image_dir: Root folder string of the subfolders containing the training\n images.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: Input tensor for jpeg data from file.\n bottleneck_tensor: The penultimate output layer of the graph.\n\n Returns:\n Nothing.\n \"\"\"\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(sess, image_lists, label_name, index,\n image_dir, category, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor)\n\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n print(str(how_many_bottlenecks) + ' bottleneck files created.')\n\n\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\n bottleneck_dir, image_dir, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for cached images.\n\n If no distortions are being applied, this function can retrieve the cached\n bottleneck values directly from disk for images. It picks a random set of\n images from the specified category.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: If positive, a random sample of this size will be chosen.\n If negative, all bottlenecks will be retrieved.\n category: Name string of which set to pull from - training, testing, or\n validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n image_dir: Root folder string of the subfolders containing the training\n images.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays, their corresponding ground truths, and the\n relevant filenames.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n filenames = []\n if how_many >= 0:\n # Retrieve a random sample of bottlenecks.\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_name = get_image_path(image_lists, label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\n image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n else:\n # Retrieve all bottlenecks.\n for label_index, label_name in enumerate(image_lists.keys()):\n for image_index, image_name in enumerate(\n image_lists[label_name][category]):\n image_name = get_image_path(image_lists, 
label_name, image_index,\n image_dir, category)\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\n image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n filenames.append(image_name)\n return bottlenecks, ground_truths, filenames\n\n\ndef get_random_distorted_bottlenecks(\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\n distorted_image, resized_input_tensor, bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for training images, after distortions.\n\n If we're training with distortions like crops, scales, or flips, we have to\n recalculate the full model for every image, and so we can't use cached\n bottleneck values. Instead we find random images for the requested category,\n run them through the distortion graph, and then the full graph to get the\n bottleneck results for each.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: The integer number of bottleneck values to return.\n category: Name string of which set of images to fetch - training, testing,\n or validation.\n image_dir: Root folder string of the subfolders containing the training\n images.\n input_jpeg_tensor: The input layer we feed the image data to.\n distorted_image: The output node of the distortion graph.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\n category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\n # Note that we materialize the distorted_image_data as a numpy array before\n # sending running inference on the image. 
This involves 2 memory copies and\n # might be optimized in other implementations.\n distorted_image_data = sess.run(distorted_image,\n {input_jpeg_tensor: jpeg_data})\n bottleneck = run_bottleneck_on_image(sess, distorted_image_data,\n resized_input_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n return bottlenecks, ground_truths\n\n\ndef should_distort_images(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Whether any distortions are enabled, from the input flags.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n\n Returns:\n Boolean value indicating whether any distortions should be applied.\n \"\"\"\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\n (random_brightness != 0))\n\n\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Creates the operations to apply the specified distortions.\n\n During training it can help to improve the results if we run the images\n through simple distortions like crops, scales, and flips. These reflect the\n kind of variations we expect in the real world, and so can help train the\n model to cope with natural data more effectively. Here we take the supplied\n parameters and construct a network of operations to apply them to an image.\n\n Cropping\n ~~~~~~~~\n\n Cropping is done by placing a bounding box at a random position in the full\n image. The cropping parameter controls the size of that box relative to the\n input image. If it's zero, then the box is the same size as the input and no\n cropping is performed. If the value is 50%, then the crop box will be half the\n width and height of the input. In a diagram it looks like this:\n\n < width >\n +---------------------+\n | |\n | width - crop% |\n | < > |\n | +------+ |\n | | | |\n | | | |\n | | | |\n | +------+ |\n | |\n | |\n +---------------------+\n\n Scaling\n ~~~~~~~\n\n Scaling is a lot like cropping, except that the bounding box is always\n centered and its size varies randomly within the given range. For example if\n the scale percentage is zero, then the bounding box is the same size as the\n input and no scaling is applied. 
If it's 50%, then the bounding box will be in\n a random range between half the width and height and full size.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n graph.\n\n Returns:\n The jpeg input layer and the distorted result tensor.\n \"\"\"\n\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.multiply(margin_scale_value, resize_scale_value)\n precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)\n precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)\n precrop_shape = tf.stack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\n precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\n cropped_image = tf.random_crop(precropped_image_3d,\n [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,\n MODEL_INPUT_DEPTH])\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.multiply(flipped_image, brightness_value)\n distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.summary.scalar('mean', mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n tf.summary.scalar('stddev', stddev)\n tf.summary.scalar('max', tf.reduce_max(var))\n tf.summary.scalar('min', tf.reduce_min(var))\n tf.summary.histogram('histogram', var)\n\n\ndef add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):\n \"\"\"Adds a new softmax and fully-connected layer for training.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The set up for the softmax and fully-connected layers is based on:\n https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that produces results.\n bottleneck_tensor: The output of the main CNN graph.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n with 
tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(tf.float32,\n [None, class_count],\n name='GroundTruthInput')\n\n # Organizing the following ops as `final_training_ops` so they're easier\n # to see in TensorBoard\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n initial_value = tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count],\n stddev=0.001)\n\n layer_weights = tf.Variable(initial_value, name='final_weights')\n\n variable_summaries(layer_weights)\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases)\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.summary.histogram('pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n tf.summary.histogram('activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n labels=ground_truth_input, logits=logits)\n with tf.name_scope('total'):\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.summary.scalar('cross_entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)\n train_step = optimizer.minimize(cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Tuple of (evaluation step, prediction).\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n prediction = tf.argmax(result_tensor, 1)\n correct_prediction = tf.equal(\n prediction, tf.argmax(ground_truth_tensor, 1))\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', evaluation_step)\n return evaluation_step, prediction\n\n\ndef main(_):\n # Setup the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n\n # Set up the pre-trained graph.\n maybe_download_and_extract()\n graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (\n create_inception_graph())\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n print('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n print('Only one valid folder of images found at ' + FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n\n with tf.Session(graph=graph) as sess:\n\n if do_distort_images:\n # We will be applying 
distortions, so setup the operations we'll need.\n (distorted_jpeg_data_tensor,\n distorted_image_tensor) = add_input_distortions(\n FLAGS.flip_left_right, FLAGS.random_crop,\n FLAGS.random_scale, FLAGS.random_brightness)\n else:\n # We'll make sure we've calculated the 'bottleneck' image summaries and\n # cached them on disk.\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir,\n FLAGS.bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n\n # Add the new layer that we'll be training.\n (train_step, cross_entropy, bottleneck_input, ground_truth_input,\n final_tensor) = add_final_training_ops(len(image_lists.keys()),\n FLAGS.final_tensor_name,\n bottleneck_tensor)\n\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step, prediction = add_evaluation_step(\n final_tensor, ground_truth_input)\n\n # Merge all the summaries and write them out to the summaries_dir\n merged = tf.summary.merge_all()\n train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n\n validation_writer = tf.summary.FileWriter(\n FLAGS.summaries_dir + '/validation')\n\n # Set up all our weights to their initial default values.\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # Run the training for as many cycles as requested on the command line.\n for i in range(FLAGS.how_many_training_steps):\n # Get a batch of input bottleneck values, either calculated fresh every\n # time with distortions applied, or from the cache stored on disk.\n if do_distort_images:\n (train_bottlenecks,\n train_ground_truth) = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n (train_bottlenecks,\n train_ground_truth, _) = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor)\n # Feed the bottlenecks and ground truth into the graph, and run a training\n # step. Capture training summaries for TensorBoard with the `merged` op.\n\n train_summary, _ = sess.run(\n [merged, train_step],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n # !! 
output of the training accuracy *changed\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\n train_accuracy, cross_entropy_value = sess.run(\n [evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,\n train_accuracy * 100))\n print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,\n cross_entropy_value))\n validation_bottlenecks, validation_ground_truth, _ = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n validation_summary, validation_accuracy = sess.run(\n [merged, evaluation_step],\n feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n print('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %\n (datetime.now(), i, validation_accuracy * 100,\n len(validation_bottlenecks)))\n\n # We've completed all our training, so run a final test evaluation on\n # some new images we haven't used before.\n test_bottlenecks, test_ground_truth, test_filenames = (\n get_random_cached_bottlenecks(sess, image_lists, FLAGS.test_batch_size,\n 'testing', FLAGS.bottleneck_dir,\n FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor))\n test_accuracy, predictions = sess.run(\n [evaluation_step, prediction],\n feed_dict={bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth})\n print('Final test accuracy = %.1f%% (N=%d)' % (\n test_accuracy * 100, len(test_bottlenecks)))\n\n if FLAGS.print_misclassified_test_images:\n print('=== MISCLASSIFIED TEST IMAGES ===')\n for i, test_filename in enumerate(test_filenames):\n if predictions[i] != test_ground_truth[i].argmax():\n print('%70s %s' % (test_filename,\n list(image_lists.keys())[predictions[i]]))\n\n \n print('writing checkpoint...')\n saver = tf.train.Saver()\n \n saver.save(sess, FLAGS.output_graph.replace('pb', 'ckpt'))\n # Write out the trained graph and labels with the weights stored as\n # constants.\n# output_graph_def = graph_util.convert_variables_to_constants(\n# sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n# with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:\n# f.write(output_graph_def.SerializeToString())\n \n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--image_dir',\t# defined in train.sh\n type=str,\n default='training_dataset',\n help='Path to folders of labeled images.'\n )\n parser.add_argument(\n '--output_graph',\n type=str,\n default='tf_files/retrained_graph.pb',\n help='Where to save the trained graph.'\n )\n parser.add_argument(\n '--output_labels',\n type=str,\n default='tf_files/retrained_labels.txt',\n help='Where to save the trained graph\\'s labels.'\n )\n parser.add_argument(\n '--summaries_dir',\n type=str,\n default='tf_files/training_summaries/basic',\n help='Where to save summary logs for TensorBoard.'\n )\n parser.add_argument(\n '--how_many_training_steps',\n type=int,\n default=1000,\n help='How many training steps to run before ending.'\n )\n parser.add_argument(\n '--learning_rate',\n type=float,\n 
default=0.01,\n help='How large a learning rate to use when training.'\n )\n parser.add_argument(\n '--testing_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a test set.'\n )\n parser.add_argument(\n '--validation_percentage',\n type=int,\n default=10,\n help='What percentage of images to use as a validation set.'\n )\n parser.add_argument(\n '--eval_step_interval',\n type=int,\n default=10,\n help='How often to evaluate the training results.'\n )\n # added in the shell command *changed\n parser.add_argument(\n '--train_batch_size',\n type=int,\n default=100,\n help='How many images to train on at a time.'\n )\n parser.add_argument(\n '--test_batch_size',\n type=int,\n default=-1,\n help=\"\"\"\\\n How many images to test on. This test set is only used once, to evaluate\n the final accuracy of the model after training completes.\n A value of -1 causes the entire test set to be used, which leads to more\n stable results across runs.\\\n \"\"\"\n )\n parser.add_argument(\n '--validation_batch_size',\n type=int,\n default=100,\n help=\"\"\"\\\n How many images to use in an evaluation batch. This validation set is\n used much more often than the test set, and is an early indicator of how\n accurate the model is during training.\n A value of -1 causes the entire validation set to be used, which leads to\n more stable results across training iterations, but may be slower on large\n training sets.\\\n \"\"\"\n )\n parser.add_argument(\n '--print_misclassified_test_images',\n default=False,\n help=\"\"\"\\\n Whether to print out a list of all misclassified test images.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--model_dir',\n type=str,\n default='inception',\n help=\"\"\"\\\n Path to classify_image_graph_def.pb,\n imagenet_synset_to_human_label_map.txt, and\n imagenet_2012_challenge_label_map_proto.pbtxt.\\\n \"\"\"\n )\n parser.add_argument(\n '--bottleneck_dir',\n type=str,\n default='tf_files/bottlenecks',\n help='Path to cache bottleneck layer values as files.'\n )\n parser.add_argument(\n '--final_tensor_name',\n type=str,\n default='final_result',\n help=\"\"\"\\\n The name of the output classification layer in the retrained graph.\\\n \"\"\"\n )\n parser.add_argument(\n '--flip_left_right',\n default=False,\n help=\"\"\"\\\n Whether to randomly flip half of the training images horizontally.\\\n \"\"\",\n action='store_true'\n )\n parser.add_argument(\n '--random_crop',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much of a margin to randomly crop off the\n training images.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_scale',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly scale up the size of the\n training images by.\\\n \"\"\"\n )\n parser.add_argument(\n '--random_brightness',\n type=int,\n default=0,\n help=\"\"\"\\\n A percentage determining how much to randomly multiply the training image\n input pixels up or down by.\\\n \"\"\"\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.summary.scalar",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.reduce_max",
"tensorflow.matmul",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.image.resize_bilinear",
"tensorflow.Variable",
"tensorflow.summary.FileWriter",
"tensorflow.nn.softmax",
"tensorflow.summary.histogram",
"tensorflow.global_variables_initializer",
"tensorflow.python.platform.gfile.Glob",
"tensorflow.multiply",
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.Graph",
"tensorflow.image.decode_jpeg",
"tensorflow.import_graph_def",
"tensorflow.gfile.MakeDirs",
"tensorflow.reduce_min",
"tensorflow.constant",
"tensorflow.GraphDef",
"tensorflow.stack",
"tensorflow.random_crop",
"numpy.zeros",
"tensorflow.app.run",
"tensorflow.image.random_flip_left_right",
"tensorflow.logging.fatal",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.placeholder_with_default",
"tensorflow.placeholder",
"tensorflow.zeros",
"numpy.squeeze",
"tensorflow.summary.merge_all",
"tensorflow.python.platform.gfile.Walk",
"tensorflow.truncated_normal",
"tensorflow.reduce_mean",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.argmax",
"tensorflow.square",
"tensorflow.gfile.Exists",
"tensorflow.gfile.DeleteRecursively"
]
] |
NonAbelianCapu/PyCompton | [
"a9f79d77b5d312a45bad341f961d36b703d12579"
] | [
"src/simulation.py"
] | [
"import particle as p\nimport detector as dct\nimport particle_source as ps\nimport cross_sections as xs\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\nclass Simulation(object):\n \"\"\"docstring for .\"\"\"\n\n def __init__(self, T, dt, detector, particle_source):\n\n self.T = T\n self.dt = dt\n\n if not isinstance(detector, dct.Detector):\n raise TypeError(\"Detector is not Detector class\")\n\n self.detector = detector\n\n if not isinstance(particle_source, ps.Particle_Source):\n raise TypeError(\"Particle source is not Particle Source Class\")\n\n self.p_source = particle_source\n\n\n\n def gen_flight_distance(self, particle):\n\n r = random.random()\n return -np.log(r)\n\n\n def evolve(self):\n pass\n"
] | [
[
"numpy.log"
]
] |
trinhvietthanh/federated-learning | [
"95d79b76558e6d801f7d2b4d21bf68a7889b920a"
] | [
"src/baseline_main.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Python version: 3.6\n\n\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom utils import get_dataset\nfrom options import args_parser\nfrom update import test_inference\nfrom models import MLP, CNNMnist, CNNFashion_Mnist, CNNCifar\n\n\nif __name__ == '__main__':\n args = args_parser()\n if args.gpu:\n torch.cuda.set_device(args.gpu)\n device = 'cuda' if args.gpu else 'cpu'\n\n # load datasets\n train_dataset, test_dataset, _ = get_dataset(args)\n\n # BUILD MODEL\n if args.model == 'cnn':\n # Convolutional neural netork\n if args.dataset == 'mnist':\n global_model = CNNMnist(args=args)\n elif args.dataset == 'fmnist':\n global_model = CNNFashion_Mnist(args=args)\n elif args.dataset == 'cifar':\n global_model = CNNCifar(args=args)\n elif args.model == 'mlp':\n # Multi-layer preceptron\n img_size = train_dataset[0][0].shape\n len_in = 1\n for x in img_size:\n len_in *= x\n # global_model = MLP(dim_in=len_in, dim_hidden=64,\n # dim_out=args.num_classes)\n global_model = MLP(dim_in=len_in, dim_hidden=200,\n dim_out=args.num_classes)\n else:\n exit('Error: unrecognized model')\n\n # Set the model to train and send it to device.\n global_model.to(device)\n global_model.train()\n print(global_model)\n\n # Training\n # Set optimizer and criterion\n if args.optimizer == 'sgd':\n optimizer = torch.optim.SGD(global_model.parameters(), lr=args.lr,\n momentum=0.5)\n elif args.optimizer == 'adam':\n optimizer = torch.optim.Adam(global_model.parameters(), lr=args.lr,\n weight_decay=1e-4)\n\n trainloader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n criterion = torch.nn.NLLLoss().to(device)\n epoch_loss = []\n\n for epoch in tqdm(range(args.epochs)):\n batch_loss = []\n\n for batch_idx, (images, labels) in enumerate(trainloader):\n images, labels = images.to(device), labels.to(device)\n\n optimizer.zero_grad()\n outputs = global_model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n if batch_idx % 50 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch+1, batch_idx * len(images), len(trainloader.dataset),\n 100. * batch_idx / len(trainloader), loss.item()))\n batch_loss.append(loss.item())\n\n loss_avg = sum(batch_loss)/len(batch_loss)\n print('\\nTrain loss:', loss_avg)\n epoch_loss.append(loss_avg)\n\n # Plot loss\n plt.figure()\n plt.plot(range(len(epoch_loss)), epoch_loss)\n plt.xlabel('epochs')\n plt.ylabel('Train loss')\n plt.savefig('../save/nn_{}_{}_{}.png'.format(args.dataset, args.model,\n args.epochs))\n\n # testing\n test_acc, test_loss = test_inference(args, global_model, test_dataset)\n print('Test on', len(test_dataset), 'samples')\n print(\"Test Accuracy: {:.2f}%\".format(100*test_acc))\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.NLLLoss",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"torch.cuda.set_device"
]
] |
akin-aroge/NXCT | [
"bae6347ba8da1e433a46520bd346babf64f25481"
] | [
"fctest/__PolCurve__.py"
] | [
"import matplotlib.pyplot as plt\n\n\nclass PolCurve:\n\n def __init__(self, current_density, voltage):\n\n self.current_density = current_density\n self.voltage = voltage\n\n def plot_pol_curve(self, label=None, return_axes=None, ax=None):\n\n if ax is None:\n ax = plt.gca()\n\n ax.plot(self.current_density, self.voltage, '-.', label=label)\n ax.set_xlabel('current density, j (A/$cm^2$)')\n ax.set_ylabel('voltage (V)')\n ax.set_title('polarisation curve')\n ax.legend()\n ax.grid(True)\n\n if return_axes is not None and return_axes == True:\n plt.close()\n return ax"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.close"
]
] |
Lachimax/FRB | [
"aa3bb6828db0cf81931dac35cf7bd7184fc3b598"
] | [
"frb/figures/utils.py"
] | [
"\"\"\" Simple utilities for figures\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\n\n\ndef log_me(val, err):\n \"\"\"\n Generate log and error from linear input\n \n Args:\n val (float): \n err (float): \n\n Returns:\n float, (float/None):\n Returns none if the err is negative\n\n \"\"\"\n if err < 0.:\n xerr = None\n else:\n xerr = np.array([[np.log10(val) - np.log10(val - err)],\n [-np.log10(val) + np.log10(val + err)]])\n return np.log10(val), xerr\n\n\ndef set_fontsize(ax,fsz):\n \"\"\"\n Set the fontsize throughout an Axis\n \n Args:\n ax (Matplotlib Axis): \n fsz (float): Font size\n\n Returns:\n\n \"\"\"\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(fsz)\n\n\ndef set_bokeh_fontsize(p, fsz):\n \"\"\"\n Adjust font size for Bokeh axes\n\n Args:\n p (Bokeh plot class):\n sz (int): Font size\n \"\"\"\n p.xaxis.axis_label_text_font_size = '{:d}pt'.format(fsz)\n p.xaxis.major_label_text_font_size = \"{:d}pt\".format(fsz)\n #\n p.yaxis.axis_label_text_font_size = '{:d}pt'.format(fsz)\n p.yaxis.major_label_text_font_size = \"{:d}pt\".format(fsz)\n\n\ndef set_mplrc():\n \"\"\"\n Font fussing for matplotlib\n\n Returns:\n\n \"\"\"\n mpl.rcParams['mathtext.default'] = 'it'\n mpl.rcParams['font.size'] = 12\n mpl.rc('font',family='Times New Roman')\n mpl.rcParams['text.latex.preamble'] = [r'\\boldmath']\n mpl.rc('text', usetex=True)\n"
] | [
[
"numpy.log10",
"matplotlib.rc"
]
] |
dendisuhubdy/ClassyVision | [
"c7f8de4615181b5a14dd5ec44fa72bebb790e886"
] | [
"classy_vision/models/resnext.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nImplementation of ResNeXt (https://arxiv.org/pdf/1611.05431.pdf)\n\"\"\"\n\nimport copy\nimport math\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport torch.nn as nn\nfrom classy_vision.generic.util import is_pos_int\n\nfrom . import register_model\nfrom .classy_model import ClassyModel\nfrom .squeeze_and_excitation_layer import SqueezeAndExcitationLayer\n\n\n# global setting for in-place ReLU:\nINPLACE = True\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1):\n \"\"\"helper function for constructing 3x3 grouped convolution\"\"\"\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=groups,\n bias=False,\n )\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"helper function for constructing 1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass GenericLayer(nn.Module):\n \"\"\"\n Parent class for 2-layer (BasicLayer) and 3-layer (BottleneckLayer)\n bottleneck layer class\n \"\"\"\n\n def __init__(\n self,\n convolutional_block,\n in_planes,\n out_planes,\n stride=1,\n mid_planes_and_cardinality=None,\n reduction=4,\n final_bn_relu=True,\n use_se=False,\n se_reduction_ratio=16,\n ):\n\n # assertions on inputs:\n assert is_pos_int(in_planes) and is_pos_int(out_planes)\n assert is_pos_int(stride) and is_pos_int(reduction)\n\n # set object fields:\n super(GenericLayer, self).__init__()\n self.convolutional_block = convolutional_block\n self.final_bn_relu = final_bn_relu\n\n # final batchnorm and relu layer:\n if final_bn_relu:\n self.bn = nn.BatchNorm2d(out_planes)\n self.relu = nn.ReLU(inplace=INPLACE)\n\n # define down-sampling layer (if direct residual impossible):\n self.downsample = None\n if stride != 1 or in_planes != out_planes:\n self.downsample = nn.Sequential(\n conv1x1(in_planes, out_planes, stride=stride),\n nn.BatchNorm2d(out_planes),\n )\n\n self.se = (\n SqueezeAndExcitationLayer(out_planes, reduction_ratio=se_reduction_ratio)\n if use_se\n else None\n )\n\n def forward(self, x):\n\n # if required, perform downsampling along shortcut connection:\n if self.downsample is None:\n residual = x\n else:\n residual = self.downsample(x)\n\n # forward pass through convolutional block:\n out = self.convolutional_block(x)\n\n if self.final_bn_relu:\n out = self.bn(out)\n\n if self.se is not None:\n out = self.se(out)\n\n # add residual connection, perform rely + batchnorm, and return result:\n out += residual\n if self.final_bn_relu:\n out = self.relu(out)\n return out\n\n\nclass BasicLayer(GenericLayer):\n \"\"\"\n ResNeXt layer with `in_planes` input planes and `out_planes`\n output planes.\n \"\"\"\n\n def __init__(\n self,\n in_planes,\n out_planes,\n stride=1,\n mid_planes_and_cardinality=None,\n reduction=4,\n final_bn_relu=True,\n use_se=False,\n se_reduction_ratio=16,\n ):\n\n # assertions on inputs:\n assert is_pos_int(in_planes) and is_pos_int(out_planes)\n assert is_pos_int(stride) and is_pos_int(reduction)\n\n # define convolutional block:\n convolutional_block = nn.Sequential(\n conv3x3(in_planes, out_planes, stride=stride),\n nn.BatchNorm2d(out_planes),\n nn.ReLU(inplace=INPLACE),\n conv3x3(out_planes, out_planes),\n )\n\n # call constructor of generic layer:\n super().__init__(\n convolutional_block,\n in_planes,\n 
out_planes,\n stride=stride,\n reduction=reduction,\n final_bn_relu=final_bn_relu,\n use_se=use_se,\n se_reduction_ratio=se_reduction_ratio,\n )\n\n\nclass BottleneckLayer(GenericLayer):\n \"\"\"\n ResNeXt bottleneck layer with `in_planes` input planes, `out_planes`\n output planes, and a bottleneck `reduction`.\n \"\"\"\n\n def __init__(\n self,\n in_planes,\n out_planes,\n stride=1,\n mid_planes_and_cardinality=None,\n reduction=4,\n final_bn_relu=True,\n use_se=False,\n se_reduction_ratio=16,\n ):\n\n # assertions on inputs:\n assert is_pos_int(in_planes) and is_pos_int(out_planes)\n assert is_pos_int(stride) and is_pos_int(reduction)\n\n # define convolutional layers:\n bottleneck_planes = int(math.ceil(out_planes / reduction))\n cardinality = 1\n if mid_planes_and_cardinality is not None:\n mid_planes, cardinality = mid_planes_and_cardinality\n bottleneck_planes = mid_planes * cardinality\n\n convolutional_block = nn.Sequential(\n conv1x1(in_planes, bottleneck_planes),\n nn.BatchNorm2d(bottleneck_planes),\n nn.ReLU(inplace=INPLACE),\n conv3x3(\n bottleneck_planes, bottleneck_planes, stride=stride, groups=cardinality\n ),\n nn.BatchNorm2d(bottleneck_planes),\n nn.ReLU(inplace=INPLACE),\n conv1x1(bottleneck_planes, out_planes),\n )\n\n # call constructor of generic layer:\n super(BottleneckLayer, self).__init__(\n convolutional_block,\n in_planes,\n out_planes,\n stride=stride,\n reduction=reduction,\n final_bn_relu=final_bn_relu,\n use_se=use_se,\n se_reduction_ratio=se_reduction_ratio,\n )\n\n\nclass SmallInputInitialBlock(nn.Module):\n \"\"\"\n ResNeXt initial block for small input with `in_planes` input planes\n \"\"\"\n\n def __init__(self, init_planes):\n super().__init__()\n self._module = nn.Sequential(\n conv3x3(3, init_planes, stride=1),\n nn.BatchNorm2d(init_planes),\n nn.ReLU(inplace=INPLACE),\n )\n\n def forward(self, x):\n return self._module(x)\n\n\nclass InitialBlock(nn.Module):\n \"\"\"\n ResNeXt initial block with `in_planes` input planes\n \"\"\"\n\n def __init__(self, init_planes):\n super().__init__()\n self._module = nn.Sequential(\n nn.Conv2d(3, init_planes, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(init_planes),\n nn.ReLU(inplace=INPLACE),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n )\n\n def forward(self, x):\n return self._module(x)\n\n\n@register_model(\"resnext\")\nclass ResNeXt(ClassyModel):\n def __init__(\n self,\n num_blocks,\n init_planes: int = 64,\n reduction: int = 4,\n small_input: bool = False,\n zero_init_bn_residuals: bool = False,\n base_width_and_cardinality: Optional[Union[Tuple, List]] = None,\n basic_layer: bool = False,\n final_bn_relu: bool = True,\n bn_weight_decay: Optional[bool] = False,\n use_se: bool = False,\n se_reduction_ratio: int = 16,\n ):\n \"\"\"\n Implementation of `ResNeXt <https://arxiv.org/pdf/1611.05431.pdf>`_.\n\n Args:\n small_input: set to `True` for 32x32 sized image inputs.\n final_bn_relu: set to `False` to exclude the final batchnorm and\n ReLU layers. These settings are useful when training Siamese\n networks.\n use_se: Enable squeeze and excitation\n se_reduction_ratio: The reduction ratio to apply in the excitation\n stage. 
Only used if `use_se` is `True`.\n \"\"\"\n super().__init__()\n\n # assertions on inputs:\n assert type(num_blocks) == list\n assert all(is_pos_int(n) for n in num_blocks)\n assert is_pos_int(init_planes) and is_pos_int(reduction)\n assert type(small_input) == bool\n assert type(bn_weight_decay) == bool\n assert (\n type(zero_init_bn_residuals) == bool\n ), \"zero_init_bn_residuals must be a boolean, set to true if gamma of last\\\n BN of residual block should be initialized to 0.0, false for 1.0\"\n assert base_width_and_cardinality is None or (\n isinstance(base_width_and_cardinality, (tuple, list))\n and len(base_width_and_cardinality) == 2\n and is_pos_int(base_width_and_cardinality[0])\n and is_pos_int(base_width_and_cardinality[1])\n )\n assert isinstance(use_se, bool), \"use_se has to be a boolean\"\n\n # Chooses whether to apply weight decay to batch norm\n # parameters. This improves results in some situations,\n # e.g. ResNeXt models trained / evaluated using the Imagenet\n # dataset, but can cause worse performance in other scenarios\n self.bn_weight_decay = bn_weight_decay\n\n # initial convolutional block:\n self.num_blocks = num_blocks\n self.small_input = small_input\n self._make_initial_block(small_input, init_planes, basic_layer)\n\n # compute number of planes at each spatial resolution:\n out_planes = [init_planes * 2 ** i * reduction for i in range(len(num_blocks))]\n in_planes = [init_planes] + out_planes[:-1]\n\n # create subnetworks for each spatial resolution:\n blocks = []\n for idx in range(len(out_planes)):\n mid_planes_and_cardinality = None\n if base_width_and_cardinality is not None:\n w, c = base_width_and_cardinality\n mid_planes_and_cardinality = (w * 2 ** idx, c)\n new_block = self._make_resolution_block(\n in_planes[idx],\n out_planes[idx],\n idx,\n num_blocks[idx], # num layers\n stride=1 if idx == 0 else 2,\n mid_planes_and_cardinality=mid_planes_and_cardinality,\n reduction=reduction,\n final_bn_relu=final_bn_relu or (idx != (len(out_planes) - 1)),\n use_se=use_se,\n se_reduction_ratio=se_reduction_ratio,\n )\n blocks.append(nn.Sequential(*new_block))\n self.blocks = nn.Sequential(*blocks)\n\n self.out_planes = out_planes[-1]\n self._num_classes = out_planes\n\n # initialize weights:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Init BatchNorm gamma to 0.0 for last BN layer, it gets 0.2-0.3% higher\n # final val top1 for larger batch sizes.\n if zero_init_bn_residuals:\n for m in self.modules():\n if isinstance(m, GenericLayer):\n if hasattr(m, \"bn\"):\n nn.init.constant_(m.bn.weight, 0)\n\n def _make_initial_block(self, small_input, init_planes, basic_layer):\n if small_input:\n self.initial_block = SmallInputInitialBlock(init_planes)\n self.layer_type = BasicLayer\n else:\n self.initial_block = InitialBlock(init_planes)\n self.layer_type = BasicLayer if basic_layer else BottleneckLayer\n\n # helper function that creates ResNet blocks at single spatial resolution:\n def _make_resolution_block(\n self,\n in_planes,\n out_planes,\n resolution_idx,\n num_blocks,\n stride=1,\n mid_planes_and_cardinality=None,\n reduction=4,\n final_bn_relu=True,\n use_se=False,\n se_reduction_ratio=16,\n ):\n\n # add the desired number of residual blocks:\n blocks = []\n for idx in range(num_blocks):\n blocks.append(\n self.build_attachable_block(\n 
\"block{}-{}\".format(resolution_idx, idx),\n self.layer_type(\n in_planes if idx == 0 else out_planes,\n out_planes,\n stride=stride if idx == 0 else 1, # only first block has stride\n mid_planes_and_cardinality=mid_planes_and_cardinality,\n reduction=reduction,\n final_bn_relu=final_bn_relu or (idx != (num_blocks - 1)),\n use_se=use_se,\n se_reduction_ratio=se_reduction_ratio,\n ),\n )\n )\n return blocks\n\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"ResNeXt\":\n \"\"\"Instantiates a ResNeXt from a configuration.\n\n Args:\n config: A configuration for a ResNeXt.\n See :func:`__init__` for parameters expected in the config.\n\n Returns:\n A ResNeXt instance.\n \"\"\"\n assert \"num_blocks\" in config\n config = {\n \"num_blocks\": config[\"num_blocks\"],\n \"init_planes\": config.get(\"init_planes\", 64),\n \"reduction\": config.get(\"reduction\", 4),\n \"base_width_and_cardinality\": config.get(\"base_width_and_cardinality\"),\n \"small_input\": config.get(\"small_input\", False),\n \"basic_layer\": config.get(\"basic_layer\", False),\n \"final_bn_relu\": config.get(\"final_bn_relu\", True),\n \"zero_init_bn_residuals\": config.get(\"zero_init_bn_residuals\", False),\n \"bn_weight_decay\": config.get(\"bn_weight_decay\", False),\n \"use_se\": config.get(\"use_se\", False),\n \"se_reduction_ratio\": config.get(\"se_reduction_ratio\", 16),\n }\n return cls(**config)\n\n # forward pass in residual network:\n def forward(self, x):\n # initial convolutional block:\n out = self.initial_block(x)\n\n # evaluate all residual blocks:\n # TODO: (kaizh) T43794289 exit early if there is no block that has heads\n self.blocks(out)\n\n # By default the classification layer is implemented as one head on top\n # of the last block. The head is automatically computed right after the\n # last block.\n head_outputs = self.execute_heads()\n if len(head_outputs) == 0:\n raise Exception(\"Expecting at least one head that generates output\")\n elif len(head_outputs) == 1:\n return list(head_outputs.values())[0]\n else:\n return head_outputs\n\n def get_optimizer_params(self):\n return super().get_optimizer_params(bn_weight_decay=self.bn_weight_decay)\n\n @property\n def input_shape(self):\n if self.small_input:\n return (3, 32, 32)\n else:\n return (3, 224, 224)\n\n @property\n def output_shape(self):\n return (1, self._num_classes)\n\n @property\n def model_depth(self):\n return sum(self.num_blocks)\n\n\nclass _ResNeXt(ResNeXt):\n @classmethod\n def from_config(cls, config: Dict[str, Any]) -> \"ResNeXt\":\n config = copy.deepcopy(config)\n config.pop(\"name\")\n if \"heads\" in config:\n config.pop(\"heads\")\n return cls(**config)\n\n\n@register_model(\"resnet18\")\nclass ResNet18(_ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[2, 2, 2, 2],\n basic_layer=True,\n zero_init_bn_residuals=True,\n **kwargs,\n )\n\n\n@register_model(\"resnet34\")\nclass ResNet34(ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[3, 4, 6, 3],\n basic_layer=True,\n zero_init_bn_residuals=True,\n **kwargs,\n )\n\n\n@register_model(\"resnet50\")\nclass ResNet50(_ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[3, 4, 6, 3],\n basic_layer=False,\n zero_init_bn_residuals=True,\n **kwargs,\n )\n\n\n@register_model(\"resnet101\")\nclass ResNet101(_ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[3, 4, 23, 3],\n basic_layer=False,\n zero_init_bn_residuals=True,\n **kwargs,\n 
)\n\n\n@register_model(\"resnet152\")\nclass ResNet152(_ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[3, 8, 36, 3],\n basic_layer=False,\n zero_init_bn_residuals=True,\n **kwargs,\n )\n\n\n# Note, the ResNeXt models all have weight decay enabled for the batch\n# norm parameters. We have found empirically that this gives better\n# results when training on ImageNet (~0.5pp of top-1 acc) and brings\n# our results on track with reported ImageNet results...but for\n# training on other datasets, we have observed losses in accuracy (for\n# example, the dataset used in https://arxiv.org/abs/1805.00932).\n@register_model(\"resnext50_32x4d\")\nclass ResNeXt50(_ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[3, 4, 6, 3],\n basic_layer=False,\n zero_init_bn_residuals=True,\n base_width_and_cardinality=(4, 32),\n bn_weight_decay=True,\n **kwargs,\n )\n\n\n@register_model(\"resnext101_32x4d\")\nclass ResNeXt101(_ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[3, 4, 23, 3],\n basic_layer=False,\n zero_init_bn_residuals=True,\n base_width_and_cardinality=(4, 32),\n bn_weight_decay=True,\n **kwargs,\n )\n\n\n@register_model(\"resnext152_32x4d\")\nclass ResNeXt152(_ResNeXt):\n def __init__(self, **kwargs):\n super().__init__(\n num_blocks=[3, 8, 36, 3],\n basic_layer=False,\n zero_init_bn_residuals=True,\n base_width_and_cardinality=(4, 32),\n bn_weight_decay=True,\n **kwargs,\n )\n"
] | [
[
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.ReLU"
]
] |
herolab-uga/pf-doa-localization | [
"f6d4f3b5bafdde7a9afa905b96378fdc113f70f6"
] | [
"dataset1/DifferentialRSS-Localization-DataSet1.py"
] | [
"import math\nimport numpy as np\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom matplotlib import pyplot as pb\nimport random\nfrom datetime import datetime\nimport time\nimport sys\nimport csv\n\n\ndef dist(x, y, pos):\n return math.sqrt((pos[0]-x)**2 + (pos[1]-y)**2)\n\nareaSize=(4, 4)\nnode_pos = [(0,0),(0,4),(4,0)]\n\nif 'wifi' in sys.argv[1]:\n rss0 = -45.73\n pathloss_exponent = 2.162\nelif 'ble' in sys.argv[1]:\n rss0 = -75.48\n pathloss_exponent = 2.271 \nelif 'zigbee' in sys.argv[1]:\n rss0 = -50.33\n pathloss_exponent = 2.935\n\ndef gen_wifi(freq=2.4, power=20, trans_gain=0, recv_gain=0, size=areaSize, pos=(5,5), shadow_dev=1, n=pathloss_exponent,noise=1):\n if pos is None:\n pos = (random.randrange(size[0]), random.randrange(size[1]))\n\n random.seed(datetime.now())\n\n normal_dist = np.random.normal(0, shadow_dev, size=[size[0]+1, size[1]+1])\n rss = []\n\n random.seed(datetime.now())\n\n for x in range(0,3):\n distance = dist(node_pos[x][0], node_pos[x][1], pos)\n val =rss0 - 10 * n * math.log10(distance) + normal_dist[int(pos[0])][int(pos[1])]\n rss.append(val-noise*random.random())\n\n return rss\n\n\nwith open(sys.argv[1]) as f:\n dict_from_csv = [{k: v for k, v in row.items()}\n for row in csv.DictReader(f, skipinitialspace=True)]\n\noverall_rss=[]\noriginal_tragectory=[]\n\nfor i in range(len(dict_from_csv)):\n dict=dict_from_csv[i]\n x , y = float(dict['x']) , float(dict['y'])\n original_tragectory.append((x,y))\n random.seed(datetime.now())\n rss = [-int(float(dict['RSSI A']))-random.random(),-int(float(dict['RSSI B']))-random.random() ,-int(float(dict['RSSI C']))-random.random()]\n overall_rss.append(rss)\n\n\n\nrandom.seed(datetime.now())\nprevious_errors =[]\ndistance_error =[]\nparticles = []\n\nstart_time = time.time()\nfor x in np.arange(0.1,areaSize[0]-1,0.2):\n for y in np.arange(0.1,areaSize[1]-1,0.2):\n particles.append((x,y))\n\nfor i in range(0,len(original_tragectory)):\n positions =[]\n errors=[]\n error=0\n for particle in particles:\n x,y=particle[0],particle[1]\n actual_rss = gen_wifi(pos=(x,y),noise=0)\n error=np.sum(np.subtract(actual_rss,overall_rss[i]))\n \n positions.append((x,y))\n errors.append(error)\n\n\n min_error = min(errors)\n min_index = errors.index(min_error)\n pos=positions[min_index]\n previous_errors.append(errors[min_index])\n distance_error.append(dist(pos[0],pos[1],original_tragectory[i]))\n\n\n\nprint(\"--- Computation Time: %s seconds ---\" % (time.time() - start_time))\ndistcumulativeEror=np.sum(distance_error)\ndistmeanError=np.average(distance_error)\ndistStandardDeviationError=np.std(distance_error)\nprint(\"DIST_ERROR: Cummulative Error: \" + str(distcumulativeEror)+\"\\tMean Error: \"+str(distmeanError)+\"\\tStandard Deviation: \"+str(distStandardDeviationError))\n\n\n"
] | [
[
"numpy.sum",
"numpy.subtract",
"numpy.arange",
"numpy.random.normal",
"numpy.std",
"numpy.average"
]
] |
MobleyLab/SAMPL6 | [
"c661d3985af7fa0ba8c64a1774cfb2363cd31bda"
] | [
"host_guest/Analysis/Scripts/pkganalysis/efficiency.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"Functions for the efficiency statistical analysis.\"\"\"\n\n\n# =============================================================================\n# GLOBAL IMPORTS\n# =============================================================================\n\nimport copy\n\nimport numpy as np\nimport scipy as sp\nimport arch.bootstrap\n\n\n# =============================================================================\n# UTILITY FUNCTIONS\n# =============================================================================\n\ndef discard_initial_zeros(free_energy_A, free_energy_B):\n \"\"\"Discard the initial data points for which there's no data.\n\n Some submissions don't have estimates for the first part of\n the trajectory. This function removes the initial data points\n with mean free energy 0.0.\n\n Parameters\n ----------\n free_energy_A : numpy.ndarray\n free_energy_A[r][c] is the r-th replicate of the free energy estimate\n computed by method A at the c-th computational cost.\n free_energy_B : numpy.ndarray\n free_energy_B[r][c] is the r-th replicate of the free energy estimate\n computed by method B at the c-th computational cost.\n\n Returns\n -------\n free_energy_A : numpy.ndarray\n free_energy_B : numpy.ndarray\n A copy of the free energy trajectories after discarding the\n initial computational costs without data.\n\n \"\"\"\n # Compute the mean free energies of the two methods.\n mean_c_A = free_energy_A.mean(axis=0)\n mean_c_B = free_energy_B.mean(axis=0)\n\n # Find the first indices for which both methods\n # have a non-zero free energy estimate.\n first_nonzero_idx_A = np.nonzero(mean_c_A)[0][0]\n first_nonzero_idx_B = np.nonzero(mean_c_B)[0][0]\n first_nonzero_idx = max(first_nonzero_idx_A, first_nonzero_idx_B)\n\n # Discard the initial computational costs.\n free_energy_A = copy.deepcopy(free_energy_A[:,first_nonzero_idx:])\n free_energy_B = copy.deepcopy(free_energy_B[:,first_nonzero_idx:])\n\n return free_energy_A, free_energy_B\n\n\n# =============================================================================\n# MAIN CLASS\n# =============================================================================\n\nclass EfficiencyAnalysis:\n \"\"\"Utility class for the calculation of relative efficiency statistics.\n\n Parameters\n ----------\n free_energy_A : numpy.ndarray\n free_energy_A[r][c] is the r-th replicate of the free energy estimate\n computed by method A at the c-th computational cost.\n free_energy_B : numpy.ndarray\n free_energy_B[r][c] is the r-th replicate of the free energy estimate\n computed by method B at the c-th computational cost.\n asymptotic_free_energy_A : float, optional\n If given, this will be used as the asymptotic free energy of\n method A to compute the bias. Otherwise, this is estimated\n as free_energy_A.mean(0)[-1].\n asymptotic_free_energy_B : float, optional\n If given, this will be used as the asymptotic free energy of\n method B to compute the bias. Otherwise, this is estimated\n as free_energy_B.mean(0)[-1].\n model : 'normal' or None\n If 'normal', the distribution of free_energy_X[:,c] at a fixed\n computational cost will be assumed to be normal, and parametric\n bootstrapping will be used. 
This has the disadvantage of ignoring\n eventual correlations between consecutive data points of the\n free energy trajectories, but it may model the tails of the\n distribution better when the number of independent trajectories\n is low.\n\n \"\"\"\n\n def __init__(\n self, free_energy_A, free_energy_B,\n asymptotic_free_energy_A=None,\n asymptotic_free_energy_B=None,\n model=None,\n ):\n # Check that all means and stds have the same number of data points.\n shapes = {x.shape for x in [free_energy_A, free_energy_B]}\n if len(shapes) != 1:\n raise ValueError('free_energy_A and free_energy_B must have the same shape')\n\n if model not in {None, self._NORMAL_MODEL}:\n raise ValueError('model must be None or {}'.format(self._NORMAL_MODEL))\n\n self._free_energy_A = free_energy_A\n self._free_energy_B = free_energy_B\n self._asymptotic_free_energy_A = asymptotic_free_energy_A\n self._asymptotic_free_energy_B = asymptotic_free_energy_B\n self._model = model\n\n @property\n def n_replicates_A(self):\n \"\"\"The number of replicate free energy trajectories for method A.\"\"\"\n return self._free_energy_A.shape[0]\n\n @property\n def n_replicates_B(self):\n \"\"\"The number of replicate free energy trajectories for method B.\"\"\"\n return self._free_energy_B.shape[0]\n\n @property\n def asymptotic_free_energy_A(self):\n \"\"\"The asymptotic free energy for method A.\"\"\"\n return _estimate_asymptotic_free_energy(\n self._free_energy_A, self._asymptotic_free_energy_A)\n\n @property\n def asymptotic_free_energy_B(self):\n \"\"\"The asymptotic free energy for method B.\"\"\"\n return _estimate_asymptotic_free_energy(\n self._free_energy_B, self._asymptotic_free_energy_B)\n\n @property\n def mean_c_A(self):\n \"\"\"mean_c_A[c] is the mean of the free energy estimates computed\n by method A at the c-th computational cost.\"\"\"\n return self._free_energy_A.mean(axis=0)\n\n @property\n def mean_c_B(self):\n \"\"\"mean_c_B[c] is the mean of the free energy estimates computed\n by method B at the c-th computational cost.\"\"\"\n return self._free_energy_B.mean(axis=0)\n\n @property\n def std_c_A(self):\n \"\"\"std_c_A[c] is the standard deviation of the free energy estimate\n computed by method A at the c-th computational cost.\"\"\"\n if self._model == self._NORMAL_MODEL:\n return _normal_unbiased_std(self._free_energy_A)\n return self._free_energy_A.std(axis=0, ddof=1)\n\n @property\n def std_c_B(self):\n \"\"\"std_c_B[c] is the standard deviation of the free energy estimate\n computed by method B at the c-th computational cost.\"\"\"\n if self._model == self._NORMAL_MODEL:\n return _normal_unbiased_std(self._free_energy_B)\n return self._free_energy_B.std(axis=0, ddof=1)\n\n @property\n def var_c_A(self):\n \"\"\"var_c_A[c] is the variance of the free energy estimate\n computed by method A at the c-th computational cost.\"\"\"\n return self._free_energy_A.var(axis=0, ddof=1)\n\n @property\n def var_c_B(self):\n \"\"\"var_c_B[c] is the variance of the free energy estimate\n computed by method B at the c-th computational cost.\"\"\"\n return self._free_energy_B.var(axis=0, ddof=1)\n\n @property\n def bias_c_A(self):\n \"\"\"bias_c_A[c] is the bias of the free energy estimates computed\n by method A at the c-th computational cost.\"\"\"\n return _bias(self.mean_c_A, self.asymptotic_free_energy_A)\n\n @property\n def bias_c_B(self):\n \"\"\"bias_c_B[c] is the bias of the free energy estimates computed\n by method B at the c-th computational cost.\"\"\"\n return _bias(self.mean_c_B, 
self.asymptotic_free_energy_B)\n\n def compute_std_relative_efficiency(\n self,\n confidence_interval=None,\n n_bootstrap_samples=10000,\n ):\n std_relative_efficiency = _std_relative_efficiency(\n self._free_energy_A, self._free_energy_B)\n\n if self._model == self._NORMAL_MODEL:\n bootstrap_func = _generate_normal_std_rel_eff_sample_arch\n sampling = 'parametric'\n else:\n bootstrap_func = _std_relative_efficiency\n sampling = 'nonparametric'\n\n if confidence_interval is not None:\n ci = self._compute_rel_eff_ci(\n bootstrap_func, sampling, confidence_interval,\n n_bootstrap_samples, include_asymptotic=False\n )\n return std_relative_efficiency, ci\n return std_relative_efficiency\n\n def compute_abs_bias_relative_efficiency(\n self,\n confidence_interval=None,\n n_bootstrap_samples=10000,\n ):\n abs_bias_relative_efficiency = _abs_bias_relative_efficiency(\n self._free_energy_A, self._free_energy_B,\n self.asymptotic_free_energy_A,\n self.asymptotic_free_energy_B\n )\n\n if self._model == self._NORMAL_MODEL:\n bootstrap_func = _generate_normal_abs_bias_rel_eff_sample_arch\n sampling = 'parametric'\n else:\n bootstrap_func = _abs_bias_relative_efficiency\n sampling = 'nonparametric'\n\n if confidence_interval is not None:\n ci = self._compute_rel_eff_ci(\n bootstrap_func, sampling, confidence_interval,\n n_bootstrap_samples, include_asymptotic=True\n )\n return abs_bias_relative_efficiency, ci\n return abs_bias_relative_efficiency\n\n def compute_rmse_relative_efficiency(\n self,\n confidence_interval=None,\n n_bootstrap_samples=10000,\n ):\n rmse_bias_relative_efficiency = _rmse_relative_efficiency(\n self._free_energy_A, self._free_energy_B,\n self.asymptotic_free_energy_A,\n self.asymptotic_free_energy_B\n )\n\n if self._model == self._NORMAL_MODEL:\n bootstrap_func = _generate_normal_rmse_rel_eff_sample_arch\n sampling = 'parametric'\n else:\n bootstrap_func = _rmse_relative_efficiency\n sampling = 'nonparametric'\n\n if confidence_interval is not None:\n ci = self._compute_rel_eff_ci(\n bootstrap_func, sampling, confidence_interval,\n n_bootstrap_samples, include_asymptotic=True\n )\n return rmse_bias_relative_efficiency, ci\n return rmse_bias_relative_efficiency\n\n def _compute_rel_eff_ci(\n self, arch_func, sampling, confidence_interval,\n n_bootstrap_samples, include_asymptotic\n ):\n \"\"\"Shortcut to compute a CI with arch.bootstrap.\"\"\"\n bs = _IIDBootstrapNotEqual(self._free_energy_A, self._free_energy_B)\n\n # Configure extra keyword arguments for arch_func.\n kwargs = {}\n if self._model == self._NORMAL_MODEL:\n kwargs['params_cache'] = {}\n if include_asymptotic:\n kwargs['asymptotic_free_energy_A'] = self.asymptotic_free_energy_A\n kwargs['asymptotic_free_energy_B'] = self.asymptotic_free_energy_B\n\n if len(kwargs) == 0:\n kwargs = None\n\n ci = bs.conf_int(\n arch_func, reps=n_bootstrap_samples, method='bca',\n size=confidence_interval, sampling=sampling,\n extra_kwargs=kwargs\n )\n # Convert shape from (2,1) to (2,)\n assert ci.shape == (2, 1)\n return np.array([ci[0][0], ci[1][0]])\n\n # Class constants\n _NORMAL_MODEL = 'normal'\n\n\n# =============================================================================\n# Basic statistics utilities\n# =============================================================================\n\ndef _normal_unbiased_std(data):\n \"\"\"Return the unbiased estimate of the standard deviation assuming normal distribution.\n\n The sqrt of the Bessel-corrected variance is a biased estimate of\n the std. 
If the underlying data has a normal distribution, we have\n an analytical expression for the unbiased estimate.\n\n Parameters\n ----------\n data : np.ndarray\n data[i][j] is the i-th replicate of the j-th measurement (e.g.,\n the i-th replicate of the free energy trajectory at the j-th\n computational cost).\n\n Returns\n -------\n unbiased_std : np.ndarray\n unbiased_std[j] is the unbiased estimate of the standard\n deviation for the j-th measurement.\n\n See Also\n --------\n http://web.eecs.umich.edu/~fessler/papers/files/tr/stderr.pdf.\n\n \"\"\"\n n_replicates = data.shape[0]\n\n bessel_std = np.std(data, ddof=1, axis=0)\n\n # Compute the factor used to unbias the Bessel-corrected sample std\n # Use the log gamma function for numerical stability.\n loggamma1 = sp.special.gammaln((n_replicates-1)/2)\n loggamma2 = sp.special.gammaln(n_replicates/2)\n k_n = np.sqrt((n_replicates-1)/2) * np.exp(loggamma1 - loggamma2)\n\n return k_n * bessel_std\n\n\ndef _bias(mean_c, asymptotic_free_energy=None):\n \"\"\"Compute the bias from the mean.\"\"\"\n asymptotic_free_energy = _estimate_asymptotic_free_energy(\n mean_c, asymptotic_free_energy)\n return mean_c - asymptotic_free_energy\n\n\ndef _estimate_asymptotic_free_energy(data, asymptotic_free_energy=None):\n \"\"\"Shortcut to estimate the asymptotic free energy as mean_DG[-1] if asymptotic_free_energy is None.\n\n Parameters\n ----------\n data : numpy.ndarray\n If 2D this is handled as the free energy trajectories. If 1D\n this is considered the mean free energy.\n\n \"\"\"\n if asymptotic_free_energy is None:\n if len(data.shape) == 2:\n data = data.mean(axis=0)\n return data[-1]\n return asymptotic_free_energy\n\n\n# =============================================================================\n# Relative efficiency definitions\n# =============================================================================\n\ndef _relative_efficiency(stats_A, stats_B):\n \"\"\"Encapsulate the definition of relative efficiency.\n\n Parameters\n ----------\n stats_A : np.ndarray\n stats_B : np.ndarray\n If 1D, stats_A[c] is the statistic at the c-th computational cost.\n If 2D, stats_A[r][c] is the r-th bootstrap sample of the statistic\n at the c-th computational cost.\n\n \"\"\"\n assert stats_A.shape == stats_B.shape\n # Check if this is a bootstrapped version or not.\n if len(stats_A.shape) == 1:\n axis = None # sum\n axis = -1 # trapz\n else:\n axis = 1\n\n # sum_A = np.sum(stats_A, axis=axis)\n # sum_B = np.sum(stats_B, axis=axis)\n sum_A = sp.integrate.trapz(stats_A, axis=axis)\n sum_B = sp.integrate.trapz(stats_B, axis=axis)\n return np.log10(sum_A / sum_B)\n\n\ndef _std_relative_efficiency(free_energy_A, free_energy_B, model=None):\n \"\"\"Shortcut to compute the standard deviation relative efficiency from the free energy trajectories.\"\"\"\n if model == 'normal':\n std_c_A = _normal_unbiased_std(free_energy_A)\n std_c_B = _normal_unbiased_std(free_energy_B)\n else:\n assert model is None\n std_c_A = np.std(free_energy_A, axis=0, ddof=1)\n std_c_B = np.std(free_energy_B, axis=0, ddof=1)\n return _relative_efficiency(std_c_A, std_c_B)\n\n\ndef _abs_bias_relative_efficiency_from_params(\n mean_c_A, mean_c_B,\n asymptotic_free_energy_A=None,\n asymptotic_free_energy_B=None\n):\n \"\"\"Shortcut to compute the absolute bias relative efficiency from mean and asymptotic free energy.\"\"\"\n bias_c_A = _bias(mean_c_A, asymptotic_free_energy_A)\n bias_c_B = _bias(mean_c_B, asymptotic_free_energy_B)\n return _relative_efficiency(np.abs(bias_c_A), 
np.abs(bias_c_B))\n\n\ndef _abs_bias_relative_efficiency(\n free_energy_A, free_energy_B,\n asymptotic_free_energy_A=None,\n asymptotic_free_energy_B=None\n):\n \"\"\"Shortcut to compute the absolute bias relative efficiency from the free energy trajectories.\"\"\"\n mean_c_A = free_energy_A.mean(axis=0)\n mean_c_B = free_energy_B.mean(axis=0)\n return _abs_bias_relative_efficiency_from_params(\n mean_c_A, mean_c_B, asymptotic_free_energy_A, asymptotic_free_energy_B)\n\n\ndef _rmse_relative_efficiency_from_params(\n mean_c_A, mean_c_B, var_c_A, var_c_B,\n asymptotic_free_energy_A=None,\n asymptotic_free_energy_B=None\n):\n \"\"\"Shortcut to compute the RMSE relative efficiency from mean, var, and asymptotic free energy.\"\"\"\n bias_c_A = _bias(mean_c_A, asymptotic_free_energy_A)\n bias_c_B = _bias(mean_c_B, asymptotic_free_energy_B)\n rmse_c_A = np.sqrt(var_c_A + bias_c_A**2)\n rmse_c_B = np.sqrt(var_c_B + bias_c_B**2)\n return _relative_efficiency(rmse_c_A, rmse_c_B)\n\n\ndef _rmse_relative_efficiency(\n free_energy_A, free_energy_B,\n asymptotic_free_energy_A=None,\n asymptotic_free_energy_B=None\n):\n \"\"\"Shortcut to compute the RMSE relative efficiency from the free energy trajectories.\"\"\"\n asymptotic_free_energy_A = _estimate_asymptotic_free_energy(\n free_energy_A, asymptotic_free_energy_A)\n asymptotic_free_energy_B = _estimate_asymptotic_free_energy(\n free_energy_B, asymptotic_free_energy_B)\n rmse_c_A = np.sqrt(np.mean((free_energy_A - asymptotic_free_energy_A)**2, axis=0))\n rmse_c_B = np.sqrt(np.mean((free_energy_B - asymptotic_free_energy_B)**2, axis=0))\n return _relative_efficiency(rmse_c_A, rmse_c_B)\n\n\n# =============================================================================\n# Parametric bootstrap sampling with normal statistics\n# =============================================================================\n\ndef _generate_normal_std_sample(\n std_c, n_replicates,\n n_bootstrap_samples=None\n):\n \"\"\"Generate a bootstrap sample for the standard deviation assuming the\n data is normally-distributed.\n\n In this function, free_energy[:, c] at a fixed computational cost is\n assumed to be normally distributed. 
Under this assumption the standard\n deviation is chi distributed with n_replicates-1 degrees of freedom,\n which allows us to generate a bootstrap sample for the standard\n deviation as a function of the computational cost from a parametric\n distribution.\n\n Parameters\n ----------\n std_c : numpy.ndarray\n std_c[c] is the standard deviation of the free energy estimate\n at the c-th computational cost.\n n_replicates : int\n The number of replicates used to compute the standard deviations.\n n_bootstrap_samples : int or None, optional\n If not None, multiple bootstrap samples are generated.\n\n Returns\n -------\n bootstrap_std : np.ndarray\n If n_bootstrap_samples is None, bootstrap_std[c] is the bootstrap\n sample of the standard deviation at the c-th computational cost.\n Otherwise, bootstrap_std[r][c] is the r-th bootstrap sample of the\n standard deviation at the c-th computational cost.\n\n \"\"\"\n # Sample from the chi distribution with the correct scale.\n df = n_replicates - 1\n n_costs = len(std_c)\n if n_bootstrap_samples is None:\n size = n_costs\n else:\n size = (n_bootstrap_samples, n_costs)\n return sp.stats.chi.rvs(df=df, scale=std_c/np.sqrt(df), size=size)\n\n\ndef _generate_normal_std_rel_eff_sample(\n std_c_A, std_c_B, n_replicates,\n n_bootstrap_samples=None\n):\n \"\"\"Generate a bootstrap sample for the standard deviation relative efficiency\n assuming the data is normally-distributed.\n\n See Also\n --------\n _generate_normal_std_sample\n\n Parameters\n ----------\n std_c_A : numpy.ndarray\n std_c_A[c] is the standard deviation of the free energy estimate\n computed by method A at the c-th computational cost.\n std_c_A : numpy.ndarray\n std_c_B[c] is the standard deviation of the free energy estimate\n computed by method B at the c-th computational cost.\n n_replicates : int\n The number of replicates used to compute the standard deviations.\n n_bootstrap_samples : int or None, optional\n If not None, multiple bootstrap samples are generated.\n\n Returns\n -------\n bootstrap_std_rel_eff : np.ndarray\n If n_bootstrap_samples is None, bootstrap_std_rel_eff[c] is the\n bootstrap sample of the standard deviation relative efficiency at\n the c-th computational cost. Otherwise, bootstrap_std_rel_eff[r][c]\n is the r-th bootstrap sample of the standard deviation relative\n efficiency at the c-th computational cost.\n\n \"\"\"\n bootstrap_std_c_A = _generate_normal_std_sample(std_c_A, n_replicates, n_bootstrap_samples)\n bootstrap_std_c_B = _generate_normal_std_sample(std_c_B, n_replicates, n_bootstrap_samples)\n # Compute the relative efficiency.\n return _relative_efficiency(bootstrap_std_c_A, bootstrap_std_c_B)\n\n\ndef _generate_normal_abs_bias_sample(\n mean_c, std_c, n_replicates,\n asymptotic_free_energy=None,\n n_bootstrap_samples=None\n):\n \"\"\"Generate a bootstrap sample for the absolute bias assuming the\n data to be normally-distributed.\n\n In this function, free_energy[:, c] at a fixed computational cost is\n assumed to be normally distributed. 
Under this assumption the sample\n mean is t-distributed with n_replicates-1 degrees of freedom, which\n allows us to generate a bootstrap sample for the absolute bias as a\n function of the computational cost from a parametric distribution.\n\n Parameters\n ----------\n mean_c : numpy.ndarray\n mean_c[c] is the mean of the free energy estimates computed\n at the c-th computational cost.\n std_c : numpy.ndarray\n std_c[c] is the standard deviation of the free energy estimate\n computed at the c-th computational cost.\n n_replicates : int\n The number of replicates used to compute the means and standard\n deviations.\n asymptotic_free_energy : float, optional\n If given, this will be used as the asymptotic free energy of\n to compute the bias. Otherwise, this is estimated as mean_c[-1].\n n_bootstrap_samples : int or None, optional\n If not None, multiple bootstrap samples are generated.\n\n Returns\n -------\n bootstrap_abs_bias : np.ndarray\n If n_bootstrap_samples is None, bootstrap_abs_bias[c] is the bootstrap\n sample of the absolute bias at the c-th computational cost. Otherwise,\n bootstrap_abs_bias[r][c] is the r-th bootstrap sample of the absolute\n bias at the c-th computational cost.\n\n \"\"\"\n # Sample mean the t distribution with the correct scale and mean.\n df = n_replicates - 1\n bias_c = _bias(mean_c, asymptotic_free_energy)\n n_costs = len(std_c)\n if n_bootstrap_samples is None:\n size = n_costs\n else:\n size = (n_bootstrap_samples, n_costs)\n bootstrap_abs_bias_c = sp.stats.t.rvs(df=df, scale=std_c/np.sqrt(n_replicates), size=size) + bias_c\n return np.abs(bootstrap_abs_bias_c)\n\n\ndef _generate_normal_abs_bias_rel_eff_sample(\n mean_c_A, mean_c_B, std_c_A, std_c_B, n_replicates,\n asymptotic_free_energy_A=None, asymptotic_free_energy_B=None,\n n_bootstrap_samples=None\n):\n \"\"\"Generate a bootstrap sample for the absolute bias relative efficiency\n assuming the data to be normally distributed.\n\n See Also\n --------\n _generate_normal_abs_bias_sample\n\n Parameters\n ----------\n mean_c_A : numpy.ndarray\n mean_c_A[c] is the mean of the free energy estimates computed\n by method A at the c-th computational cost.\n mean_c_B : numpy.ndarray\n mean_c_B[c] is the mean of the free energy estimates computed\n by method B at the c-th computational cost.\n std_c_A : numpy.ndarray\n std_c_A[c] is the standard deviation of the free energy estimate\n computed by method A at the c-th computational cost.\n std_c_B : numpy.ndarray\n std_c_B[c] is the standard deviation of the free energy estimate\n computed by method B at the c-th computational cost.\n n_replicates : int\n The number of replicates used to compute the means and standard\n deviations.\n asymptotic_free_energy_A : float, optional\n If given, this will be used as the asymptotic free energy of\n method A to compute the bias. Otherwise, this is estimated\n as mean_c_A[-1].\n asymptotic_free_energy_B : float, optional\n If given, this will be used as the asymptotic free energy of\n method B to compute the bias. Otherwise, this is estimated\n as mean_c_B[-1].\n n_bootstrap_samples : int or None, optional\n If not None, multiple bootstrap samples are generated.\n\n Returns\n -------\n bootstrap_abs_bias_rel_eff : np.ndarray\n If n_bootstrap_samples is None, bootstrap_abs_bias_rel_eff[c] is\n the bootstrap sample of the absolute bias relative efficiency at the\n c-th computational cost. 
Otherwise, bootstrap_abs_bias_rel_eff[r][c]\n is the r-th bootstrap sample of the absolute bias relative efficiency\n at the c-th computational cost.\n\n \"\"\"\n bootstrap_bias_c_A = _generate_normal_abs_bias_sample(\n mean_c_A, std_c_A, n_replicates,\n asymptotic_free_energy=asymptotic_free_energy_A,\n n_bootstrap_samples=n_bootstrap_samples\n )\n bootstrap_bias_c_B = _generate_normal_abs_bias_sample(\n mean_c_B, std_c_B, n_replicates,\n asymptotic_free_energy=asymptotic_free_energy_B,\n n_bootstrap_samples=n_bootstrap_samples\n )\n # Compute the relative efficiency.\n return _relative_efficiency(bootstrap_bias_c_A, bootstrap_bias_c_B)\n\n\ndef _generate_normal_rmse_sample(\n mean_c, std_c, n_replicates,\n asymptotic_free_energy=None,\n n_bootstrap_samples=None\n):\n \"\"\"Generate a bootstrap sample for the RMSE assuming the data to be\n normally distributed.\n\n In this function, free_energy[:, c] at a fixed computational cost is\n assumed to be normally distributed. Under this assumption the sample\n mean is t-distributed and the standard deviation is chi-distributed\n with n_replicates-1 degrees of freedom, which allows us to generate\n a bootstrap sample for the absolute bias as a function of the computational\n cost from a parametric distribution.\n\n Parameters\n ----------\n mean_c : numpy.ndarray\n mean_c[c] is the mean of the free energy estimates computed\n at the c-th computational cost.\n std_c : numpy.ndarray\n std_c[c] is the standard deviation of the free energy estimate\n computed at the c-th computational cost.\n n_replicates : int\n The number of replicates used to compute the means and standard\n deviations.\n asymptotic_free_energy : float, optional\n If given, this will be used as the asymptotic free energy of\n to compute the bias. Otherwise, this is estimated as mean_c[-1].\n n_bootstrap_samples : int or None, optional\n If not None, multiple bootstrap samples are generated.\n\n Returns\n -------\n bootstrap_rmse : np.ndarray\n If n_bootstrap_samples is None, bootstrap_rmse[c] is the bootstrap\n sample of the RMSE at the c-th computational cost. 
Otherwise,\n bootstrap_rmse[r][c] is the r-th bootstrap sample of the RMSE at\n the c-th computational cost.\n\n \"\"\"\n bootstrap_std_c = _generate_normal_std_sample(\n std_c, n_replicates, n_bootstrap_samples)\n bootstrap_abs_bias_c = _generate_normal_abs_bias_sample(\n mean_c, std_c, n_replicates, asymptotic_free_energy, n_bootstrap_samples)\n return np.sqrt(bootstrap_std_c**2 + bootstrap_abs_bias_c**2)\n\n\ndef _generate_normal_rmse_rel_eff_sample(\n mean_c_A, mean_c_B, std_c_A, std_c_B, n_replicates,\n asymptotic_free_energy_A=None, asymptotic_free_energy_B=None,\n n_bootstrap_samples=None\n):\n \"\"\"Generate a bootstrap sample for the RMSE relative efficiency\n assuming the data to be normally distributed.\n\n See Also\n --------\n _generate_normal_rmse_sample\n\n Parameters\n ----------\n mean_c_A : numpy.ndarray\n mean_c_A[c] is the mean of the free energy estimates computed\n by method A at the c-th computational cost.\n mean_c_B : numpy.ndarray\n mean_c_B[c] is the mean of the free energy estimates computed\n by method B at the c-th computational cost.\n std_c_A : numpy.ndarray\n std_c_A[c] is the standard deviation of the free energy estimate\n computed by method A at the c-th computational cost.\n std_c_A : numpy.ndarray\n std_c_B[c] is the standard deviation of the free energy estimate\n computed by method B at the c-th computational cost.\n n_replicates : int\n The number of replicates used to compute the means and standard\n deviations.\n asymptotic_free_energy_A : float, optional\n If given, this will be used as the asymptotic free energy of\n method A to compute the bias. Otherwise, this is estimated\n as mean_c_A[-1].\n asymptotic_free_energy_B : float, optional\n If given, this will be used as the asymptotic free energy of\n method B to compute the bias. Otherwise, this is estimated\n as mean_c_B[-1].\n n_bootstrap_samples : int or None, optional\n If not None, multiple bootstrap samples are generated.\n\n Returns\n -------\n bootstrap_rmse_rel_eff : np.ndarray\n If n_bootstrap_samples is None, bootstrap_rmse_rel_eff[c] is the\n bootstrap sample of the RMSE relative efficiency at the c-th\n computational cost. 
Otherwise, bootstrap_rmse_rel_eff[r][c] is\n the r-th bootstrap sample of the RMSE relative efficiency at\n the c-th computational cost.\n\n \"\"\"\n # Generate samples for the std and bias.\n bootstrap_rmse_c_A = _generate_normal_rmse_sample(\n mean_c_A, std_c_A, n_replicates, asymptotic_free_energy_A, n_bootstrap_samples)\n bootstrap_rmse_c_B = _generate_normal_rmse_sample(\n mean_c_B, std_c_B, n_replicates, asymptotic_free_energy_B, n_bootstrap_samples)\n return _relative_efficiency(bootstrap_rmse_c_A, bootstrap_rmse_c_B)\n\n\n# =============================================================================\n# Wrappers of bootstrap sampling functions for arch.bootstrap.IIDBootstrap\n# =============================================================================\n\nclass _IIDBootstrapNotEqual(arch.bootstrap.IIDBootstrap):\n \"\"\"A bootstrap facility class that avoid generating bootstrap sampling\n concentrating all the distribution on a single data point.\"\"\"\n\n def bootstrap(self, reps):\n for _ in range(reps):\n indices = np.asarray(self.update_indices())\n # Regenerate indices until there is at least one that is different\n # to avoid generating trajectories with std == 0.0.\n while np.allclose(indices, indices[1]):\n indices = np.asarray(self.update_indices())\n self._index = indices\n yield self._resample()\n\n\ndef _cache_params_arch(\n free_energy_A, free_energy_B, params_cache, compute_mean=True,\n):\n \"\"\"Utility function to handle the cache of the free energy mean and std.\"\"\"\n if params_cache is None:\n # Generate parameters just for this call.\n params_cache = {}\n\n if isinstance(params_cache, dict) and len(params_cache) == 0:\n # Cache number of replicates.\n assert free_energy_A.shape == free_energy_B.shape\n params_cache['n_replicates'] = free_energy_A.shape[0]\n\n for i, free_energy in enumerate([free_energy_A, free_energy_B]):\n suffix = 'A' if i == 0 else 'B'\n\n # Cache standard deviation.\n params_cache['std_c_' + suffix] = _normal_unbiased_std(free_energy)\n # Cache mean bias if requested.\n if compute_mean:\n params_cache['mean_c_' + suffix] = free_energy.mean(axis=0)\n\n return params_cache\n\n\ndef _generate_normal_std_rel_eff_sample_arch(\n free_energy_A, free_energy_B,\n params=None, state=None,\n params_cache=None\n):\n \"\"\"Wraps around _generate_normal_std_rel_eff_sample for use with arch.bootstrap.\"\"\"\n params_cache = _cache_params_arch(free_energy_A, free_energy_B,\n params_cache, compute_mean=False)\n if params is None:\n return _relative_efficiency(params_cache['std_c_A'], params_cache['std_c_B'])\n return _generate_normal_std_rel_eff_sample(**params_cache)\n\n\ndef _generate_normal_abs_bias_rel_eff_sample_arch(\n free_energy_A, free_energy_B, params=None, state=None,\n asymptotic_free_energy_A=None, asymptotic_free_energy_B=None,\n params_cache=None\n):\n \"\"\"Wraps around _generate_normal_abs_bias_rel_eff_sample for use with arch.bootstrap.\"\"\"\n params_cache = _cache_params_arch(free_energy_A, free_energy_B,\n params_cache, compute_mean=True)\n\n if params is None:\n return _abs_bias_relative_efficiency_from_params(\n params_cache['mean_c_A'], params_cache['mean_c_B'],\n asymptotic_free_energy_A, asymptotic_free_energy_B\n )\n return _generate_normal_abs_bias_rel_eff_sample(\n asymptotic_free_energy_A=asymptotic_free_energy_A,\n asymptotic_free_energy_B=asymptotic_free_energy_B,\n **params_cache\n )\n\n\ndef _generate_normal_rmse_rel_eff_sample_arch(\n free_energy_A, free_energy_B, params=None, state=None,\n 
asymptotic_free_energy_A=None, asymptotic_free_energy_B=None,\n params_cache=None\n):\n \"\"\"Wraps around _generate_normal_abs_bias_rel_eff_sample for use with arch.bootstrap.\"\"\"\n params_cache = _cache_params_arch(free_energy_A, free_energy_B,\n params_cache, compute_mean=True)\n if params is None:\n return _rmse_relative_efficiency_from_params(\n params_cache['mean_c_A'], params_cache['mean_c_B'],\n params_cache['std_c_A']**2, params_cache['std_c_B']**2,\n asymptotic_free_energy_A, asymptotic_free_energy_B,\n )\n\n return _generate_normal_rmse_rel_eff_sample(\n asymptotic_free_energy_A=asymptotic_free_energy_A,\n asymptotic_free_energy_B=asymptotic_free_energy_B,\n **params_cache\n )\n"
] | [
[
"numpy.sqrt",
"numpy.allclose",
"scipy.special.gammaln",
"numpy.abs",
"numpy.exp",
"numpy.log10",
"numpy.nonzero",
"scipy.integrate.trapz",
"numpy.std",
"numpy.array",
"numpy.mean"
]
] |
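The row above generates parametric bootstrap samples of a standard deviation by using the fact that, for normally distributed replicates, the sample standard deviation is chi-distributed with `n_replicates - 1` degrees of freedom. Below is a minimal, self-contained sketch of that idea with hypothetical inputs; the helper name and the confidence-interval usage are illustrative and not part of the module above.

```python
import numpy as np
import scipy.stats


def bootstrap_std_chi(std_c, n_replicates, n_bootstrap_samples=1000, seed=0):
    """Parametric bootstrap of a per-cost standard deviation.

    For normal data, sqrt(df) * s / sigma is chi-distributed with
    df = n_replicates - 1 degrees of freedom, so s can be sampled as
    chi.rvs(df, scale=sigma / sqrt(df)) with the observed std plugged
    in for sigma.
    """
    rng = np.random.default_rng(seed)
    df = n_replicates - 1
    scale = np.asarray(std_c) / np.sqrt(df)
    size = (n_bootstrap_samples, len(std_c))
    return scipy.stats.chi.rvs(df=df, scale=scale, size=size, random_state=rng)


# Example: 95% bootstrap interval of the std at each computational cost.
std_c = np.array([0.8, 0.5, 0.3])   # hypothetical per-cost standard deviations
samples = bootstrap_std_chi(std_c, n_replicates=10)
ci_low, ci_high = np.percentile(samples, [2.5, 97.5], axis=0)
print(ci_low, ci_high)
```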
tostenzel/thesis-projects-tostenzel | [
"18a3ee89fcd558ad2a7cfe1020980da75b9d034a"
] | [
"scrypy/jac_estimation_chol.py"
] | [
"\"\"\"\nEstimates covariance matrix for KW94 Dataset 1 with\nSimulated Max. Likelihood.\n\n\"\"\"\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport respy as rp\nfrom estimagic.differentiation.differentiation import jacobian\nfrom estimagic.inference.likelihood_covs import cov_jacobian\nfrom estimagic.optimization.optimize import maximize\n\n\ndef jac_estimation_chol(save=False):\n \"\"\"\n Estimates covariance matrix for KW94 Dataset 1 with Simulated Max. Likelihood.\n The Jacobian matrix is used instead of Hessian because it yields no inversion\n error.\n The parameters contain Cholesky factors instead of SD-Corr-Matrix because\n these factors are unconstrained. Therefore, their distribution can be estimated\n by an unconstrained normal distribution.\n\n Parameters\n ----------\n save : bool\n Indicates wether to save data.\n\n Returns\n -------\n par_estimates_chol_df : DataFrame\n Contains the estimates parameters and the not estimates fixed parameters\n in respy format.\n rand_par_chol_df : DataFrame\n Df containing variable parameters, SDs and lower and upper bound in\n estimagic format. It can be post-processed with surface/topography plot.\n cov_chol_df : DataFrame\n Df containing the covariance matrix.\n corr_chol_df : DataFrame\n DF containing the correlation matrix.\n\n \"\"\"\n # Df is sample of 1000 agents in 40 periods.\n sim_params_sdcorr, options, df = rp.get_example_model(\"kw_94_one\")\n\n # Write params in terms of Cholesky factors instead of SD-Corr-matrix.\n # This transformation holds only true for the parametrization in KW94 Dataset 1.\n # Simply change SD-Corr indices to cholesky indices.\n sim_params_chol = chol_reindex_params(sim_params_sdcorr)\n\n # Estimate parameters.\n # log_like = log_like_obs.mean(). Used for consistency with optimizers.\n # Gives log-likelihood function for mean agent.\n crit_func = rp.get_crit_func(sim_params_chol, options, df, \"log_like\")\n\n # Get constraint for parameter estimation\n constr = rp.get_parameter_constraints(\"kw_94_one\")\n # Kick out constraints for SD-Corr-Matrix. 
Cholesky factors are unconstrained.\n constr_chol = constr[1:4]\n\n _, par_estimates_chol_df = maximize(\n crit_func,\n sim_params_chol,\n \"scipy_L-BFGS-B\",\n db_options={\"rollover\": 200},\n algo_options={\"maxfun\": 1},\n constraints=constr_chol,\n dashboard=False,\n )\n\n # df will take lower and upper bounds after standard error esitmation\n # so that cols fit topography plot requirements.\n rand_par_chol_df = pd.DataFrame(\n data=par_estimates_chol_df[\"value\"].values[:27],\n index=par_estimates_chol_df[:27].index,\n columns=[\"value\"],\n )\n\n # The rest of this function estimates the variation of the estimates.\n # Log-likelihood function for sample of agents.\n log_like_obs_func = rp.get_crit_func(\n par_estimates_chol_df, options, df, version=\"log_like_obs\"\n )\n\n # Jacobian matrix.\n jacobian_matrix = jacobian(\n log_like_obs_func, par_estimates_chol_df, extrapolation=False\n )\n\n # Drop zero lines to avoid multicollinearity for matrix inversion.\n jacobian_matrix = jacobian_matrix.loc[:, (jacobian_matrix != 0).any(axis=0)]\n\n jacobian_cov_matrix = cov_jacobian(jacobian_matrix.to_numpy())\n\n jacobian_cov_matrix = cov_jacobian(jacobian_matrix.to_numpy())\n\n cov_chol_df = pd.DataFrame(\n data=jacobian_cov_matrix,\n index=par_estimates_chol_df[:27].index,\n columns=par_estimates_chol_df[:27].index,\n )\n\n corr_chol_df = cov_chol_df.copy(deep=True)\n for i in range(0, len(cov_chol_df)):\n for j in range(0, len(cov_chol_df)):\n corr_chol_df.iloc[i, j] = cov_chol_df.iloc[i, j] / (\n np.sqrt(cov_chol_df.iloc[i, i] * cov_chol_df.iloc[j, j])\n )\n\n assert -1 <= corr_chol_df.values.any() <= 1, \"Corrs must be inside [-1,1]\"\n\n # Estimate parameters.\n # log_like = log_like_obs.mean(). Used for consistency with optimizers.\n # Gives log-likelihood function for mean agent.\n crit_func = rp.get_crit_func(par_estimates_chol_df, options, df, \"log_like\")\n\n constr = rp.get_parameter_constraints(\"kw_94_one\")\n # Kick out constraints for SD-Corr-Matrix. Cholesky factors are unconstrained.\n constr_chol = constr[1:4]\n\n # Include upper and lower bounds to par_df for surface/topography plot.\n rand_par_chol_df[\"sd\"] = np.sqrt(np.diag(jacobian_cov_matrix))\n rand_par_chol_df[\"lower\"] = rand_par_chol_df[\"value\"] - 2 * rand_par_chol_df[\"sd\"]\n rand_par_chol_df[\"upper\"] = rand_par_chol_df[\"value\"] + 2 * rand_par_chol_df[\"sd\"]\n\n # Define the script path relative to the jupyter notebook that calls the script.\n abs_dir = os.path.dirname(__file__)\n if save is True:\n # Contains 3 fixed respy parameters.\n par_estimates_chol_df.to_pickle(\n os.path.join(abs_dir, \"input/est_rp_params_chol.pkl\")\n )\n # Contains only flexible parametes. Can be used for surface/topography plot.\n rand_par_chol_df.to_pickle(\n os.path.join(abs_dir, \"input/est_rand_params_chol.pkl\")\n )\n cov_chol_df.to_pickle(os.path.join(abs_dir, \"input/est_cov_chol.pkl\"))\n corr_chol_df.to_pickle(os.path.join(abs_dir, \"input/est_corr_chol.pkl\"))\n else:\n pass\n\n return par_estimates_chol_df, rand_par_chol_df, cov_chol_df, corr_chol_df\n\n\ndef chol_reindex_params(params_sdcorr):\n \"\"\"\n Creates the params Df with Cholesky factors and the right indices for\n respy. This transformation holds only true for the parametrization\n in KW94 Dataset 1.\n Thus, this function simply changes SD-Corr indices to cholesky indices.\n Without the slicing and merging, index ('maximum_exp', 'edu') yields\n an uniqueness error for the second index when (..., 'sd_edu') is set to\n (..., 'edu'). 
Yet, because we have double_indices the indices ARE unique.\n\n Parameters\n ----------\n params_sdcorr : DataFrame\n Parameters DataFrame in respy format with SD-Corr matrix elements\n\n Returns\n -------\n params_chol : DataFrame\n Parameters DataFrame in respy format with matrix elements from Choleksy\n decomposition of covariance matrix that underlies the SD-Corr matrix.\n\n \"\"\"\n p_chol_slice = params_sdcorr.iloc[17:27, :]\n # Remove unused inherited index levels.\n p_chol_slice.index = p_chol_slice.index.remove_unused_levels()\n # Use the SPECIFIC property of Dataset 1 in KW94 where SD-Corr-Matrix\n # equals Cholesky maxtrix.\n # This mean we just need to, firstly, rename the first index.\n p_chol_slice.index = p_chol_slice.index.set_levels(\n p_chol_slice.index.levels[0].str.replace(\"shocks_sdcorr\", \"shocks_chol\"),\n level=0,\n )\n\n # And secondly we need to convert the second index to respy cholesky format.\n dic = {\"sd\": \"chol\", \"corr\": \"chol\"}\n for i, j in dic.items():\n p_chol_slice.index = p_chol_slice.index.set_levels(\n p_chol_slice.index.levels[1].str.replace(i, j), level=1\n )\n\n # Insert params_chol with index in params_sdcorr by merging slices.\n part_1 = params_sdcorr.iloc[0:17, :]\n part_1.index = part_1.index.remove_unused_levels()\n part_3 = params_sdcorr.iloc[27:31, :]\n part_3.index = part_3.index.remove_unused_levels()\n\n parts = [part_1, p_chol_slice, part_3]\n params_chol = pd.concat(parts)\n\n return params_chol\n\n\nif __name__ == \"__main__\":\n jac_estimation_chol(save=True)\n"
] | [
[
"numpy.sqrt",
"pandas.DataFrame",
"pandas.concat",
"numpy.diag"
]
] |
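`jac_estimation_chol` in the row above converts the covariance matrix of the estimates into a correlation matrix with an element-wise double loop. The same transformation can be written in a few vectorized NumPy lines; the sketch below (hypothetical helper name and toy matrix) shows the equivalence.

```python
import numpy as np


def cov_to_corr(cov):
    """Vectorized equivalent of corr[i, j] = cov[i, j] / sqrt(cov[i, i] * cov[j, j])."""
    sd = np.sqrt(np.diag(cov))
    return cov / np.outer(sd, sd)


# Hypothetical 2x2 covariance matrix as a quick check.
cov = np.array([[4.0, 1.0],
                [1.0, 9.0]])
print(cov_to_corr(cov))   # off-diagonals are 1 / (2 * 3) ~= 0.167
```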
guoyk1990/Keras-FCN | [
"35afe12e514a3bf5e56bc90e69d5b329ce9ae68d"
] | [
"evaluate.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom pylab import *\nimport os\nimport sys\nimport time\nimport cv2\nfrom PIL import Image\nfrom keras.preprocessing.image import *\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import load_model\nimport keras.backend as K\n\nfrom models import *\nfrom inference import inference\n\n\ndef calculate_iou(model_name, nb_classes, res_dir, label_dir, image_list):\n conf_m = zeros((nb_classes, nb_classes), dtype=float)\n total = 0\n # mean_acc = 0.\n for img_num in image_list:\n img_num = img_num.strip('\\n')\n total += 1\n print('#%d: %s' % (total, img_num))\n pred = img_to_array(Image.open('%s/%s.png' % (res_dir, img_num))).astype(int)\n label = img_to_array(Image.open('%s/%s.png' % (label_dir, img_num))).astype(int)\n flat_pred = np.ravel(pred)\n flat_label = np.ravel(label)\n # acc = 0.\n for p, l in zip(flat_pred, flat_label):\n if l == 255:\n continue\n if l < nb_classes and p < nb_classes:\n conf_m[l, p] += 1\n else:\n print('Invalid entry encountered, skipping! Label: ', l,\n ' Prediction: ', p, ' Img_num: ', img_num)\n\n # if l==p:\n # acc+=1\n #acc /= flat_pred.shape[0]\n #mean_acc += acc\n #mean_acc /= total\n #print 'mean acc: %f'%mean_acc\n I = np.diag(conf_m)\n U = np.sum(conf_m, axis=0) + np.sum(conf_m, axis=1) - I\n IOU = I/U\n meanIOU = np.mean(IOU)\n return conf_m, IOU, meanIOU\n\n\ndef evaluate(model_name, weight_file, image_size, nb_classes, batch_size, val_file_path, data_dir, label_dir,\n label_suffix='.png',\n data_suffix='.jpg'):\n current_dir = os.path.dirname(os.path.realpath(__file__))\n save_dir = os.path.join(current_dir, 'Models/'+model_name+'/res/')\n if os.path.exists(save_dir) == False:\n os.mkdir(save_dir)\n fp = open(val_file_path)\n image_list = fp.readlines()\n fp.close()\n\n start_time = time.time()\n inference(model_name, weight_file, image_size, image_list, data_dir, label_dir, return_results=False, save_dir=save_dir,\n label_suffix=label_suffix, data_suffix=data_suffix)\n duration = time.time() - start_time\n print('{}s used to make predictions.\\n'.format(duration))\n\n start_time = time.time()\n conf_m, IOU, meanIOU = calculate_iou(model_name, nb_classes, save_dir, label_dir, image_list)\n print('IOU: ')\n print(IOU)\n print('meanIOU: %f' % meanIOU)\n print('pixel acc: %f' % (np.sum(np.diag(conf_m))/np.sum(conf_m)))\n duration = time.time() - start_time\n print('{}s used to calculate IOU.\\n'.format(duration))\n\nif __name__ == '__main__':\n # model_name = 'Atrous_DenseNet'\n model_name = 'AtrousFCN_Resnet50_16s'\n # model_name = 'DenseNet_FCN'\n weight_file = 'checkpoint_weights.hdf5'\n # weight_file = 'model.hdf5'\n image_size = (512, 512)\n nb_classes = 21\n batch_size = 1\n dataset = 'VOC2012_BERKELEY'\n if dataset == 'VOC2012_BERKELEY':\n # pascal voc + berkeley semantic contours annotations\n train_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation\n # train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation\n val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/combined_imageset_val.txt')\n data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')\n label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/combined_annotations')\n label_suffix = '.png'\n if dataset == 'COCO':\n train_file_path = 
os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation\n # train_file_path = os.path.expanduser('~/.keras/datasets/oneimage/train.txt') #Data/VOClarge/VOC2012/ImageSets/Segmentation\n val_file_path = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt')\n data_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/JPEGImages')\n label_dir = os.path.expanduser('~/.keras/datasets/VOC2012/VOCdevkit/VOC2012/SegmentationClass')\n label_suffix = '.npy'\n evaluate(model_name, weight_file, image_size, nb_classes, batch_size, val_file_path, data_dir, label_dir,\n label_suffix=label_suffix, data_suffix=data_suffix)\n"
] | [
[
"numpy.ravel",
"numpy.sum",
"numpy.mean",
"numpy.diag"
]
] |
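`calculate_iou` in the row above accumulates a label-by-prediction confusion matrix and then derives per-class IoU as `I / U` with `U = col_sum + row_sum - I`. A compact stand-alone version of that computation, with a hypothetical 3-class confusion matrix, looks like this:

```python
import numpy as np


def iou_from_confusion(conf_m):
    """Per-class IoU and mean IoU from a confusion matrix conf_m[label, prediction]."""
    intersection = np.diag(conf_m)
    union = conf_m.sum(axis=0) + conf_m.sum(axis=1) - intersection
    iou = intersection / union
    return iou, np.nanmean(iou)


# Hypothetical 3-class confusion matrix.
conf_m = np.array([[50,  2,  3],
                   [ 4, 40,  1],
                   [ 2,  3, 45]], dtype=float)
iou, mean_iou = iou_from_confusion(conf_m)
print(iou, mean_iou)
```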
Christopher-Bradshaw/emcee | [
"d2047e867afc8a2b0cce9eb3af4a56b3017aaba9"
] | [
"emcee/tests/unit/test_autocorr.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, print_function\n\nimport pytest\nimport numpy as np\n\nfrom emcee.autocorr import integrated_time, AutocorrError\n\n\ndef get_chain(seed=1234, ndim=3, N=100000):\n np.random.seed(seed)\n a = 0.9\n x = np.empty((N, ndim))\n x[0] = np.zeros(ndim)\n for i in range(1, N):\n x[i] = x[i-1] * a + np.random.rand(ndim)\n return x\n\n\ndef test_1d(seed=1234, ndim=1, N=250000):\n x = get_chain(seed=seed, ndim=ndim, N=N)\n tau = integrated_time(x)\n assert np.all(np.abs(tau - 19.0) / 19. < 0.2)\n\n\ndef test_nd(seed=1234, ndim=3, N=150000):\n x = get_chain(seed=seed, ndim=ndim, N=N)\n tau = integrated_time(x)\n assert np.all(np.abs(tau - 19.0) / 19. < 0.2)\n\n\ndef test_too_short(seed=1234, ndim=3, N=100):\n x = get_chain(seed=seed, ndim=ndim, N=N)\n with pytest.raises(AutocorrError):\n integrated_time(x)\n tau = integrated_time(x, quiet=True) # NOQA\n\n\ndef test_autocorr_multi_works():\n np.random.seed(42)\n xs = np.random.randn(16384, 2)\n\n # This throws exception unconditionally in buggy impl's\n acls_multi = integrated_time(xs)\n acls_single = np.array([integrated_time(xs[:, i])\n for i in range(xs.shape[1])])\n\n assert np.all(np.abs(acls_multi - acls_single) < 2)\n"
] | [
[
"numpy.empty",
"numpy.zeros",
"numpy.random.randn",
"numpy.random.seed",
"numpy.abs",
"numpy.random.rand"
]
] |
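The autocorrelation tests above compare the estimated integrated autocorrelation time of an AR(1) chain (`x[i] = 0.9 * x[i-1] + noise`) against 19.0. That target follows from the closed form `tau = 1 + 2 * sum_k a**k = (1 + a) / (1 - a)`, which equals 19 for `a = 0.9`. A two-line check (no emcee dependency needed):

```python
import numpy as np

a = 0.9
tau_closed_form = (1 + a) / (1 - a)                      # = 19.0
tau_truncated = 1 + 2 * np.sum(a ** np.arange(1, 1000))  # geometric sum, also ~19.0
print(tau_closed_form, tau_truncated)
```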
bqia0/CycleGAN | [
"8be914de7f75de91d2c43c4745e4292d138ff591"
] | [
"networks.py"
] | [
"\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport utils\nimport os\nfrom PIL import Image\n\n\nclass ResnetGenerator(nn.Module):\n \"\"\" Generator class utilizing resnets\"\"\"\n\n def __init__(self, input_channels, output_channels, ngf=64, normalization='instance', use_dropout=False, num_blocks=9):\n \"\"\"\n Paramters:\n input_channels: number of channels in input image\n output_channels: number of channels in output image\n ngf: number of filters use in convolutions\n normalization: batch or instance\n num_blocks: number of resnet blocks. 9 for 256x256, 6 for lower\n \"\"\"\n super(ResnetGenerator, self).__init__()\n model = []\n \n # Paper states to use instance norm, but we'll give the option of using batch\n if normalization == 'instance':\n norm_layer = nn.InstanceNorm2d\n elif normalization == 'batch':\n norm_layer = nn.BatchNorm2d\n else:\n raise NotImplementedError('ResnetGenerator: Normaliztion [%s] is not implemented' % normalization)\n\n use_bias = False\n if normalization == 'instance':\n use_bias = True\n\n \"\"\" First Convolution Block\"\"\"\n # C7s1-64 block\n model += [nn.ReflectionPad2d(3), \n nn.Conv2d(input_channels, ngf, kernel_size=7, bias=use_bias), \n norm_layer(ngf), \n nn.ReLU(True)]\n\n \"\"\"Downsampling\"\"\"\n # d128 block\n model += [nn.Conv2d(ngf, 128, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(128),\n nn.ReLU(True)]\n # d256 block\n model += [nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(256),\n nn.ReLU(True)]\n\n \"\"\"Resnet Blocks\"\"\"\n for _ in range(num_blocks):\n # R256 blocks\n model+=[ResnetBlock(256, norm_layer, use_dropout, use_bias)]\n \n \"\"\"Upsampling\"\"\"\n # U128 block\n model += [nn.ConvTranspose2d(256, 128,\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(128),\n nn.ReLU(True)]\n\n # U64 block\n model += [nn.ConvTranspose2d(128, 64,\n kernel_size=3, stride=2,\n padding=1, output_padding=1,\n bias=use_bias),\n norm_layer(64),\n nn.ReLU(True)]\n \n \"\"\"Final Convolution block\"\"\"\n # C7s1-3 block\n model += [nn.ReflectionPad2d(3), \n nn.Conv2d(64, output_channels, kernel_size=7, bias=use_bias)]\n\n # Last block uses tanh instead of ReLU\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n \n def forward(self, x):\n return self.model(x)\n\n\nclass ResnetBlock(nn.Module):\n def __init__ (self, num_filters, norm_layer, use_dropout, use_bias):\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(num_filters, norm_layer, use_dropout, use_bias)\n \n def build_conv_block(self, num_filters, norm_layer, use_dropout, use_bias):\n \"\"\"Image should come out the same size\"\"\"\n conv_block = []\n \n # Paper said reflection padding was used to reduce artifacts\n conv_block += [nn.ReflectionPad2d(1)]\n\n # Convolution with kernel size 3, and bias if InstanceNorm is used\n conv_block += [nn.Conv2d(num_filters, num_filters, kernel_size=3, bias=use_bias), \n norm_layer(num_filters), \n nn.ReLU(True)]\n\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n conv_block += [nn.ReflectionPad2d(1)]\n\n conv_block += [nn.Conv2d(num_filters, num_filters, kernel_size=3, bias=use_bias), \n norm_layer(num_filters)]\n\n return nn.Sequential(*conv_block)\n \n def forward(self, x):\n return x + self.conv_block(x)\n\nclass Discriminator(nn.Module):\n \"\"\"discriminator class trained alongisde generator\"\"\"\n\n def 
__init__(self, input_channels, ndf=64, n_layers=3, normalization='instance'):\n \"\"\"\n Paramters:\n input_channels: number of channels in input image\n ndf: number of filters use in convolutions\n n_layers: number of conv layers\n normalization: batch or instance\n \"\"\"\n super(Discriminator, self).__init__()\n model = []\n \n # Paper states to use instance norm, but we'll give the option of using batch\n if normalization == 'instance':\n norm_layer = nn.InstanceNorm2d\n elif normalization == 'batch':\n norm_layer = nn.BatchNorm2d\n else:\n raise NotImplementedError('ResnetGenerator: Normaliztion [%s] is not implemented' % normalization)\n\n use_bias = False\n if normalization == 'instance':\n use_bias = True\n \n # C64 block\n model+=[nn.Conv2d(input_channels, ndf, kernel_size=4, stride=2, padding=1), \n nn.LeakyReLU(0.2, True)]\n\n # C128, C256, C512 block\n for i in range(1, n_layers):\n mult_i = 2 ** (i-1)\n mult_o = 2 ** i\n model+=[nn.Conv2d(ndf * mult_i, ndf * mult_o, kernel_size=4, stride=2, padding=1, bias=use_bias),\n norm_layer(ndf * mult_o), \n nn.LeakyReLU(0.2, True)]\n\n mult_i = 2 ** (n_layers-1)\n mult_o = 2 ** n_layers\n model+=[nn.Conv2d(ndf * mult_i, ndf * mult_o, kernel_size=4, stride=1, padding=1, bias=use_bias),\n norm_layer(ndf * mult_o), \n nn.LeakyReLU(0.2, True)]\n \n # Final convolution\n model += [nn.Conv2d(ndf * mult_o, 1, kernel_size=4, stride=1, padding=1)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n return self.model(x)\n\nclass CycleGAN(object):\n \"\"\"CycleGAN Class\n Contains both discriminators and generators\n and all optimizers/schedulers\n \"\"\"\n def __init__(self, args):\n # Device\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # Generator Networks\n self.G_AB = ResnetGenerator(input_channels=3, output_channels=3, ngf=args.ngf, \n normalization=args.norm, use_dropout=not args.no_dropout).to(self.device) # A-> B\n\n self.G_BA = ResnetGenerator(input_channels=3, output_channels=3, ngf=args.ngf, \n normalization=args.norm, use_dropout=not args.no_dropout).to(self.device) # B-> A\n\n # Discriminator Networks\n if args.train:\n self.D_A = Discriminator(input_channels=3, ndf=args.ndf, normalization=args.norm).to(self.device)\n self.D_B = Discriminator(input_channels=3, ndf=args.ndf, normalization=args.norm).to(self.device)\n\n\n # Losses\n self.MSE = nn.MSELoss()\n self.L1 = nn.L1Loss()\n\n # Training items\n self.curr_epoch = 0\n\n self.gen_optimizer = torch.optim.Adam(list(self.G_AB.parameters()) + list(self.G_BA.parameters()), lr=args.lr, betas=(0.5, 0.999))\n self.dis_optimizer = torch.optim.Adam(list(self.D_A.parameters()) + list(self.D_B.parameters()), lr=args.lr, betas=(0.5, 0.999))\n\n self.gen_scheduler = torch.optim.lr_scheduler.LambdaLR(self.gen_optimizer, lr_lambda=utils.LambdaLR(args.epochs, args.decay_epoch).step)\n self.dis_scheduler = torch.optim.lr_scheduler.LambdaLR(self.dis_optimizer, lr_lambda=utils.LambdaLR(args.epochs, args.decay_epoch).step)\n\n # Transforms\n # https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/2c5f2b14a577753b6ce40716e42dc28b21ed775a/data/base_dataset.py#L81\n # and from default base options\n # https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/options/base_options.py\n self.train_transforms = transforms.Compose([\n transforms.Resize(args.load_size, Image.BICUBIC),\n transforms.RandomCrop(args.crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 
0.5))\n ])\n\n self.test_transforms = transforms.Compose([\n transforms.Resize(args.crop_size, Image.BICUBIC),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n\n def save_checkpoint(self, curr_epoch, args):\n state = {\n 'epoch': curr_epoch,\n 'G_AB': self.G_AB.state_dict(),\n 'G_BA': self.G_BA.state_dict(),\n 'D_A': self.D_A.state_dict(),\n 'D_B': self.D_B.state_dict(),\n 'G_Optimizer': self.gen_optimizer.state_dict(),\n 'D_Optimizer': self.dis_optimizer.state_dict(),\n # 'G_Scheduler': self.gen_scheduler.state_dict(),\n # 'D_Scheduler': self.dis_scheduler.state_dict()\n }\n file_dir = os.path.join(args.checkpoint_dir, args.dataset).replace(\"\\\\\",\"/\")\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n torch.save(state, os.path.join(file_dir, 'checkpoint.ckpt').replace(\"\\\\\",\"/\"))\n\n\n def load_checkpoint(self,args):\n file_dir = os.path.join(args.checkpoint_dir, args.dataset, 'checkpoint.ckpt').replace(\"\\\\\",\"/\")\n if os.path.isfile(file_dir):\n print(\"=> loading checkpoint '{}'\".format(file_dir))\n checkpoint = torch.load(file_dir)\n self.G_AB.load_state_dict(checkpoint['G_AB'])\n self.G_BA.load_state_dict(checkpoint['G_BA'])\n\n if args.train:\n self.curr_epoch = checkpoint['epoch']\n self.D_A.load_state_dict(checkpoint['D_A'])\n self.D_B.load_state_dict(checkpoint['D_B'])\n self.gen_optimizer.load_state_dict(checkpoint['G_Optimizer'])\n self.dis_optimizer.load_state_dict(checkpoint['D_Optimizer'])\n # self.gen_scheduler.load_state_dict(checkpoint['G_Scheduler'])\n # self.dis_scheduler.load_state_dict(checkpoint['D_Scheduler'])\n\n print(\"=> loaded checkpoint (epoch {})\".format(checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(file_dir))\n\n\n def get_dataloader(self, args):\n\n if args.train:\n mode = 'train'\n transforms = self.train_transforms\n else:\n mode = 'test'\n transforms = self.test_transforms\n\n data = utils.CycleGANDataset('./datasets/'+args.dataset, transform=transforms, mode=mode)\n\n loader = torch.utils.data.DataLoader(data,\n batch_size=args.batch_size, \n shuffle=True, \n num_workers=1)\n \n return loader\n \n def test(self, args):\n loader = self.get_dataloader(args)\n self.load_checkpoint(args)\n\n self.G_BA.eval()\n self.G_AB.eval()\n\n i = 0\n\n results_dir = os.path.join(args.results_dir, args.dataset).replace(\"\\\\\",\"/\")\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n for a_real, b_real in loader:\n\n if i == args.test_samples:\n break\n\n a_real = a_real.to(self.device)\n b_real = b_real.to(self.device)\n\n with torch.no_grad():\n a_fake = self.G_BA(b_real)\n b_fake = self.G_AB(a_real)\n a_reconstruct = self.G_BA(b_fake)\n b_reconstruct = self.G_AB(a_fake)\n i+=1\n\n output_image = (torch.cat([a_real, b_fake, a_reconstruct, b_real, a_fake, b_reconstruct], dim=0).data + 1)/ 2.0 # why add 1 then devide?\n torchvision.utils.save_image(output_image, os.path.join(results_dir, 'test_{}.jpg'.format(i)).replace(\"\\\\\",\"/\"), nrow=3)\n\n\n def train(self, args):\n # Obtain dataloaders\n loader = self.get_dataloader(args)\n\n # Generated image pools\n imagepool_a = utils.ImagePool()\n imagepool_b = utils.ImagePool()\n\n lambda_coef = args.lamda\n lambda_idt = args.idt_coef\n\n # Initialize Weights\n utils.init_weights(self.G_BA)\n utils.init_weights(self.G_AB)\n utils.init_weights(self.D_A)\n utils.init_weights(self.D_B)\n\n step = 0\n\n self.load_checkpoint(args)\n\n # Terrible hack\n self.gen_scheduler.last_epoch = self.curr_epoch - 1\n 
self.dis_scheduler.last_epoch = self.curr_epoch - 1\n \n self.G_BA.train()\n self.G_AB.train()\n\n for epoch in range(self.curr_epoch, args.epochs):\n\n for a_real, b_real in loader:\n # Send data to (ideally) GPU\n a_real = a_real.to(self.device)\n b_real = b_real.to(self.device)\n\n # batch size\n batch_size = a_real.shape[0]\n positive_labels = torch.ones(batch_size).to(self.device)\n negative_labels = torch.zeros(batch_size).to(self.device)\n \n # Generator forward passes\n a_fake = self.G_BA(b_real)\n b_fake = self.G_AB(a_real)\n\n a_reconstruct = self.G_BA(b_fake)\n b_reconstruct = self.G_AB(a_fake)\n\n a_identity = self.G_BA(a_real)\n b_identity = self.G_AB(b_real)\n\n # Identity Loss\n a_idt_loss = self.L1(a_identity, a_real) * lambda_coef * lambda_idt \n b_idt_loss = self.L1(b_identity, b_real) * lambda_coef * lambda_idt \n\n # GAN Loss\n a_fake_dis = self.D_A(a_fake)\n b_fake_dis = self.D_B(b_fake)\n\n positive_labels = torch.ones_like(a_fake_dis)\n\n a_gan_loss = self.MSE(a_fake_dis, positive_labels)\n b_gan_loss = self.MSE(b_fake_dis, positive_labels)\n\n # Cycle Loss\n a_cycle_loss = self.L1(a_reconstruct, a_real) * lambda_coef\n b_cycle_loss = self.L1(b_reconstruct, b_real) * lambda_coef\n\n # Total Loss\n total_gan_loss = a_idt_loss + b_idt_loss + a_gan_loss + b_gan_loss + a_cycle_loss + b_cycle_loss\n\n # Sample previously generated images for discriminator forward pass\n a_fake = torch.Tensor(imagepool_a(a_fake.detach().cpu().clone().numpy())) # a_fake first dim might be batch entry\n b_fake = torch.Tensor(imagepool_b(b_fake.detach().cpu().clone().numpy()))\n\n a_fake = a_fake.to(self.device)\n b_fake = b_fake.to(self.device)\n\n # Discriminator forward pass\n a_real_dis = self.D_A(a_real)\n a_fake_dis = self.D_B(a_fake)\n b_real_dis = self.D_B(b_real)\n b_fake_dis = self.D_B(b_fake)\n\n # Discriminator Losses\n positive_labels = torch.ones_like(a_fake_dis)\n negative_labels = torch.zeros_like(a_fake_dis)\n\n a_dis_real_loss = self.MSE(a_real_dis, positive_labels)\n a_dis_fake_loss = self.MSE(a_fake_dis, negative_labels)\n b_dis_real_loss = self.MSE(b_real_dis, positive_labels)\n b_dis_fake_loss = self.MSE(b_fake_dis, negative_labels)\n\n a_dis_loss = (a_dis_real_loss + a_dis_fake_loss) * 0.5\n b_dis_loss = (b_dis_real_loss + b_dis_fake_loss) * 0.5\n\n # Step\n self.gen_optimizer.zero_grad()\n total_gan_loss.backward()\n self.gen_optimizer.step()\n\n self.dis_optimizer.zero_grad()\n a_dis_loss.backward()\n b_dis_loss.backward()\n self.dis_optimizer.step()\n\n for group in self.dis_optimizer.param_groups:\n for p in group['params']:\n state = self.dis_optimizer.state[p]\n if state['step'] >= 962:\n state['step'] = 962\n\n for group in self.gen_optimizer.param_groups:\n for p in group['params']:\n state = self.gen_optimizer.state[p]\n if state['step'] >= 962:\n state['step'] = 962\n\n if (step + 1) % 5 == 0:\n print(\"Epoch: (%3d) (%5d/%5d) | Gen Loss:%.2e | Dis Loss:%.2e\" % \n (epoch, step + 1, len(loader),\n total_gan_loss,a_dis_loss+b_dis_loss))\n\n step += 1\n self.save_checkpoint(epoch+1, args)\n self.gen_scheduler.step()\n self.dis_scheduler.step()\n step = 0\n\n\n \n\n\n\n\n\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.nn.L1Loss",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.Conv2d",
"torch.nn.ReflectionPad2d",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.ConvTranspose2d",
"torch.ones_like",
"torch.ones",
"torch.load",
"torch.nn.MSELoss",
"torch.zeros_like",
"torch.nn.Tanh",
"torch.nn.Sequential",
"torch.zeros",
"torch.nn.ReLU",
"torch.nn.LeakyReLU"
]
] |
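The CycleGAN training loop above combines a least-squares adversarial term, an L1 cycle-consistency term scaled by `lambda`, and an L1 identity term scaled by `lambda * idt_coef`. The sketch below assembles those three terms for the A-to-B direction only; the network stand-ins, coefficients, and the `generator_loss` name are hypothetical and simplified relative to the full two-direction loop.

```python
import torch
import torch.nn as nn

mse, l1 = nn.MSELoss(), nn.L1Loss()
lambda_cyc, lambda_idt = 10.0, 0.5          # hypothetical coefficients


def generator_loss(G_AB, G_BA, D_B, a_real, b_real):
    """LSGAN + cycle-consistency + identity terms for the A -> B direction."""
    b_fake = G_AB(a_real)                   # translate A -> B
    a_cycle = G_BA(b_fake)                  # reconstruct A
    b_idt = G_AB(b_real)                    # identity mapping of B

    pred_fake = D_B(b_fake)
    adv = mse(pred_fake, torch.ones_like(pred_fake))       # fool the discriminator
    cyc = l1(a_cycle, a_real) * lambda_cyc                  # cycle-consistency loss
    idt = l1(b_idt, b_real) * lambda_cyc * lambda_idt       # identity loss
    return adv + cyc + idt


# Quick shape check with identity "networks" standing in for G and D.
if __name__ == "__main__":
    ident = nn.Identity()
    a = torch.randn(2, 3, 64, 64)
    b = torch.randn(2, 3, 64, 64)
    print(generator_loss(ident, ident, ident, a, b).item())
```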
daheyinyin/GAN | [
"4ae0535738fd860c611868c5bdc76298809bba1d"
] | [
"cgan64.py"
] | [
"import os,sys\nimport matplotlib.pyplot as plt\nimport itertools\nimport pickle\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.autograd as autograd\nfrom torchvision import datasets, transforms\nfrom torchvision.utils import save_image\nfrom torchvision.utils import save_image, make_grid\nfrom PIL import Image\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n# training parameters\nproject = 'cgan'\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nBATCH_SIZE = 64\nIMAGE_SIZE = 64\nFEATURES_GEN = 32#16\nFEATURES_CRITIC = 32#16\nCHANNELS_IMG =1\nNUM_CLASSES = 10\nGEN_EMBEDDING_SIZE = 100\nIMG_CHANNELS = 1\nZ_DIM = 100\nlr = 1e-4\nTRAIN_EPOCHS = 20\nCRITIC_ITERATIONS = 5\nLAMBDA_GP = 10\nfixed_noise = torch.randn([10, Z_DIM, 1, 1], dtype=torch.float32, device=device)\nfixed_labels = torch.arange(10, device=device)\n\n# results save folder\nrand_path = f'result/{project}/Random_results'\nfix_path = f'result/{project}/Fixed_results'\nif not os.path.isdir(f'result/'):\n os.mkdir(f'result/')\nif not os.path.isdir(f'result/{project}'):\n os.mkdir(f'result/{project}')\nif not os.path.isdir(rand_path):\n os.mkdir(rand_path)\nif not os.path.isdir(fix_path):\n os.mkdir(fix_path)\n\ntransform = transforms.Compose([\n transforms.Resize(IMAGE_SIZE),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5 for _ in range(IMG_CHANNELS)],\n std=[0.5 for _ in range(IMG_CHANNELS)])\n ])\nmnist_data = datasets.MNIST('../data', train=True, download=True, transform=transform)\ntrain_loader = torch.utils.data.DataLoader(mnist_data,\n batch_size=BATCH_SIZE,\n shuffle=True)\nprint(mnist_data[0][0].shape,'mnist_data max',mnist_data[0][0].max(),mnist_data[0][0].min())\n\n\n\nclass Critic(nn.Module):\n def __init__(self, features_d, channels_img, num_classes, img_size):\n super(Critic, self).__init__()\n self.disc = nn.Sequential(\n # input: N x (channels_img+1) x 64 x 64 -> 32x32\n nn.Conv2d(channels_img+1, features_d, kernel_size=4, stride=2, padding=1),\n nn.LeakyReLU(0.2),\n # _block(in_channels, out_channels, kernel_size, stride, padding)\n self._block(features_d, features_d * 2, 4, 2, 1),# 32x32 -> 16x16\n self._block(features_d * 2, features_d * 4, 4, 2, 1),# 16x16 -> 8x8\n self._block(features_d * 4, features_d * 8, 4, 2, 1),# 8x8 -> 4x4\n # 4x4 -> 1x1\n nn.Conv2d(features_d * 8, 1, kernel_size=4, stride=2, padding=0),\n )\n self.img_size = img_size\n self.embed = nn.Embedding(num_classes,img_size*img_size)\n\n def _block(self, in_channels, out_channels, kernel_size, stride, padding):\n return nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False,),\n nn.InstanceNorm2d(out_channels, affine=True),\n nn.LeakyReLU(0.2),\n )\n\n def forward(self, x,labels):\n embedding = self.embed(labels).view(labels.shape[0],1,self.img_size,self.img_size)\n x = torch.cat([x,embedding],dim=1)\n return self.disc(x)\n\n\n\nclass Generator(nn.Module):\n def __init__(self, features_g, channels_noise, channels_img, num_classes, img_size, embed_size):\n super(Generator, self).__init__()\n self.img_size = img_size\n self.net = nn.Sequential(\n # Input: N x channels_noise x 1 x 1\n self._block(channels_noise+embed_size, features_g * 16, 4, 1, 0), # img: 4x4\n self._block(features_g * 16, features_g * 8, 4, 2, 1), # img: 8x8\n self._block(features_g * 8, features_g * 4, 4, 2, 1), # img: 16x16\n self._block(features_g * 4, features_g * 2, 4, 2, 1), # img: 32x32\n nn.ConvTranspose2d(\n 
features_g * 2, channels_img, kernel_size=4, stride=2, padding=1\n ),\n # Output: N x channels_img x 64 x 64\n nn.Tanh(),\n )\n\n self.embed = nn.Embedding(num_classes, embed_size)\n\n def _block(self, in_channels, out_channels, kernel_size, stride, padding):\n return nn.Sequential(\n nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, bias=False,),\n # nn.BatchNorm2d(out_channels),\n nn.InstanceNorm2d(out_channels,affine=True),\n nn.ReLU(),\n )\n\n def forward(self, x,labels):\n embedding = self.embed(labels).unsqueeze(2).unsqueeze(3)\n x = torch.cat([x,embedding],dim=1)\n return self.net(x)\n\n\ndef weights_init(net):\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.02)\n nn.init.constant_(m.bias, 0)\n\n\n\n\ndef show_train_loss(hist, show = False, save = False, path = 'Train_loss.png'):\n x = range(len(hist['D_losses']))\n\n y1 = hist['D_losses']\n y2 = hist['G_losses']\n\n plt.plot(x, y1, label='D_loss')\n plt.plot(x, y2, label='G_loss')\n\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n\n plt.legend(loc=4)\n plt.grid(True)\n plt.tight_layout()\n\n if save:\n plt.savefig(path)\n if show:\n plt.show()\n else:\n plt.close()\n\ndef compute_gradient_penalty(critic, labels, real_samples, fake_samples):\n \"\"\"Calculates the gradient penalty loss for WGAN GP\"\"\"\n # Random weight term for interpolation between real and fake samples\n BATCH_SIZE, C, H, W = real_samples.shape\n alpha = torch.rand((BATCH_SIZE, 1, 1, 1), device=device).repeat(1, C, H, W)\n interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)\n # critic scores\n mix_scores = critic(interpolates, labels)\n gradients = autograd.grad(\n outputs=mix_scores,\n inputs=interpolates,\n grad_outputs=torch.ones_like(mix_scores,device=device),\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(gradients.shape[0],-1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() # norm 返回指定维度上的P范数\n return gradient_penalty\n\nnetG = Generator(FEATURES_GEN, Z_DIM, CHANNELS_IMG, NUM_CLASSES, IMAGE_SIZE, GEN_EMBEDDING_SIZE).apply(weights_init).to(device)\nnetC = Critic(FEATURES_CRITIC, CHANNELS_IMG, NUM_CLASSES, IMAGE_SIZE).apply(weights_init).to(device)\n# the progression of the generator\n\n\n\n# Binary Cross Entropy loss\n# loss_fn = nn.BCELoss().to(device)\n# optimizer\n# optimizerG = optim.RMSprop(netG.parameters(), lr=lr)\n# optimizerC = optim.RMSprop(netC.parameters(), lr=lr)\noptimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(0.0,0.9)) # beta的设置非常关键\noptimizerC = optim.Adam(netC.parameters(), lr=lr, betas=(0.0,0.9))\nlosses = {}\nlosses['D_losses'] = []\nlosses['G_losses'] = []\n\n\n\n# train\nfor epoch in range(TRAIN_EPOCHS):\n D_losses = []\n G_losses = []\n for step, (data, target) in enumerate(train_loader):\n # Update C network: maximize Epr[C(x)] - C(G(z))\n real_img = data.to(device)\n target = target.to(device)\n bs_size = real_img.shape[0]\n\n for _ in range(CRITIC_ITERATIONS):\n noise = torch.randn([bs_size, Z_DIM, 1, 1], dtype=torch.float32, device=device)\n fake_img = netG(noise,target)#.detach()\n fake_out = netC(fake_img,target).reshape(-1)\n real_out = netC(real_img,target).reshape(-1)\n gp = compute_gradient_penalty(netC,target, real_img,fake_img)\n loss_C = (-(real_out.mean() - fake_out.mean()) + LAMBDA_GP * gp)\n 
optimizerC.zero_grad()\n loss_C.backward(retain_graph = True)\n optimizerC.step()\n D_losses.append(loss_C.item())\n\n\n\n # Update G network: minimize Epr[C(x)] - C(G(z))\n # noise = torch.randn([bs_size, 100, 1, 1], dtype=torch.float32, device=device)\n # fake_img = netG(noise)\n output = netC(fake_img,target).reshape(-1)\n loss_G = -output.mean()\n optimizerG.zero_grad()\n loss_G.backward()\n optimizerG.step()\n G_losses.append(loss_G.item())\n\n losses['D_losses'].append(torch.mean(torch.FloatTensor(D_losses)))\n losses['G_losses'].append(torch.mean(torch.FloatTensor(G_losses)))\n\n print('[%d/%d]: loss_d: %.3f, loss_g: %.3f' % (\n (epoch + 1), TRAIN_EPOCHS, torch.FloatTensor(losses['D_losses']).mean(),\n torch.FloatTensor(losses['G_losses']).mean()))\n save_image(fake_img.data[:25], rand_path + '/epoch_{:04d}.png'.format(epoch), nrow=5, normalize=True)\n fix_result = netG(fixed_noise,fixed_labels)\n save_image(fix_result.data[:25], fix_path + '/epoch_{:04d}.png'.format(epoch), nrow=5, normalize=True)\n# 保存训练loss\nwith open(f\"result/{project}/train_hist.pkl\", 'wb') as f:\n pickle.dump(losses, f)\n# 画loss\nshow_train_loss(losses, save=True, path=f\"result/{project}/MNIST_GAN_train_hist.png\")\n\nprint(\"Training finish!... save training results\")\nif not os.path.isdir(f'result/{project}/checkpoints'):\n os.mkdir(f'result/{project}/checkpoints')\ntorch.save(netG.state_dict(), f\"result/{project}/generator_param.pkl\")\ntorch.save(netC.state_dict(), f\"result/{project}/critic_param.pkl\")\n\n\n# eval\nnetG.load_state_dict(torch.load(f\"result/{project}/generator_param.pkl\"))\nresult = netG(torch.randn((10, Z_DIM, 1, 1)).to(device),fixed_labels)\nsave_image(result.data[:10], f'result/{project}/eval.png', nrow=5, normalize=True)\n\n"
] | [
[
"torch.utils.data.DataLoader",
"matplotlib.pyplot.tight_layout",
"torch.rand",
"torch.cuda.is_available",
"matplotlib.pyplot.ylabel",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"matplotlib.pyplot.plot",
"torch.cat",
"torch.nn.ConvTranspose2d",
"torch.nn.init.kaiming_normal_",
"torch.randn",
"matplotlib.pyplot.savefig",
"torch.nn.init.normal_",
"torch.arange",
"torch.ones_like",
"torch.load",
"matplotlib.pyplot.close",
"torch.FloatTensor",
"matplotlib.pyplot.legend",
"torch.nn.init.constant_",
"matplotlib.pyplot.grid",
"torch.nn.Embedding",
"torch.nn.Tanh",
"matplotlib.pyplot.show",
"torch.nn.ReLU",
"matplotlib.pyplot.xlabel",
"torch.nn.LeakyReLU"
]
] |
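`compute_gradient_penalty` in the row above implements the WGAN-GP penalty: interpolate between real and fake batches, score the interpolates with the critic, and penalize the squared deviation of the gradient norm from 1. A stand-alone, unconditional variant is sketched below; the class-label argument of the conditional critic is dropped for brevity, and the toy critic is purely illustrative.

```python
import torch
import torch.nn as nn


def gradient_penalty(critic, real, fake, device="cpu"):
    """WGAN-GP penalty: E[(||grad_x critic(x_interp)||_2 - 1)^2]."""
    batch = real.shape[0]
    alpha = torch.rand(batch, 1, 1, 1, device=device)        # broadcast over C, H, W
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    scores = critic(interp)
    grads = torch.autograd.grad(
        outputs=scores, inputs=interp,
        grad_outputs=torch.ones_like(scores),
        create_graph=True, retain_graph=True, only_inputs=True,
    )[0].view(batch, -1)
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()


# Toy critic just to exercise the function.
critic = nn.Sequential(nn.Flatten(), nn.Linear(1 * 8 * 8, 1))
real = torch.randn(4, 1, 8, 8)
fake = torch.randn(4, 1, 8, 8)
print(gradient_penalty(critic, real, fake).item())
```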
zastari/tropycal | [
"6fce00ccd246b4c4454b9bf0a483b551909cf2a2"
] | [
"tests/tracks.py"
] | [
"\"\"\"Tests for the `skewt` module.\"\"\"\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpy.testing\nimport datetime as dt\nimport pytest\n\nfrom cartopy import crs as ccrs\nimport tropycal.tracks as tracks\n\ndef assert_almost_equal(actual, desired, decimal=7):\n \"\"\"Check that values are almost equal, including units.\n Wrapper around :func:`numpy.testing.assert_almost_equal`\n \"\"\"\n actual, desired = check_and_drop_units(actual, desired)\n numpy.testing.assert_almost_equal(actual, desired, decimal)\n\n\ndef assert_array_almost_equal(actual, desired, decimal=7):\n \"\"\"Check that arrays are almost equal, including units.\n Wrapper around :func:`numpy.testing.assert_array_almost_equal`\n \"\"\"\n actual, desired = check_and_drop_units(actual, desired)\n numpy.testing.assert_array_almost_equal(actual, desired, decimal)\n\n\ndef assert_array_equal(actual, desired):\n numpy.testing.assert_array_equal(actual, desired)\n\n#@pytest.mark.mpl_image_compare(tolerance=.03, remove_text=False, style='default')\ndef test_code():\n\n @pytest.mark.mpl_image_compare(tolerance=.03, remove_text=False, style='default')\n def test_plot(methodToRun, proj, use_ax, positional_arguments, keyword_arguments, use_figsize=(14,9), use_dpi=200, ax_in_dict=False):\n \n if use_ax == True:\n fig = plt.figure(figsize=use_figsize,dpi=use_dpi)\n ax = plt.axes(projection=proj)\n ax = methodToRun(*positional_arguments, ax=ax, **keyword_arguments)\n else:\n fig = plt.figure(figsize=use_figsize,dpi=use_dpi)\n ax = methodToRun(*positional_arguments, **keyword_arguments)\n \n if ax_in_dict == True:\n ax = ax['ax']\n \n return fig\n\n #method2(storm.plot, ['spam'], {'ham': 'ham'})\n \n #Retrieve HURDAT2 reanalysis dataset for North Atlantic\n hurdat_atl = tracks.TrackDataset()\n\n \n #Assign all tornadoes to storm\n hurdat_atl.assign_storm_tornadoes()\n \n #------------------------------------------------------------\n \n #Search name\n hurdat_atl.search_name('michael')\n \n #Test getting storm ID\n storm_id = hurdat_atl.get_storm_id(('michael', 2018))\n if storm_id != 'AL142018':\n raise AssertionError(\"Incorrect type\")\n \n #Test retrieving hurricane Michael (2018)\n storm = hurdat_atl.get_storm(('michael', 2018))\n \n #Cartopy proj\n proj = ccrs.PlateCarree(central_longitude=0.0)\n \n #Make plot of storm track\n test_plot(storm.plot, proj, True, [], {'return_ax': True}, use_figsize=(14,9))\n \n #Get NHC discussion\n disco = storm.get_nhc_discussion(forecast=1)\n \n #Plot NHC forecast\n test_plot(storm.plot_nhc_forecast, proj, True, [], {'forecast': 1, 'return_ax': True}, use_figsize=(14,9))\n #ax = storm.plot_nhc_forecast(forecast=1,return_ax=True)\n \n #Plot storm tornadoes\n #test_plot(storm.plot_tors, proj, [], {'return_ax': True})\n #ax = storm.plot_tors(return_ax=True)\n \n #Plot rotated tornadoes\n test_plot(storm.plot_TCtors_rotated, proj, False, [], {'return_ax': True}, use_figsize=(9,9), use_dpi=150)\n #ax = storm.plot_TCtors_rotated(return_ax=True)\n \n #Convert to datatypes\n storm.to_dict()\n storm.to_xarray()\n storm.to_dataframe()\n \n #------------------------------------------------------------\n \n #Test retrieving season\n ax = season = hurdat_atl.get_season(2017)\n \n #Make plot of season\n test_plot(season.plot, proj, True, [], {'return_ax': True}, use_figsize=(14,9))\n #ax = season.plot(return_ax=True)\n \n #Annual summary\n season.annual_summary()\n \n #Dataframe\n season.to_dataframe()\n \n #------------------------------------------------------------\n \n 
#Rank storms\n hurdat_atl.rank_storm('ace')\n \n #Gridded stats\n test_plot(hurdat_atl.gridded_stats, proj, True, ['maximum wind'], {}, use_figsize=(14,9))\n #hurdat_atl.gridded_stats('maximum wind',return_ax=True)\n "
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axes"
]
] |
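The test helper above builds a figure with a cartopy `PlateCarree` axes, hands the axes to a plotting method, and returns the figure so `pytest-mpl` image comparison can check it against a baseline. A generic sketch of that pattern, with a hypothetical `dummy_plot` standing in for the tropycal plotting methods:

```python
import matplotlib.pyplot as plt
import cartopy.crs as ccrs


def make_map_figure(plot_method, *args, figsize=(14, 9), dpi=200, **kwargs):
    """Create a figure with a PlateCarree axes, pass the axes to the plotting
    method, and return the figure for pytest-mpl image comparison."""
    fig = plt.figure(figsize=figsize, dpi=dpi)
    ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=0.0))
    plot_method(*args, ax=ax, **kwargs)
    return fig


# Hypothetical stand-in for a tropycal plotting method.
def dummy_plot(ax=None):
    ax.coastlines()


fig = make_map_figure(dummy_plot)
plt.close(fig)
```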
acobo/keras-YOLOv3-model-set | [
"6d7f7f2474dda43c112a9e0321447109a446ac69"
] | [
"yolo3/models/yolo3_nano.py"
] | [
"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"YOLO_v3 Nano Model Defined in Keras.\"\"\"\r\n\r\nimport os\r\nfrom keras_applications.imagenet_utils import _obtain_input_shape\r\nfrom tensorflow.keras.utils import get_source_inputs, get_file\r\nfrom tensorflow.keras.layers import UpSampling2D, Concatenate, Dense, Multiply, Add, Lambda, Input\r\nfrom tensorflow.keras.layers import Conv2D, DepthwiseConv2D, Concatenate, BatchNormalization, ReLU, ZeroPadding2D, GlobalAveragePooling2D, GlobalMaxPooling2D, Softmax\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras import backend as K\r\n\r\nfrom yolo3.models.layers import compose, DarknetConv2D\r\n\r\n\r\ndef correct_pad(backend, inputs, kernel_size):\r\n \"\"\"Returns a tuple for zero-padding for 2D convolution with downsampling.\r\n\r\n # Arguments\r\n input_size: An integer or tuple/list of 2 integers.\r\n kernel_size: An integer or tuple/list of 2 integers.\r\n\r\n # Returns\r\n A tuple.\r\n \"\"\"\r\n img_dim = 2 if backend.image_data_format() == 'channels_first' else 1\r\n input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]\r\n\r\n if isinstance(kernel_size, int):\r\n kernel_size = (kernel_size, kernel_size)\r\n\r\n if input_size[0] is None:\r\n adjust = (1, 1)\r\n else:\r\n adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)\r\n\r\n correct = (kernel_size[0] // 2, kernel_size[1] // 2)\r\n\r\n return ((correct[0] - adjust[0], correct[0]),\r\n (correct[1] - adjust[1], correct[1]))\r\n\r\n\r\ndef NanoConv2D_BN_Relu6(*args, **kwargs):\r\n \"\"\"Darknet Convolution2D followed by BatchNormalization and ReLU6.\"\"\"\r\n nano_name = kwargs.get('name')\r\n if nano_name:\r\n name_kwargs = {'name': nano_name + '_conv2d'}\r\n name_kwargs.update(kwargs)\r\n bn_name = nano_name + '_BN'\r\n relu_name = nano_name + '_relu'\r\n else:\r\n name_kwargs = {}\r\n name_kwargs.update(kwargs)\r\n bn_name = None\r\n relu_name = None\r\n\r\n no_bias_kwargs = {'use_bias': False}\r\n no_bias_kwargs.update(name_kwargs)\r\n return compose(\r\n DarknetConv2D(*args, **no_bias_kwargs),\r\n BatchNormalization(name=bn_name),\r\n ReLU(6., name=relu_name))\r\n\r\n\r\ndef _ep_block(inputs, filters, stride, expansion, block_id):\r\n #in_channels = backend.int_shape(inputs)[-1]\r\n in_channels = inputs.shape.as_list()[-1]\r\n\r\n pointwise_conv_filters = int(filters)\r\n x = inputs\r\n prefix = 'ep_block_{}_'.format(block_id)\r\n\r\n # Expand\r\n x = Conv2D(int(expansion * in_channels), kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'expand')(x)\r\n x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'expand_BN')(x)\r\n x = ReLU(6., name=prefix + 'expand_relu')(x)\r\n\r\n # Depthwise\r\n if stride == 2:\r\n x = ZeroPadding2D(padding=correct_pad(K, x, 3), name=prefix + 'pad')(x)\r\n\r\n x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same' if stride == 1 else 'valid', name=prefix + 'depthwise')(x)\r\n x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'depthwise_BN')(x)\r\n x = ReLU(6., name=prefix + 'depthwise_relu')(x)\r\n\r\n # Project\r\n x = Conv2D(pointwise_conv_filters, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'project')(x)\r\n x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x)\r\n\r\n if in_channels == pointwise_conv_filters and stride == 1:\r\n return Add(name=prefix + 'add')([inputs, x])\r\n return x\r\n\r\n\r\ndef _pep_block(inputs, proj_filters, 
filters, stride, expansion, block_id):\r\n #in_channels = backend.int_shape(inputs)[-1]\r\n in_channels = inputs.shape.as_list()[-1]\r\n\r\n pointwise_conv_filters = int(filters)\r\n x = inputs\r\n prefix = 'pep_block_{}_'.format(block_id)\r\n\r\n\r\n # Pre-project\r\n x = Conv2D(proj_filters, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'preproject')(x)\r\n x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'preproject_BN')(x)\r\n x = ReLU(6., name=prefix + 'preproject_relu')(x)\r\n\r\n # Expand\r\n #x = Conv2D(int(expansion * in_channels), kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'expand')(x)\r\n x = Conv2D(int(expansion * proj_filters), kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'expand')(x)\r\n x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'expand_BN')(x)\r\n x = ReLU(6., name=prefix + 'expand_relu')(x)\r\n\r\n # Depthwise\r\n if stride == 2:\r\n x = ZeroPadding2D(padding=correct_pad(K, x, 3), name=prefix + 'pad')(x)\r\n\r\n x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None, use_bias=False, padding='same' if stride == 1 else 'valid', name=prefix + 'depthwise')(x)\r\n x = BatchNormalization(epsilon=1e-3, momentum=0.999, name=prefix + 'depthwise_BN')(x)\r\n x = ReLU(6., name=prefix + 'depthwise_relu')(x)\r\n\r\n # Project\r\n x = Conv2D(pointwise_conv_filters, kernel_size=1, padding='same', use_bias=False, activation=None, name=prefix + 'project')(x)\r\n x = BatchNormalization( epsilon=1e-3, momentum=0.999, name=prefix + 'project_BN')(x)\r\n\r\n if in_channels == pointwise_conv_filters and stride == 1:\r\n return Add(name=prefix + 'add')([inputs, x])\r\n return x\r\n\r\n\r\n#def expand_dims2d(x):\r\n #x = K.expand_dims(x, axis=1)\r\n #x = K.expand_dims(x, axis=2)\r\n #return x\r\n\r\n\r\ndef expand_upsampling2d(args):\r\n import tensorflow as tf\r\n x = args[0]\r\n inputs = args[1]\r\n in_shapes = K.shape(inputs)[1:3]\r\n x = K.expand_dims(x, axis=1)\r\n x = K.expand_dims(x, axis=2)\r\n x = tf.image.resize(x, in_shapes)\r\n return x\r\n\r\n\r\ndef _fca_block(inputs, reduct_ratio, block_id):\r\n in_channels = inputs.shape.as_list()[-1]\r\n #in_shapes = inputs.shape.as_list()[1:3]\r\n reduct_channels = int(in_channels // reduct_ratio)\r\n prefix = 'fca_block_{}_'.format(block_id)\r\n\r\n x = GlobalAveragePooling2D(name=prefix + 'average_pooling')(inputs)\r\n x = Dense(reduct_channels, activation='relu', name=prefix + 'fc1')(x)\r\n x = Dense(in_channels, activation='sigmoid', name=prefix + 'fc2')(x)\r\n #x = Lambda(expand_dims2d, name=prefix + 'expand_dims2d')(x)\r\n #x = UpSampling2D(in_shapes, name=prefix + 'upsample')(x)\r\n x = Lambda(expand_upsampling2d, name=prefix + 'expand_upsample')([x, inputs])\r\n x = Multiply(name=prefix + 'multiply')([x, inputs])\r\n return x\r\n\r\n\r\nEP_EXPANSION = 2\r\nPEP_EXPANSION = 2\r\n\r\ndef nano_net_body(x):\r\n '''YOLO Nano backbone network body'''\r\n x = NanoConv2D_BN_Relu6(12, (3,3), name='Conv_1')(x)\r\n x = NanoConv2D_BN_Relu6(24, (3,3), strides=2, name='Conv_2')(x)\r\n x = _pep_block(x, proj_filters=7, filters=24, stride=1, expansion=PEP_EXPANSION, block_id=1)\r\n x = _ep_block(x, filters=70, stride=2, expansion=EP_EXPANSION, block_id=1)\r\n x = _pep_block(x, proj_filters=25, filters=70, stride=1, expansion=PEP_EXPANSION, block_id=2)\r\n x = _pep_block(x, proj_filters=24, filters=70, stride=1, expansion=PEP_EXPANSION, block_id=3)\r\n x = _ep_block(x, filters=150, stride=2, 
expansion=EP_EXPANSION, block_id=2)\r\n x = _pep_block(x, proj_filters=56, filters=150, stride=1, expansion=PEP_EXPANSION, block_id=4)\r\n x = NanoConv2D_BN_Relu6(150, (1,1), name='Conv_pw_1')(x)\r\n x = _fca_block(x, reduct_ratio=8, block_id=1)\r\n x = _pep_block(x, proj_filters=73, filters=150, stride=1, expansion=PEP_EXPANSION, block_id=5)\r\n x = _pep_block(x, proj_filters=71, filters=150, stride=1, expansion=PEP_EXPANSION, block_id=6)\r\n x = _pep_block(x, proj_filters=75, filters=150, stride=1, expansion=PEP_EXPANSION, block_id=7)\r\n x = _ep_block(x, filters=325, stride=2, expansion=EP_EXPANSION, block_id=3)\r\n x = _pep_block(x, proj_filters=132, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=8)\r\n x = _pep_block(x, proj_filters=124, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=9)\r\n x = _pep_block(x, proj_filters=141, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=10)\r\n x = _pep_block(x, proj_filters=140, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=11)\r\n x = _pep_block(x, proj_filters=137, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=12)\r\n x = _pep_block(x, proj_filters=135, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=13)\r\n x = _pep_block(x, proj_filters=133, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=14)\r\n x = _pep_block(x, proj_filters=140, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=15)\r\n x = _ep_block(x, filters=545, stride=2, expansion=EP_EXPANSION, block_id=4)\r\n x = _pep_block(x, proj_filters=276, filters=545, stride=1, expansion=PEP_EXPANSION, block_id=16)\r\n x = NanoConv2D_BN_Relu6(230, (1,1), name='Conv_pw_2')(x)\r\n x = _ep_block(x, filters=489, stride=1, expansion=EP_EXPANSION, block_id=5)\r\n x = _pep_block(x, proj_filters=213, filters=469, stride=1, expansion=PEP_EXPANSION, block_id=17)\r\n x = NanoConv2D_BN_Relu6(189, (1,1), name='Conv_pw_3')(x)\r\n\r\n return x\r\n\r\n\r\ndef yolo3_nano_body(inputs, num_anchors, num_classes, weights_path=None):\r\n \"\"\"\r\n Create YOLO_V3 Nano model CNN body in Keras.\r\n\r\n Reference Paper:\r\n \"YOLO Nano: a Highly Compact You Only Look Once Convolutional Neural Network for Object Detection\"\r\n https://arxiv.org/abs/1910.01271\r\n \"\"\"\r\n nano_net = NanoNet(input_tensor=inputs, weights='imagenet', include_top=False)\r\n if weights_path is not None:\r\n nano_net.load_weights(weights_path, by_name=True)\r\n print('Load weights {}.'.format(weights_path))\r\n\r\n # input: 416 x 416 x 3\r\n # Conv_pw_3_relu: 13 x 13 x 189\r\n # pep_block_15_add: 26 x 26 x 325\r\n # pep_block_7_add: 52 x 52 x 150\r\n\r\n f1 = nano_net.get_layer('Conv_pw_3').output\r\n # f1 :13 x 13 x 189\r\n y1 = _ep_block(f1, filters=462, stride=1, expansion=EP_EXPANSION, block_id=6)\r\n y1 = DarknetConv2D(num_anchors * (num_classes + 5), (1,1))(y1)\r\n x = compose(\r\n NanoConv2D_BN_Relu6(105, (1,1)),\r\n UpSampling2D(2))(f1)\r\n\r\n\r\n f2 = nano_net.get_layer('pep_block_15_add').output\r\n # f2: 26 x 26 x 325\r\n x = Concatenate()([x,f2])\r\n\r\n x = _pep_block(x, proj_filters=113, filters=325, stride=1, expansion=PEP_EXPANSION, block_id=18)\r\n x = _pep_block(x, proj_filters=99, filters=207, stride=1, expansion=PEP_EXPANSION, block_id=19)\r\n x = DarknetConv2D(98, (1,1))(x)\r\n\r\n y2 = _ep_block(x, filters=183, stride=1, expansion=EP_EXPANSION, block_id=7)\r\n y2 = DarknetConv2D(num_anchors * (num_classes + 5), (1,1))(y2)\r\n\r\n x = compose(\r\n NanoConv2D_BN_Relu6(47, (1,1)),\r\n UpSampling2D(2))(x)\r\n\r\n\r\n f3 = 
nano_net.get_layer('pep_block_7_add').output\r\n # f3 : 52 x 52 x 150\r\n x = Concatenate()([x, f3])\r\n\r\n x = _pep_block(x, proj_filters=58, filters=122, stride=1, expansion=PEP_EXPANSION, block_id=20)\r\n x = _pep_block(x, proj_filters=52, filters=87, stride=1, expansion=PEP_EXPANSION, block_id=21)\r\n x = _pep_block(x, proj_filters=47, filters=93, stride=1, expansion=PEP_EXPANSION, block_id=22)\r\n y3 = DarknetConv2D(num_anchors * (num_classes + 5), (1,1))(x)\r\n\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])\r\n\r\n\r\nBASE_WEIGHT_PATH = (\r\n 'https://github.com/david8862/keras-YOLOv3-model-set/'\r\n 'releases/download/v1.0.1/')\r\n\r\ndef NanoNet(input_shape=None,\r\n input_tensor=None,\r\n include_top=True,\r\n weights='imagenet',\r\n pooling=None,\r\n classes=1000,\r\n **kwargs):\r\n \"\"\"Generate nano net model for Imagenet classification.\"\"\"\r\n\r\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\r\n raise ValueError('The `weights` argument should be either '\r\n '`None` (random initialization), `imagenet` '\r\n '(pre-training on ImageNet), '\r\n 'or the path to the weights file to be loaded.')\r\n\r\n if weights == 'imagenet' and include_top and classes != 1000:\r\n raise ValueError('If using `weights` as `\"imagenet\"` with `include_top`'\r\n ' as true, `classes` should be 1000')\r\n\r\n # Determine proper input shape\r\n input_shape = _obtain_input_shape(input_shape,\r\n default_size=224,\r\n min_size=28,\r\n data_format=K.image_data_format(),\r\n require_flatten=include_top,\r\n weights=weights)\r\n\r\n if input_tensor is None:\r\n img_input = Input(shape=input_shape)\r\n else:\r\n img_input = input_tensor\r\n\r\n x = nano_net_body(img_input)\r\n\r\n if include_top:\r\n model_name='nano_net'\r\n x = DarknetConv2D(classes, (1, 1))(x)\r\n x = GlobalAveragePooling2D(name='avg_pool')(x)\r\n x = Softmax()(x)\r\n else:\r\n model_name='nano_net_headless'\r\n if pooling == 'avg':\r\n x = GlobalAveragePooling2D(name='avg_pool')(x)\r\n elif pooling == 'max':\r\n x = GlobalMaxPooling2D(name='max_pool')(x)\r\n\r\n # Ensure that the model takes into account\r\n # any potential predecessors of `input_tensor`.\r\n if input_tensor is not None:\r\n inputs = get_source_inputs(input_tensor)\r\n else:\r\n inputs = img_input\r\n\r\n # Create model.\r\n model = Model(inputs, x, name=model_name)\r\n\r\n # Load weights.\r\n if weights == 'imagenet':\r\n if include_top:\r\n file_name = 'nanonet_weights_tf_dim_ordering_tf_kernels_224.h5'\r\n weight_path = BASE_WEIGHT_PATH + file_name\r\n else:\r\n file_name = 'nanonet_weights_tf_dim_ordering_tf_kernels_224_no_top.h5'\r\n weight_path = BASE_WEIGHT_PATH + file_name\r\n\r\n weights_path = get_file(file_name, weight_path, cache_subdir='models')\r\n model.load_weights(weights_path)\r\n elif weights is not None:\r\n model.load_weights(weights)\r\n\r\n return model\r\n\r\n"
] | [
[
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.backend.expand_dims",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Multiply",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.utils.get_source_inputs",
"tensorflow.image.resize",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.backend.shape",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.utils.get_file",
"tensorflow.keras.layers.Input"
]
] |
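A minimal usage sketch for the YOLO Nano builders in the entry above, assuming the file is importable as yolo3.models.yolo3_nano (the import path is inferred from the module's own imports and is an assumption, as are the anchor and class counts):

    from tensorflow.keras.layers import Input
    from yolo3.models.yolo3_nano import yolo3_nano_body  # hypothetical import path

    num_anchors = 3    # anchors per output scale (assumption)
    num_classes = 80   # e.g. COCO (assumption)
    inputs = Input(shape=(416, 416, 3))
    # NanoNet() inside yolo3_nano_body is called with weights='imagenet', so it
    # may try to download pretrained backbone weights on first use.
    model = yolo3_nano_body(inputs, num_anchors, num_classes)
    model.summary()    # three detection heads on 13x13, 26x26 and 52x52 grids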
ljackson707/clustering-exercises | [
"a12eac030b01a0f6dc7abfc024272a004b64e307"
] | [
"summarize.py"
] | [
"import pandas as pd\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef df_summary (df):\n info = df.info()\n describe = df.describe()\n nulls = df.isnull().sum()/len(df)*100\n value_count = df.value_counts()\n return info, describe, nulls, value_count\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef missing_zero_values_table(df):\n '''This function will look at any data set and report back on zeros and nulls for every column while also giving percentages of total values\n and also the data types. The message prints out the shape of the data frame and also tells you how many columns have nulls '''\n zero_val = (df == 0.00).astype(int).sum(axis=0)\n null_count = df.isnull().sum()\n mis_val_percent = 100 * df.isnull().sum() / len(df)\n mz_table = pd.concat([zero_val, null_count, mis_val_percent], axis=1)\n mz_table = mz_table.rename(\n columns = {0 : 'Zero Values', 1 : 'null_count', 2 : '% of Total Values'})\n mz_table['Total Zeroes + Null Values'] = mz_table['Zero Values'] + mz_table['null_count']\n mz_table['% Total Zero + Null Values'] = 100 * mz_table['Total Zeroes + Null Values'] / len(df)\n mz_table['Data Type'] = df.dtypes\n mz_table = mz_table[\n mz_table.iloc[:,1] >= 0].sort_values(\n '% of Total Values', ascending=False).round(1)\n print (\"Your selected dataframe has \" + str(df.shape[1]) + \" columns and \" + str(df.shape[0]) + \" Rows.\\n\" \n \"There are \" + str((mz_table['null_count'] != 0).sum()) +\n \" columns that have NULL values.\")\n return mz_table, missing_zero_values_table(df)"
] | [
[
"pandas.concat"
]
] |
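A brief usage sketch for the summarize.py helpers in the entry above, assuming the file is importable as summarize (inferred from the listed file_path) and that missing_zero_values_table returns the summary table as in the corrected version:

    import pandas as pd
    from summarize import missing_zero_values_table  # hypothetical import

    df = pd.DataFrame({'a': [0, 1, None, 4], 'b': [2.0, 0.0, 3.0, None]})
    mz_table = missing_zero_values_table(df)  # prints the shape/null message
    print(mz_table)                           # per-column zero and null breakdown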
davidkirwan/tensorflow | [
"185a465225a520a1855145efda58b17b1a83d3a5"
] | [
"tensorflow/python/ops/variables.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Variable class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport enum # pylint: disable=g-bad-import-order\nimport itertools\nimport functools\nimport os\nimport six\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import variable_pb2\nfrom tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import\nfrom tensorflow.python import _pywrap_utils\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_should_use\nfrom tensorflow.python.util.deprecation import deprecated\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef default_variable_creator(_, **kwds):\n del kwds\n raise NotImplementedError(\"variable_scope needs to be imported\")\n\n\ndef default_variable_creator_v2(_, **kwds):\n del kwds\n raise NotImplementedError(\"variable_scope needs to be imported\")\n\n\ndef _make_getter(captured_getter, captured_previous):\n \"\"\"To avoid capturing loop variables.\"\"\"\n\n def getter(**kwargs):\n return captured_getter(captured_previous, **kwargs)\n\n return getter\n\n\n@tf_export(\"VariableSynchronization\")\nclass VariableSynchronization(enum.Enum):\n \"\"\"Indicates when a distributed variable will be synced.\n\n * `AUTO`: Indicates that the synchronization will be determined by the current\n `DistributionStrategy` (eg. With `MirroredStrategy` this would be\n `ON_WRITE`).\n * `NONE`: Indicates that there will only be one copy of the variable, so\n there is no need to sync.\n * `ON_WRITE`: Indicates that the variable will be updated across devices\n every time it is written.\n * `ON_READ`: Indicates that the variable will be aggregated across devices\n when it is read (eg. 
when checkpointing or when evaluating an op that uses\n the variable).\n \"\"\"\n AUTO = 0\n NONE = 1\n ON_WRITE = 2\n ON_READ = 3\n\n\n# LINT.IfChange\n@tf_export(\"VariableAggregation\", v1=[])\nclass VariableAggregationV2(enum.Enum):\n \"\"\"Indicates how a distributed variable will be aggregated.\n\n `tf.distribute.Strategy` distributes a model by making multiple copies\n (called \"replicas\") acting data-parallel on different elements of the input\n batch. When performing some variable-update operation, say\n `var.assign_add(x)`, in a model, we need to resolve how to combine the\n different values for `x` computed in the different replicas.\n\n * `NONE`: This is the default, giving an error if you use a\n variable-update operation with multiple replicas.\n * `SUM`: Add the updates across replicas.\n * `MEAN`: Take the arithmetic mean (\"average\") of the updates across replicas.\n * `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same\n update, but we only want to perform the update once. Used, e.g., for the\n global step counter.\n \"\"\"\n NONE = 0\n SUM = 1\n MEAN = 2\n ONLY_FIRST_REPLICA = 3\n\n def __hash__(self):\n return hash(self.value)\n\n def __eq__(self, other):\n if self is other:\n return True\n elif isinstance(other, VariableAggregation):\n return int(self.value) == int(other.value)\n else:\n return False\n\n\n@tf_export(v1=[\"VariableAggregation\"])\nclass VariableAggregation(enum.Enum):\n NONE = 0\n SUM = 1\n MEAN = 2\n ONLY_FIRST_REPLICA = 3\n ONLY_FIRST_TOWER = 3 # DEPRECATED\n\n def __hash__(self):\n return hash(self.value)\n\n\n# LINT.ThenChange(//tensorflow/core/framework/variable.proto)\n#\n# Note that we are currently relying on the integer values of the Python enums\n# matching the integer values of the proto enums.\n\nVariableAggregation.__doc__ = (\n VariableAggregationV2.__doc__ +\n \"* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\\n \")\n\n\ndef validate_synchronization_aggregation_trainable(synchronization, aggregation,\n trainable, name):\n \"\"\"Given user-provided variable properties, sets defaults and validates.\"\"\"\n if aggregation is None:\n aggregation = VariableAggregation.NONE\n else:\n if not isinstance(aggregation,\n (VariableAggregation, VariableAggregationV2)):\n try:\n aggregation = VariableAggregationV2(aggregation)\n except ValueError:\n raise ValueError(\n \"Invalid variable aggregation mode: {} for variable: {}\".format(\n aggregation, name))\n if synchronization is None:\n synchronization = VariableSynchronization.AUTO\n else:\n try:\n synchronization = VariableSynchronization(synchronization)\n except ValueError:\n raise ValueError(\n \"Invalid variable synchronization mode: {} for variable: {}\".format(\n synchronization, name))\n if trainable is None:\n trainable = synchronization != VariableSynchronization.ON_READ\n return synchronization, aggregation, trainable\n\n\nclass VariableMetaclass(type):\n \"\"\"Metaclass to allow construction of tf.Variable to be overridden.\"\"\"\n\n def _variable_v1_call(cls,\n initial_value=None,\n trainable=None,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None,\n use_resource=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE,\n shape=None):\n \"\"\"Call on Variable class. 
Useful to force the signature.\"\"\"\n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access\n previous_getter = _make_getter(getter, previous_getter)\n\n # Reset `aggregation` that is explicitly set as `None` to the enum NONE.\n if aggregation is None:\n aggregation = VariableAggregation.NONE\n return previous_getter(\n initial_value=initial_value,\n trainable=trainable,\n collections=collections,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n variable_def=variable_def,\n dtype=dtype,\n expected_shape=expected_shape,\n import_scope=import_scope,\n constraint=constraint,\n use_resource=use_resource,\n synchronization=synchronization,\n aggregation=aggregation,\n shape=shape)\n\n def _variable_v2_call(cls,\n initial_value=None,\n trainable=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n import_scope=None,\n constraint=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE,\n shape=None):\n \"\"\"Call on Variable class. Useful to force the signature.\"\"\"\n previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)\n for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access\n previous_getter = _make_getter(getter, previous_getter)\n\n # Reset `aggregation` that is explicitly set as `None` to the enum NONE.\n if aggregation is None:\n aggregation = VariableAggregation.NONE\n return previous_getter(\n initial_value=initial_value,\n trainable=trainable,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n variable_def=variable_def,\n dtype=dtype,\n import_scope=import_scope,\n constraint=constraint,\n synchronization=synchronization,\n aggregation=aggregation,\n shape=shape)\n\n def __call__(cls, *args, **kwargs):\n if cls is VariableV1:\n return cls._variable_v1_call(*args, **kwargs)\n elif cls is Variable:\n return cls._variable_v2_call(*args, **kwargs)\n else:\n return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n\n\n@tf_export(\"Variable\", v1=[])\nclass Variable(six.with_metaclass(VariableMetaclass, trackable.Trackable)):\n \"\"\"See the [Variables Guide](https://tensorflow.org/guide/variables).\n\n A variable maintains state in the graph across calls to `run()`. You add a\n variable to the graph by constructing an instance of the class `Variable`.\n\n The `Variable()` constructor requires an initial value for the variable,\n which can be a `Tensor` of any type and shape. The initial value defines the\n type and shape of the variable. After construction, the type and shape of\n the variable are fixed. The value can be changed using one of the assign\n methods.\n\n If you want to change the shape of a variable later you have to use an\n `assign` Op with `validate_shape=False`.\n\n Just like any `Tensor`, variables created with `Variable()` can be used as\n inputs for other Ops in the graph. 
Additionally, all the operators\n overloaded for the `Tensor` class are carried over to variables, so you can\n also add nodes to the graph by just doing arithmetic on variables.\n\n ```python\n import tensorflow as tf\n\n # Create a variable.\n w = tf.Variable(<initial-value>, name=<optional-name>)\n\n # Use the variable in the graph like any Tensor.\n y = tf.matmul(w, ...another variable or tensor...)\n\n # The overloaded operators are available too.\n z = tf.sigmoid(w + y)\n\n # Assign a new value to the variable with `assign()` or a related method.\n w.assign(w + 1.0)\n w.assign_add(1.0)\n ```\n\n When you launch the graph, variables have to be explicitly initialized before\n you can run Ops that use their value. You can initialize a variable by\n running its *initializer op*, restoring the variable from a save file, or\n simply running an `assign` Op that assigns a value to the variable. In fact,\n the variable *initializer op* is just an `assign` Op that assigns the\n variable's initial value to the variable itself.\n\n ```python\n # Launch the graph in a session.\n with tf.compat.v1.Session() as sess:\n # Run the variable initializer.\n sess.run(w.initializer)\n # ...you now can run ops that use the value of 'w'...\n ```\n\n The most common initialization pattern is to use the convenience function\n `global_variables_initializer()` to add an Op to the graph that initializes\n all the variables. You then run that Op after launching the graph.\n\n ```python\n # Add an Op to initialize global variables.\n init_op = tf.compat.v1.global_variables_initializer()\n\n # Launch the graph in a session.\n with tf.compat.v1.Session() as sess:\n # Run the Op that initializes global variables.\n sess.run(init_op)\n # ...you can now run any Op that uses variable values...\n ```\n\n If you need to create a variable with an initial value dependent on another\n variable, use the other variable's `initialized_value()`. This ensures that\n variables are initialized in the right order.\n\n All variables are automatically collected in the graph where they are\n created. By default, the constructor adds the new variable to the graph\n collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function\n `global_variables()` returns the contents of that collection.\n\n When building a machine learning model it is often convenient to distinguish\n between variables holding the trainable model parameters and other variables\n such as a `global step` variable used to count training steps. To make this\n easier, the variable constructor supports a `trainable=<bool>` parameter. If\n `True`, the new variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. The convenience function\n `trainable_variables()` returns the contents of this collection. 
The\n various `Optimizer` classes use this collection as the default list of\n variables to optimize.\n \"\"\"\n\n def __init__(self,\n initial_value=None,\n trainable=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n import_scope=None,\n constraint=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE,\n shape=None):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, GradientTapes automatically watch uses of this\n variable. Defaults to `True`, unless `synchronization` is set to\n `ON_READ`, in which case it defaults to `False`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device. If not\n `None`, caches on another device. Typical use is to cache on the device\n where the Ops using the Variable reside, to deduplicate copying through\n `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates the\n Variable object with its contents, referencing the variable's nodes in\n the graph, which must already exist. The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type. If\n `None`, either the datatype will be kept (if `initial_value` is a\n Tensor), or `convert_to_tensor` will decide.\n import_scope: Optional `string`. Name scope to add to the `Variable.` Only\n used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value (which must have\n the same shape). Constraints are not safe to use when doing asynchronous\n distributed training.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. 
By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses when to\n synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n shape: (optional) The shape of this variable. If None, the shape of\n `initial_value` will be used. When setting this argument to\n `tf.TensorShape(None)` (representing an unspecified shape), the variable\n can be assigned with values of different shapes.\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n raise NotImplementedError\n\n def value(self):\n \"\"\"Returns the last snapshot of this variable.\n\n You usually do not need to call this method as all ops that need the value\n of the variable call it automatically through a `convert_to_tensor()` call.\n\n Returns a `Tensor` which holds the value of the variable. You can not\n assign a new value to this tensor as it is not a reference to the variable.\n\n To avoid copies, if the consumer of the returned value is on the same device\n as the variable, this actually returns the live value of the variable, not\n a copy. Updates to the variable are seen by the consumer. If the consumer\n is on a different device it will get a copy of the variable.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n raise NotImplementedError\n\n def read_value(self):\n \"\"\"Returns the value of this variable, read in the current context.\n\n Can be different from value() if it's on another device, with control\n dependencies, etc.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n raise NotImplementedError\n\n def set_shape(self, shape):\n \"\"\"Overrides the shape for this variable.\n\n Args:\n shape: the `TensorShape` representing the overridden shape.\n \"\"\"\n raise NotImplementedError\n\n @property\n def trainable(self):\n raise NotImplementedError\n\n @property\n def synchronization(self):\n raise NotImplementedError\n\n @property\n def aggregation(self):\n raise NotImplementedError\n\n def eval(self, session=None):\n \"\"\"In a session, computes and returns the value of this variable.\n\n This is not a graph construction method, it does not add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.compat.v1.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.compat.v1.global_variables_initializer()\n\n with tf.compat.v1.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n print(v.eval(sess))\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n print(v.eval())\n ```\n\n Args:\n session: The session to use to evaluate this variable. If none, the\n default session is used.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n \"\"\"\n raise NotImplementedError\n\n @deprecated(\n None, \"Use Variable.read_value. 
Variables in 2.X are initialized \"\n \"automatically both in eager and graph (inside tf.defun) contexts.\")\n def initialized_value(self):\n \"\"\"Returns the value of the initialized variable.\n\n You should use this instead of the variable itself to initialize another\n variable with a value that depends on the value of this variable.\n\n ```python\n # Initialize 'v' with a random tensor.\n v = tf.Variable(tf.random.truncated_normal([10, 40]))\n # Use `initialized_value` to guarantee that `v` has been\n # initialized before its value is used to initialize `w`.\n # The random values are picked only once.\n w = tf.Variable(v.initialized_value() * 2.0)\n ```\n\n Returns:\n A `Tensor` holding the value of this variable after its initializer\n has run.\n \"\"\"\n with ops.init_scope():\n return control_flow_ops.cond(\n is_variable_initialized(self), self.read_value,\n lambda: self.initial_value)\n\n @property\n def initial_value(self):\n \"\"\"Returns the Tensor used as the initial value for the variable.\n\n Note that this is different from `initialized_value()` which runs\n the op that initializes the variable before returning its value.\n This method returns the tensor that is used by the op that initializes\n the variable.\n\n Returns:\n A `Tensor`.\n \"\"\"\n raise NotImplementedError\n\n @property\n def constraint(self):\n \"\"\"Returns the constraint function associated with this variable.\n\n Returns:\n The constraint function that was passed to the variable constructor.\n Can be `None` if no constraint was passed.\n \"\"\"\n raise NotImplementedError\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n \"\"\"Assigns a new value to the variable.\n\n This is essentially a shortcut for `assign(self, value)`.\n\n Args:\n value: A `Tensor`. The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the assignment has completed.\n \"\"\"\n raise NotImplementedError\n\n def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Adds a value to this variable.\n\n This is essentially a shortcut for `assign_add(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the addition has completed.\n \"\"\"\n raise NotImplementedError\n\n def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Subtracts a value from this variable.\n\n This is essentially a shortcut for `assign_sub(self, delta)`.\n\n Args:\n delta: A `Tensor`. 
The value to subtract from this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the subtraction has completed.\n \"\"\"\n raise NotImplementedError\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Subtracts `tf.IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Adds `tf.IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be added to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered addition has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_max(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Updates this variable with the max of `tf.IndexedSlices` and itself.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to use as an argument of max with this\n variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered maximization has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_min(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Updates this variable with the min of `tf.IndexedSlices` and itself.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to use as an argument of min with this\n variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered minimization has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Multiply this variable by `tf.IndexedSlices`.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to multiply this variable by.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered multiplication has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_div(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Divide this variable by `tf.IndexedSlices`.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to divide this variable by.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered division has completed.\n\n 
Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `tf.IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered assignment has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `tf.IndexedSlices` to this variable batch-wise.\n\n Analogous to `batch_gather`. This assumes that this variable and the\n sparse_delta IndexedSlices have a series of leading dimensions that are the\n same for all of them, and the updates are performed on the last dimension of\n indices. In other words, the dimensions should be the following:\n\n `num_prefix_dims = sparse_delta.indices.ndims - 1`\n `batch_dim = num_prefix_dims + 1`\n `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[\n batch_dim:]`\n\n where\n\n `sparse_delta.updates.shape[:num_prefix_dims]`\n `== sparse_delta.indices.shape[:num_prefix_dims]`\n `== var.shape[:num_prefix_dims]`\n\n And the operation performed can be expressed as:\n\n `var[i_1, ..., i_n,\n sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[\n i_1, ..., i_n, j]`\n\n When sparse_delta.indices is a 1D tensor, this operation is equivalent to\n `scatter_update`.\n\n To avoid this operation one can looping over the first `ndims` of the\n variable and using `scatter_update` on the subtensors that result of slicing\n the first dimension. This is a valid option for `ndims = 1`, but less\n efficient than this implementation.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered assignment has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_sub(self, indices, updates, name=None):\n \"\"\"Applies sparse subtraction to individual values or slices in a Variable.\n\n Assuming the variable has rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into self.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of self.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = v.scatter_nd_sub(indices, updates)\n with tf.compat.v1.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to v would look like this:\n\n [1, -9, 3, -6, -6, 6, 7, -4]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_add(self, indices, updates, name=None):\n \"\"\"Applies sparse addition to individual values or slices in a Variable.\n\n The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into self.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of self.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n add = v.scatter_nd_add(indices, updates)\n with tf.compat.v1.Session() as sess:\n print sess.run(add)\n ```\n\n The resulting update to v would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered addition has completed.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_update(self, indices, updates, name=None):\n \"\"\"Applies sparse assignment to individual values or slices in a Variable.\n\n The Variable has rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into self.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of self.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, self.shape[K], ..., self.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n v = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = v.scatter_nd_assign(indices, updates)\n with tf.compat.v1.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to v would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered assignment has completed.\n \"\"\"\n raise NotImplementedError\n\n def sparse_read(self, indices, name=None):\n r\"\"\"Gather slices from params axis axis according to indices.\n\n This function supports a subset of tf.gather, see tf.gather for details on\n usage.\n\n Args:\n indices: The index `Tensor`. Must be one of the following types: `int32`,\n `int64`. Must be in range `[0, params.shape[axis])`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `params`.\n \"\"\"\n raise AttributeError\n\n def gather_nd(self, indices, name=None):\n r\"\"\"Gather slices from `params` into a Tensor with shape specified by `indices`.\n\n See tf.gather_nd for details.\n\n Args:\n indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Index tensor.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. Has the same type as `params`.\n \"\"\"\n raise AttributeError\n\n @deprecated(None, \"Prefer Dataset.range instead.\")\n def count_up_to(self, limit):\n \"\"\"Increments this variable until it reaches `limit`.\n\n When that Op is run it tries to increment the variable by `1`. If\n incrementing the variable would bring it above `limit` then the Op raises\n the exception `OutOfRangeError`.\n\n If no error is raised, the Op outputs the value of the variable before\n the increment.\n\n This is essentially a shortcut for `count_up_to(self, limit)`.\n\n Args:\n limit: value at which incrementing the variable raises an error.\n\n Returns:\n A `Tensor` that will hold the variable value before the increment. If no\n other Op modifies this variable, the values produced will all be\n distinct.\n \"\"\"\n raise NotImplementedError\n\n @deprecated(None,\n \"Prefer Variable.assign which has equivalent behavior in 2.X.\")\n def load(self, value, session=None):\n \"\"\"Load new value into this variable.\n\n Writes new value to variable's memory. Doesn't add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.compat.v1.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.compat.v1.global_variables_initializer()\n\n with tf.compat.v1.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n v.load([2, 3], sess)\n print(v.eval(sess)) # prints [2 3]\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n v.load([3, 4], sess)\n print(v.eval()) # prints [3 4]\n ```\n\n Args:\n value: New variable value\n session: The session to use to evaluate this variable. 
If none, the\n default session is used.\n\n Raises:\n ValueError: Session is not passed and no default session\n \"\"\"\n if context.executing_eagerly():\n self.assign(value)\n else:\n session = session or ops.get_default_session()\n if session is None:\n raise ValueError(\n \"Either session argument should be provided or default session \"\n \"should be established\")\n session.run(self.initializer, {self.initializer.inputs[1]: value})\n\n # Conversion to tensor.\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name\n \"\"\"Utility function for converting a Variable to a Tensor.\"\"\"\n _ = name\n if dtype and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n return v._ref() # pylint: disable=protected-access\n else:\n return v.value()\n\n @classmethod\n def _OverloadAllOperators(cls): # pylint: disable=invalid-name\n \"\"\"Register overloads for all operators.\"\"\"\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n cls._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(cls, \"__getitem__\", array_ops._SliceHelperVar)\n\n @classmethod\n def _OverloadOperator(cls, operator): # pylint: disable=invalid-name\n \"\"\"Defer an operator overload to `ops.Tensor`.\n\n We pull the operator out of ops.Tensor dynamically to avoid ordering issues.\n\n Args:\n operator: string. The operator name.\n \"\"\"\n # We can't use the overload mechanism on __eq__ & __ne__ since __eq__ is\n # called when adding a variable to sets. As a result we call a.value() which\n # causes infinite recursion when operating within a GradientTape\n # TODO(gjn): Consider removing this\n if operator == \"__eq__\" or operator == \"__ne__\":\n return\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args, **kwargs):\n # pylint: disable=protected-access\n return tensor_oper(a.value(), *args, **kwargs)\n\n functools.update_wrapper(_run_op, tensor_oper)\n setattr(cls, operator, _run_op)\n\n def __hash__(self):\n if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): # pylint: disable=protected-access\n raise TypeError(\"Variable is unhashable if Tensor equality is enabled. 
\"\n \"Instead, use tensor.experimental_ref() as the key.\")\n else:\n return id(self)\n\n # TODO(gjn): duplicate of math_ops.tensor_equals, consider removing\n def __eq__(self, other):\n \"\"\"Compares two variables element-wise for equality.\"\"\"\n if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): # pylint: disable=protected-access\n return gen_math_ops.equal(self, other)\n else:\n # In legacy graph mode, tensor equality is object equality\n return self is other\n\n # TODO(gjn): duplicate of math_ops.tensor_not_equals, consider removing\n def __ne__(self, other):\n \"\"\"Compares two variables element-wise for equality.\"\"\"\n if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): # pylint: disable=protected-access\n return gen_math_ops.not_equal(self, other)\n else:\n # In legacy graph mode, tensor equality is object equality\n return self is not other\n\n def __iter__(self):\n \"\"\"Dummy method to prevent iteration.\n\n Do not call.\n\n NOTE(mrry): If we register __getitem__ as an overloaded operator,\n Python will valiantly attempt to iterate over the variable's Tensor from 0\n to infinity. Declaring this method prevents this unintended behavior.\n\n Raises:\n TypeError: when invoked.\n \"\"\"\n raise TypeError(\"'Variable' object is not iterable.\")\n\n # NOTE(mrry): This enables the Variable's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Variable class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Variables interact\n # with ndarrays.\n __array_priority__ = 100\n\n @property\n def name(self):\n \"\"\"The name of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def _shared_name(self):\n \"\"\"The shared name of the variable.\n\n Unlike name(), shared_name doesn't have \":0\" suffix. It is user-specified\n name with name scope prefix.\n\n Returns:\n variable name.\n \"\"\"\n return self.name[:self.name.index(\":\")]\n\n @property\n def initializer(self):\n \"\"\"The initializer operation for this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def device(self):\n \"\"\"The device of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def dtype(self):\n \"\"\"The `DType` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def op(self):\n \"\"\"The `Operation` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def graph(self):\n \"\"\"The `Graph` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def shape(self):\n \"\"\"The `TensorShape` of this variable.\n\n Returns:\n A `TensorShape`.\n \"\"\"\n raise NotImplementedError\n\n def get_shape(self):\n \"\"\"Alias of `Variable.shape`.\"\"\"\n return self.shape\n\n def _gather_saveables_for_checkpoint(self):\n \"\"\"For implementing `Trackable`. This object is saveable on its own.\"\"\"\n return {trackable.VARIABLE_VALUE_KEY: self}\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `Variable` to a `VariableDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. 
Name scope to remove.\n\n Returns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def from_proto(variable_def, import_scope=None):\n \"\"\"Returns a `Variable` object created from `variable_def`.\"\"\"\n return RefVariable(variable_def=variable_def, import_scope=import_scope)\n\n def _set_save_slice_info(self, save_slice_info):\n \"\"\"Sets the slice info for this `Variable`.\n\n Args:\n save_slice_info: A `Variable.SaveSliceInfo` object.\n \"\"\"\n self._save_slice_info = save_slice_info\n\n def _get_save_slice_info(self):\n return self._save_slice_info\n\n def experimental_ref(self):\n # tf.Tensor also has the same experimental_ref() API. If you update the\n # documenation here, please update tf.Tensor.experimental_ref() as well.\n \"\"\"Returns a hashable reference object to this Variable.\n\n Warning: Experimental API that could be changed or removed.\n\n The primary usecase for this API is to put variables in a set/dictionary.\n We can't put variables in a set/dictionary as `variable.__hash__()` is no\n longer available starting Tensorflow 2.0.\n\n ```python\n import tensorflow as tf\n\n x = tf.Variable(5)\n y = tf.Variable(10)\n z = tf.Variable(10)\n\n # The followings will raise an exception starting 2.0\n # TypeError: Variable is unhashable if Variable equality is enabled.\n variable_set = {x, y, z}\n variable_dict = {x: 'five', y: 'ten'}\n ```\n\n Instead, we can use `variable.experimental_ref()`.\n\n ```python\n variable_set = {x.experimental_ref(),\n y.experimental_ref(),\n z.experimental_ref()}\n\n print(x.experimental_ref() in variable_set)\n ==> True\n\n variable_dict = {x.experimental_ref(): 'five',\n y.experimental_ref(): 'ten',\n z.experimental_ref(): 'ten'}\n\n print(variable_dict[y.experimental_ref()])\n ==> ten\n ```\n\n Also, the reference object provides `.deref()` function that returns the\n original Variable.\n\n ```python\n x = tf.Variable(5)\n print(x.experimental_ref().deref())\n ==> <tf.Variable 'Variable:0' shape=() dtype=int32, numpy=5>\n ```\n \"\"\"\n return object_identity.Reference(self)\n\n class SaveSliceInfo(object):\n \"\"\"Information on how to save this Variable as a slice.\n\n Provides internal support for saving variables as slices of a larger\n variable. This API is not public and is subject to change.\n\n Available properties:\n\n * full_name\n * full_shape\n * var_offset\n * var_shape\n \"\"\"\n\n def __init__(self,\n full_name=None,\n full_shape=None,\n var_offset=None,\n var_shape=None,\n save_slice_info_def=None,\n import_scope=None):\n \"\"\"Create a `SaveSliceInfo`.\n\n Args:\n full_name: Name of the full variable of which this `Variable` is a\n slice.\n full_shape: Shape of the full variable, as a list of int.\n var_offset: Offset of this `Variable` into the full variable, as a list\n of int.\n var_shape: Shape of this `Variable`, as a list of int.\n save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,\n recreates the SaveSliceInfo object its contents. `save_slice_info_def`\n and other arguments are mutually exclusive.\n import_scope: Optional `string`. Name scope to add. 
Only used when\n initializing from protocol buffer.\n \"\"\"\n if save_slice_info_def:\n assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)\n self.full_name = ops.prepend_name_scope(\n save_slice_info_def.full_name, import_scope=import_scope)\n self.full_shape = [i for i in save_slice_info_def.full_shape]\n self.var_offset = [i for i in save_slice_info_def.var_offset]\n self.var_shape = [i for i in save_slice_info_def.var_shape]\n else:\n self.full_name = full_name\n self.full_shape = full_shape\n self.var_offset = var_offset\n self.var_shape = var_shape\n\n @property\n def spec(self):\n \"\"\"Computes the spec string used for saving.\"\"\"\n full_shape_str = \" \".join([\"%d\" % d for d in self.full_shape]) + \" \"\n sl_spec = \":\".join(\n [\"%d,%d\" % (o, s) for o, s in zip(self.var_offset, self.var_shape)])\n return full_shape_str + sl_spec\n\n def to_proto(self, export_scope=None):\n \"\"\"Returns a SaveSliceInfoDef() proto.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not\n in the specified name scope.\n \"\"\"\n if (export_scope is None or self.full_name.startswith(export_scope)):\n save_slice_info_def = variable_pb2.SaveSliceInfoDef()\n save_slice_info_def.full_name = ops.strip_name_scope(\n self.full_name, export_scope)\n for i in self.full_shape:\n save_slice_info_def.full_shape.append(i)\n for i in self.var_offset:\n save_slice_info_def.var_offset.append(i)\n for i in self.var_shape:\n save_slice_info_def.var_shape.append(i)\n return save_slice_info_def\n else:\n return None\n\n\nVariable._OverloadAllOperators() # pylint: disable=protected-access\n_pywrap_utils.RegisterType(\"Variable\", Variable)\n\n\n@tf_export(v1=[\"Variable\"])\nclass VariableV1(Variable):\n \"\"\"See the [Variables Guide](https://tensorflow.org/guide/variables).\n\n A variable maintains state in the graph across calls to `run()`. You add a\n variable to the graph by constructing an instance of the class `Variable`.\n\n The `Variable()` constructor requires an initial value for the variable,\n which can be a `Tensor` of any type and shape. The initial value defines the\n type and shape of the variable. After construction, the type and shape of\n the variable are fixed. The value can be changed using one of the assign\n methods.\n\n If you want to change the shape of a variable later you have to use an\n `assign` Op with `validate_shape=False`.\n\n Just like any `Tensor`, variables created with `Variable()` can be used as\n inputs for other Ops in the graph. Additionally, all the operators\n overloaded for the `Tensor` class are carried over to variables, so you can\n also add nodes to the graph by just doing arithmetic on variables.\n\n ```python\n import tensorflow as tf\n\n # Create a variable.\n w = tf.Variable(<initial-value>, name=<optional-name>)\n\n # Use the variable in the graph like any Tensor.\n y = tf.matmul(w, ...another variable or tensor...)\n\n # The overloaded operators are available too.\n z = tf.sigmoid(w + y)\n\n # Assign a new value to the variable with `assign()` or a related method.\n w.assign(w + 1.0)\n w.assign_add(1.0)\n ```\n\n When you launch the graph, variables have to be explicitly initialized before\n you can run Ops that use their value. You can initialize a variable by\n running its *initializer op*, restoring the variable from a save file, or\n simply running an `assign` Op that assigns a value to the variable. 
In fact,\n the variable *initializer op* is just an `assign` Op that assigns the\n variable's initial value to the variable itself.\n\n ```python\n # Launch the graph in a session.\n with tf.compat.v1.Session() as sess:\n # Run the variable initializer.\n sess.run(w.initializer)\n # ...you now can run ops that use the value of 'w'...\n ```\n\n The most common initialization pattern is to use the convenience function\n `global_variables_initializer()` to add an Op to the graph that initializes\n all the variables. You then run that Op after launching the graph.\n\n ```python\n # Add an Op to initialize global variables.\n init_op = tf.compat.v1.global_variables_initializer()\n\n # Launch the graph in a session.\n with tf.compat.v1.Session() as sess:\n # Run the Op that initializes global variables.\n sess.run(init_op)\n # ...you can now run any Op that uses variable values...\n ```\n\n If you need to create a variable with an initial value dependent on another\n variable, use the other variable's `initialized_value()`. This ensures that\n variables are initialized in the right order.\n\n All variables are automatically collected in the graph where they are\n created. By default, the constructor adds the new variable to the graph\n collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function\n `global_variables()` returns the contents of that collection.\n\n When building a machine learning model it is often convenient to distinguish\n between variables holding the trainable model parameters and other variables\n such as a `global step` variable used to count training steps. To make this\n easier, the variable constructor supports a `trainable=<bool>` parameter. If\n `True`, the new variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. The convenience function\n `trainable_variables()` returns the contents of this collection. The\n various `Optimizer` classes use this collection as the default list of\n variables to optimize.\n\n WARNING: tf.Variable objects by default have a non-intuitive memory model. A\n Variable is represented internally as a mutable Tensor which can\n non-deterministically alias other Tensors in a graph. The set of operations\n which consume a Variable and can lead to aliasing is undetermined and can\n change across TensorFlow versions. Avoid writing code which relies on the\n value of a Variable either changing or not changing as other operations\n happen. 
For example, using Variable objects or simple functions thereof as\n predicates in a `tf.cond` is dangerous and error-prone:\n\n ```\n v = tf.Variable(True)\n tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken.\n ```\n\n Here, adding `use_resource=True` when constructing the variable will\n fix any nondeterminism issues:\n ```\n v = tf.Variable(True, use_resource=True)\n tf.cond(v, lambda: v.assign(False), my_false_fn)\n ```\n\n To use the replacement for variables which does\n not have these issues:\n\n * Add `use_resource=True` when constructing `tf.Variable`;\n * Call `tf.compat.v1.get_variable_scope().set_use_resource(True)` inside a\n `tf.compat.v1.variable_scope` before the `tf.compat.v1.get_variable()` call.\n \"\"\"\n\n def __init__(\n self, # pylint: disable=super-init-not-called\n initial_value=None,\n trainable=None,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None,\n use_resource=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE,\n shape=None):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, also adds the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default\n list of variables to use by the `Optimizer` classes. Defaults to `True`,\n unless `synchronization` is set to `ON_READ`, in which case it defaults\n to `False`.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device. If not\n `None`, caches on another device. Typical use is to cache on the device\n where the Ops using the Variable reside, to deduplicate copying through\n `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates the\n Variable object with its contents, referencing the variable's nodes in\n the graph, which must already exist. The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type. 
If\n `None`, either the datatype will be kept (if `initial_value` is a\n Tensor), or `convert_to_tensor` will decide.\n expected_shape: A TensorShape. If set, initial_value is expected to have\n this shape.\n import_scope: Optional `string`. Name scope to add to the `Variable.` Only\n used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value (which must have\n the same shape). Constraints are not safe to use when doing asynchronous\n distributed training.\n use_resource: whether to use resource variables.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses when to\n synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n shape: (optional) The shape of this variable. If None, the shape of\n `initial_value` will be used. When setting this argument to\n `tf.TensorShape(None)` (representing an unspecified shape), the variable\n can be assigned with values of different shapes.\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n\n SaveSliceInfo = Variable.SaveSliceInfo\n\n\n# TODO(apassos): do not repeat all comments here\nclass RefVariable(VariableV1):\n \"\"\"Ref-based implementation of variables.\"\"\"\n\n def __init__(\n self, # pylint: disable=super-init-not-called\n initial_value=None,\n trainable=None,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None,\n synchronization=None,\n aggregation=None,\n shape=None):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, also adds the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default\n list of variables to use by the `Optimizer` classes. Defaults to `True`,\n unless `synchronization` is set to `ON_READ`, in which case it defaults\n to `False`.\n collections: List of graph collections keys. 
The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device. If not\n `None`, caches on another device. Typical use is to cache on the device\n where the Ops using the Variable reside, to deduplicate copying through\n `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates the\n Variable object with its contents, referencing the variable's nodes in\n the graph, which must already exist. The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type. If\n `None`, either the datatype will be kept (if `initial_value` is a\n Tensor), or `convert_to_tensor` will decide.\n expected_shape: A TensorShape. If set, initial_value is expected to have\n this shape.\n import_scope: Optional `string`. Name scope to add to the `Variable.` Only\n used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value (which must have\n the same shape). Constraints are not safe to use when doing asynchronous\n distributed training.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses when to\n synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n shape: (optional) The shape of this variable. If None, the shape of\n `initial_value` will be used. 
When setting this argument to\n `tf.TensorShape(None)` (representing an unspecified shape), the variable\n can be assigned with values of different shapes.\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n self._in_graph_mode = True\n if variable_def:\n # If variable_def is provided, recreates the variable from its fields.\n if initial_value:\n raise ValueError(\"variable_def and initial_value are mutually \"\n \"exclusive.\")\n self._init_from_proto(variable_def, import_scope=import_scope)\n else:\n # Create from initial_value.\n self._init_from_args(\n initial_value=initial_value,\n trainable=trainable,\n collections=collections,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n dtype=dtype,\n expected_shape=expected_shape,\n constraint=constraint,\n synchronization=synchronization,\n aggregation=aggregation,\n shape=shape)\n\n def __repr__(self):\n if context.executing_eagerly() and not self._in_graph_mode:\n return \"<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>\" % (\n self.name, self.get_shape(), self.dtype.name,\n ops.numpy_text(self.read_value(), is_repr=True))\n else:\n return \"<tf.Variable '%s' shape=%s dtype=%s>\" % (\n self.name, self.get_shape(), self.dtype.name)\n\n def _init_from_args(self,\n initial_value=None,\n trainable=None,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n dtype=None,\n expected_shape=None,\n constraint=None,\n synchronization=None,\n aggregation=None,\n shape=None):\n \"\"\"Creates a new variable from arguments.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called.\n (Note that initializer functions from init_ops.py must first be bound to\n a shape before being used here.)\n trainable: If `True`, also adds the variable to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as the default\n list of variables to use by the `Optimizer` classes. Defaults to `True`,\n unless `synchronization` is set to `ON_READ`, in which case it defaults\n to `False`.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n dtype: If set, initial_value will be converted to the given type. If None,\n either the datatype will be kept (if initial_value is a Tensor) or\n float32 will be used (if it is a Python object convertible to a Tensor).\n expected_shape: Deprecated. 
Ignored.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value (which must have\n the same shape). Constraints are not safe to use when doing asynchronous\n distributed training.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses when to\n synchronize.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n shape: (optional) The shape of this variable. If None, the shape of\n `initial_value` will be used. When setting this argument to\n `tf.TensorShape(None)` (representing an unspecified shape), the variable\n can be assigned with values of different shapes.\n\n Raises:\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If lifted into the eager context.\n \"\"\"\n _ = expected_shape\n if initial_value is None:\n raise ValueError(\"initial_value must be specified.\")\n init_from_fn = callable(initial_value)\n\n if collections is None:\n collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n if not isinstance(collections, (list, tuple, set)):\n raise ValueError(\n \"collections argument to Variable constructor must be a list, tuple, \"\n \"or set. Got %s of type %s\" % (collections, type(collections)))\n if constraint is not None and not callable(constraint):\n raise ValueError(\"The `constraint` argument must be a callable.\")\n\n # Store the graph key so optimizers know how to only retrieve variables from\n # this graph.\n self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access\n if isinstance(initial_value, trackable.CheckpointInitialValue):\n self._maybe_initialize_trackable()\n self._update_uid = initial_value.checkpoint_position.restore_uid\n initial_value = initial_value.wrapped_value\n\n synchronization, aggregation, trainable = (\n validate_synchronization_aggregation_trainable(synchronization,\n aggregation, trainable,\n name))\n self._synchronization = synchronization\n self._aggregation = aggregation\n self._trainable = trainable\n if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:\n collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]\n with ops.init_scope():\n # Ensure that we weren't lifted into the eager context.\n if context.executing_eagerly():\n raise RuntimeError(\n \"RefVariable not supported when eager execution is enabled. 
\")\n with ops.name_scope(name, \"Variable\",\n [] if init_from_fn else [initial_value]) as name:\n\n if init_from_fn:\n # Use attr_scope and device(None) to simulate the behavior of\n # colocate_with when the variable we want to colocate with doesn't\n # yet exist.\n true_name = ops.name_from_scope_name(name) # pylint: disable=protected-access\n attr = attr_value_pb2.AttrValue(\n list=attr_value_pb2.AttrValue.ListValue(\n s=[compat.as_bytes(\"loc:@%s\" % true_name)]))\n # pylint: disable=protected-access\n with ops.get_default_graph()._attr_scope({\"_class\": attr}):\n with ops.name_scope(\"Initializer\"), ops.device(None):\n self._initial_value = ops.convert_to_tensor(\n initial_value(), name=\"initial_value\", dtype=dtype)\n if shape is None:\n shape = (\n self._initial_value.get_shape()\n if validate_shape else tensor_shape.unknown_shape())\n self._variable = state_ops.variable_op_v2(\n shape, self._initial_value.dtype.base_dtype, name=name)\n # pylint: enable=protected-access\n\n # Or get the initial value from a Tensor or Python object.\n else:\n self._initial_value = ops.convert_to_tensor(\n initial_value, name=\"initial_value\", dtype=dtype)\n # pylint: disable=protected-access\n if self._initial_value.op._get_control_flow_context() is not None:\n raise ValueError(\n \"Initializer for variable %s is from inside a control-flow \"\n \"construct, such as a loop or conditional. When creating a \"\n \"variable inside a loop or conditional, use a lambda as the \"\n \"initializer.\" % name)\n if shape is None:\n # pylint: enable=protected-access\n shape = (\n self._initial_value.get_shape()\n if validate_shape else tensor_shape.unknown_shape())\n # In this case, the variable op can't be created until after the\n # initial_value has been converted to a Tensor with a known type.\n self._variable = state_ops.variable_op_v2(\n shape, self._initial_value.dtype.base_dtype, name=name)\n\n # Cache the name in `self`, because some APIs call `Variable.name` in a\n # tight loop, and this halves the cost.\n self._name = self._variable.name\n\n # Manually overrides the variable's shape with the initial value's.\n if validate_shape:\n initial_value_shape = self._initial_value.get_shape()\n if not initial_value_shape.is_fully_defined():\n raise ValueError(\"initial_value must have a shape specified: %s\" %\n self._initial_value)\n\n # If 'initial_value' makes use of other variables, make sure we don't\n # have an issue if these other variables aren't initialized first by\n # using their initialized_value() method.\n self._initializer_op = state_ops.assign(\n self._variable,\n _try_guard_against_uninitialized_dependencies(\n name, self._initial_value),\n validate_shape=validate_shape).op\n\n # TODO(vrv): Change this class to not take caching_device, but\n # to take the op to colocate the snapshot with, so we can use\n # colocation rather than devices.\n if caching_device is not None:\n with ops.device(caching_device):\n self._snapshot = array_ops.identity(self._variable, name=\"read\")\n else:\n with ops.colocate_with(self._variable.op):\n self._snapshot = array_ops.identity(self._variable, name=\"read\")\n ops.add_to_collections(collections, self)\n\n self._caching_device = caching_device\n self._save_slice_info = None\n self._constraint = constraint\n\n def _init_from_proto(self, variable_def, import_scope=None):\n \"\"\"Recreates the Variable object from a `VariableDef` protocol buffer.\n\n Args:\n variable_def: `VariableDef` protocol buffer, describing a variable whose\n nodes already exists in the 
graph.\n import_scope: Optional `string`. Name scope to add.\n \"\"\"\n assert isinstance(variable_def, variable_pb2.VariableDef)\n # Create from variable_def.\n g = ops.get_default_graph()\n self._variable = g.as_graph_element(\n ops.prepend_name_scope(\n variable_def.variable_name, import_scope=import_scope))\n self._name = self._variable.name\n self._initializer_op = g.as_graph_element(\n ops.prepend_name_scope(\n variable_def.initializer_name, import_scope=import_scope))\n # Tests whether initial_value_name exists first for backwards compatibility.\n if (hasattr(variable_def, \"initial_value_name\") and\n variable_def.initial_value_name):\n self._initial_value = g.as_graph_element(\n ops.prepend_name_scope(\n variable_def.initial_value_name, import_scope=import_scope))\n else:\n self._initial_value = None\n synchronization, aggregation, trainable = (\n validate_synchronization_aggregation_trainable(\n variable_def.synchronization, variable_def.aggregation,\n variable_def.trainable, variable_def.variable_name))\n self._synchronization = synchronization\n self._aggregation = aggregation\n self._trainable = trainable\n self._snapshot = g.as_graph_element(\n ops.prepend_name_scope(\n variable_def.snapshot_name, import_scope=import_scope))\n if variable_def.HasField(\"save_slice_info_def\"):\n self._save_slice_info = Variable.SaveSliceInfo(\n save_slice_info_def=variable_def.save_slice_info_def,\n import_scope=import_scope)\n else:\n self._save_slice_info = None\n self._caching_device = None\n self._constraint = None\n\n def _as_graph_element(self):\n \"\"\"Conversion function for Graph.as_graph_element().\"\"\"\n return self._variable\n\n def value(self):\n \"\"\"Returns the last snapshot of this variable.\n\n You usually do not need to call this method as all ops that need the value\n of the variable call it automatically through a `convert_to_tensor()` call.\n\n Returns a `Tensor` which holds the value of the variable. You can not\n assign a new value to this tensor as it is not a reference to the variable.\n\n To avoid copies, if the consumer of the returned value is on the same device\n as the variable, this actually returns the live value of the variable, not\n a copy. Updates to the variable are seen by the consumer. If the consumer\n is on a different device it will get a copy of the variable.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return self._snapshot\n\n def read_value(self):\n \"\"\"Returns the value of this variable, read in the current context.\n\n Can be different from value() if it's on another device, with control\n dependencies, etc.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return array_ops.identity(self._variable, name=\"read\")\n\n def _ref(self):\n \"\"\"Returns a reference to this variable.\n\n You usually do not need to call this method as all ops that need a reference\n to the variable call it automatically.\n\n Returns is a `Tensor` which holds a reference to the variable. 
You can\n assign a new value to the variable by passing the tensor to an assign op.\n See `tf.Variable.value` if you want to get the value of the\n variable.\n\n Returns:\n A `Tensor` that is a reference to the variable.\n \"\"\"\n return self._variable\n\n def set_shape(self, shape):\n \"\"\"Overrides the shape for this variable.\n\n Args:\n shape: the `TensorShape` representing the overridden shape.\n \"\"\"\n self._ref().set_shape(shape)\n self.value().set_shape(shape)\n\n @property\n def trainable(self):\n return self._trainable\n\n @property\n def synchronization(self):\n return self._synchronization\n\n @property\n def aggregation(self):\n return self._aggregation\n\n def eval(self, session=None):\n \"\"\"In a session, computes and returns the value of this variable.\n\n This is not a graph construction method, it does not add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.compat.v1.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.compat.v1.global_variables_initializer()\n\n with tf.compat.v1.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n print(v.eval(sess))\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n print(v.eval())\n ```\n\n Args:\n session: The session to use to evaluate this variable. If none, the\n default session is used.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n \"\"\"\n return self._variable.eval(session=session)\n\n @property\n def initial_value(self):\n \"\"\"Returns the Tensor used as the initial value for the variable.\n\n Note that this is different from `initialized_value()` which runs\n the op that initializes the variable before returning its value.\n This method returns the tensor that is used by the op that initializes\n the variable.\n\n Returns:\n A `Tensor`.\n \"\"\"\n return self._initial_value\n\n @property\n def constraint(self):\n \"\"\"Returns the constraint function associated with this variable.\n\n Returns:\n The constraint function that was passed to the variable constructor.\n Can be `None` if no constraint was passed.\n \"\"\"\n return self._constraint\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n \"\"\"Assigns a new value to the variable.\n\n This is essentially a shortcut for `assign(self, value)`.\n\n Args:\n value: A `Tensor`. The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the assignment has completed.\n \"\"\"\n assign = state_ops.assign(\n self._variable, value, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op\n\n def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Adds a value to this variable.\n\n This is essentially a shortcut for `assign_add(self, delta)`.\n\n Args:\n delta: A `Tensor`. 
The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the addition has completed.\n \"\"\"\n assign = state_ops.assign_add(\n self._variable, delta, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op\n\n def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Subtracts a value from this variable.\n\n This is essentially a shortcut for `assign_sub(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to subtract from this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the new\n value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the subtraction has completed.\n \"\"\"\n assign = state_ops.assign_sub(\n self._variable, delta, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Subtracts `tf.IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise TypeError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_sub(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Adds `tf.IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be added to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered addition has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise TypeError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_add(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_max(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Updates this variable with the max of `tf.IndexedSlices` and itself.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to use as an argument of max with this\n variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered maximization has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise TypeError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_max(\n 
self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_min(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Updates this variable with the min of `tf.IndexedSlices` and itself.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to use as an argument of min with this\n variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered minimization has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise TypeError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_min(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_mul(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Multiply this variable by `tf.IndexedSlices`.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to multiply this variable by.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered multiplication has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise TypeError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_mul(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_div(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Divide this variable by `tf.IndexedSlices`.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to divide this variable by.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered division has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise TypeError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_div(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `tf.IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered assignment has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise TypeError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_update(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def batch_scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `tf.IndexedSlices` to this variable batch-wise.\n\n Analogous to `batch_gather`. 
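For instance, a minimal sketch (the concrete values are purely illustrative and assume the returned op is actually executed, e.g. via `sess.run` inside a `tf.compat.v1.Session`):\n\n ```python\n var = tf.Variable([[1, 2, 3], [4, 5, 6]])\n delta = tf.IndexedSlices(values=tf.constant([[7, 8], [9, 10]]),\n                          indices=tf.constant([[0, 2], [1, 2]]))\n update_op = var.batch_scatter_update(delta)\n # After `update_op` has been run, `var` holds [[7, 2, 8], [4, 9, 10]]:\n # row 0 is updated at columns 0 and 2, row 1 at columns 1 and 2.\n ```\n\n More formally, the expected shapes are as follows. 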
This assumes that this variable and the\n sparse_delta IndexedSlices have a series of leading dimensions that are the\n same for all of them, and the updates are performed on the last dimension of\n indices. In other words, the dimensions should be the following:\n\n `num_prefix_dims = sparse_delta.indices.ndims - 1`\n `batch_dim = num_prefix_dims + 1`\n `sparse_delta.updates.shape = sparse_delta.indices.shape + var.shape[\n batch_dim:]`\n\n where\n\n `sparse_delta.updates.shape[:num_prefix_dims]`\n `== sparse_delta.indices.shape[:num_prefix_dims]`\n `== var.shape[:num_prefix_dims]`\n\n And the operation performed can be expressed as:\n\n `var[i_1, ..., i_n,\n sparse_delta.indices[i_1, ..., i_n, j]] = sparse_delta.updates[\n i_1, ..., i_n, j]`\n\n When sparse_delta.indices is a 1D tensor, this operation is equivalent to\n `scatter_update`.\n\n To avoid this operation one can looping over the first `ndims` of the\n variable and using `scatter_update` on the subtensors that result of slicing\n the first dimension. This is a valid option for `ndims = 1`, but less\n efficient than this implementation.\n\n Args:\n sparse_delta: `tf.IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered assignment has completed.\n\n Raises:\n TypeError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n return state_ops.batch_scatter_update(\n self,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_nd_sub(self, indices, updates, name=None):\n \"\"\"Applies sparse subtraction to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_sub(indices, updates)\n with tf.compat.v1.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, -9, 3, -6, -6, 6, 7, -4]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n \"\"\"\n return gen_state_ops.scatter_nd_sub(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def scatter_nd_add(self, indices, updates, name=None):\n \"\"\"Applies sparse addition to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n add = ref.scatter_nd_add(indices, updates)\n with tf.compat.v1.Session() as sess:\n print sess.run(add)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered addition has completed.\n \"\"\"\n return gen_state_ops.scatter_nd_add(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def scatter_nd_update(self, indices, updates, name=None):\n \"\"\"Applies sparse assignment to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_update(indices, updates)\n with tf.compat.v1.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered assignment has completed.\n \"\"\"\n return gen_state_ops.scatter_nd_update(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def _strided_slice_assign(self, begin, end, strides, value, name, begin_mask,\n end_mask, ellipsis_mask, new_axis_mask,\n shrink_axis_mask):\n return gen_array_ops.strided_slice_assign(\n ref=self._ref(),\n begin=begin,\n end=end,\n strides=strides,\n value=value,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n @deprecated(None, \"Prefer Dataset.range instead.\")\n def count_up_to(self, limit):\n \"\"\"Increments this variable until it reaches `limit`.\n\n When that Op is run it tries to increment the variable by `1`. If\n incrementing the variable would bring it above `limit` then the Op raises\n the exception `OutOfRangeError`.\n\n If no error is raised, the Op outputs the value of the variable before\n the increment.\n\n This is essentially a shortcut for `count_up_to(self, limit)`.\n\n Args:\n limit: value at which incrementing the variable raises an error.\n\n Returns:\n A `Tensor` that will hold the variable value before the increment. 
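For example, a minimal sketch of evaluating the returned tensor (assuming an `int32` variable in graph mode and a `tf.compat.v1.Session`; the printed values follow from the semantics described above):\n\n ```python\n v = tf.Variable(0)\n count = v.count_up_to(3)\n with tf.compat.v1.Session() as sess:\n   sess.run(v.initializer)\n   print(sess.run(count))  # 0\n   print(sess.run(count))  # 1\n   print(sess.run(count))  # 2\n   sess.run(count)  # raises OutOfRangeError\n ```\n\n 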
If no\n other Op modifies this variable, the values produced will all be\n distinct.\n \"\"\"\n return state_ops.count_up_to(self._variable, limit=limit)\n\n # Conversion to tensor.\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name\n \"\"\"Utility function for converting a Variable to a Tensor.\"\"\"\n _ = name\n if dtype and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n return v._ref() # pylint: disable=protected-access\n else:\n return v.value()\n\n # NOTE(mrry): This enables the Variable's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Variable class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Variables interact\n # with ndarrays.\n __array_priority__ = 100\n\n @property\n def name(self):\n \"\"\"The name of this variable.\"\"\"\n return self._name\n\n @property\n def initializer(self):\n \"\"\"The initializer operation for this variable.\"\"\"\n return self._initializer_op\n\n @property\n def device(self):\n \"\"\"The device of this variable.\"\"\"\n return self._variable.device\n\n @property\n def dtype(self):\n \"\"\"The `DType` of this variable.\"\"\"\n return self._variable.dtype\n\n @property\n def op(self):\n \"\"\"The `Operation` of this variable.\"\"\"\n return self._variable.op\n\n @property\n def graph(self):\n \"\"\"The `Graph` of this variable.\"\"\"\n return self._variable.graph\n\n @property\n def _distribute_strategy(self):\n \"\"\"The `tf.distribute.Strategy` that this variable was created under.\"\"\"\n return None # Ref variables are never created inside a strategy.\n\n @property\n def shape(self):\n \"\"\"The `TensorShape` of this variable.\n\n Returns:\n A `TensorShape`.\n \"\"\"\n return self._variable.get_shape()\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `Variable` to a `VariableDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope.\n \"\"\"\n if (export_scope is None or self._variable.name.startswith(export_scope)):\n var_def = variable_pb2.VariableDef()\n var_def.variable_name = ops.strip_name_scope(self._variable.name,\n export_scope)\n if self._initial_value is not None:\n # For backwards compatibility.\n var_def.initial_value_name = ops.strip_name_scope(\n self._initial_value.name, export_scope)\n var_def.trainable = self.trainable\n var_def.synchronization = self.synchronization.value\n var_def.aggregation = self.aggregation.value\n var_def.initializer_name = ops.strip_name_scope(self.initializer.name,\n export_scope)\n var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name,\n export_scope)\n if self._save_slice_info:\n var_def.save_slice_info_def.MergeFrom(\n self._save_slice_info.to_proto(export_scope=export_scope))\n return var_def\n else:\n return None\n\n def __iadd__(self, other):\n logging.log_first_n(\n logging.WARN, \"Variable += will be deprecated. 
Use variable.assign_add\"\n \" if you want assignment to the variable value or 'x = x + y'\"\n \" if you want a new python Tensor object.\", 1)\n return self + other\n\n def __isub__(self, other):\n logging.log_first_n(\n logging.WARN, \"Variable -= will be deprecated. Use variable.assign_sub\"\n \" if you want assignment to the variable value or 'x = x - y'\"\n \" if you want a new python Tensor object.\", 1)\n return self - other\n\n def __imul__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable *= will be deprecated. Use `var.assign(var * other)`\"\n \" if you want assignment to the variable value or `x = x * y`\"\n \" if you want a new python Tensor object.\", 1)\n return self * other\n\n def __idiv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __itruediv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __irealdiv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __ipow__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable **= will be deprecated. Use `var.assign(var ** other)`\"\n \" if you want assignment to the variable value or `x = x ** y`\"\n \" if you want a new python Tensor object.\", 1)\n return self**other\n\n\ndef _try_guard_against_uninitialized_dependencies(name, initial_value):\n \"\"\"Attempt to guard against dependencies on uninitialized variables.\n\n Replace references to variables in `initial_value` with references to the\n variable's initialized values. The initialized values are essentially\n conditional TensorFlow graphs that return a variable's value if it is\n initialized or its `initial_value` if it hasn't been initialized. This\n replacement is done on a best effort basis:\n\n - If the `initial_value` graph contains cycles, we don't do any\n replacements for that graph.\n - If the variables that `initial_value` depends on are not present in the\n `GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.\n\n In these cases, it is up to the caller to ensure that the `initial_value`\n graph uses initialized variables or that they guard access to variables\n using their `initialized_value` method.\n\n Args:\n name: Variable name.\n initial_value: `Tensor`. 
The initial value.\n\n Returns:\n A `Tensor` suitable to initialize a variable.\n Raises:\n TypeError: If `initial_value` is not a `Tensor`.\n \"\"\"\n if not isinstance(initial_value, ops.Tensor):\n raise TypeError(\"initial_value needs to be a Tensor: %s\" % initial_value)\n\n # Don't modify initial_value if it contains any cyclic dependencies.\n if _has_cycle(initial_value.op, state={}):\n return initial_value\n return _safe_initial_value_from_tensor(name, initial_value, op_cache={})\n\n\n_UNKNOWN, _STARTED, _FINISHED = range(3)\n\n\ndef _has_cycle(op, state):\n \"\"\"Detect cycles in the dependencies of `initial_value`.\"\"\"\n op_state = state.get(op.name, _UNKNOWN)\n if op_state == _STARTED:\n return True\n elif op_state == _FINISHED:\n return False\n\n state[op.name] = _STARTED\n for i in itertools.chain((i.op for i in op.inputs), op.control_inputs):\n if _has_cycle(i, state):\n return True\n state[op.name] = _FINISHED\n return False\n\n\ndef _safe_initial_value_from_tensor(name, tensor, op_cache):\n \"\"\"Replace dependencies on variables with their initialized values.\n\n Args:\n name: Variable name.\n tensor: A `Tensor`. The tensor to replace.\n op_cache: A dict mapping operation names to `Operation`s. Used to memoize\n the results so as to avoid creating redundant operations.\n\n Returns:\n A `Tensor` compatible with `tensor`. Any inputs that lead to variable\n values will be replaced with a corresponding graph that uses the\n variable's initialized values. This is done on a best-effort basis. If no\n modifications need to be made then `tensor` will be returned unchanged.\n \"\"\"\n op = tensor.op\n new_op = op_cache.get(op.name)\n if new_op is None:\n new_op = _safe_initial_value_from_op(name, op, op_cache)\n op_cache[op.name] = new_op\n return new_op.outputs[tensor.value_index]\n\n\ndef _safe_initial_value_from_op(name, op, op_cache):\n \"\"\"Replace dependencies on variables with their initialized values.\n\n Args:\n name: Variable name.\n op: An `Operation`. The operation to replace.\n op_cache: A dict mapping operation names to `Operation`s. Used to memoize\n the results so as to avoid creating redundant operations.\n\n Returns:\n An `Operation` compatible with `op`. Any inputs that lead to variable\n values will be replaced with a corresponding graph that uses the\n variable's initialized values. This is done on a best-effort basis. 
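As an illustrative sketch of the effect in terms of public TF1 APIs (this helper itself is internal):\n\n ```python\n v = tf.Variable(1.0)\n # The read of `v` inside `w`'s initializer is replaced with a graph that\n # yields `v`'s value if `v` is already initialized and `v`'s initial\n # value otherwise, so initializing `w` does not require `v` to be\n # initialized first.\n w = tf.Variable(v * 2.0)\n ```\n\n 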
If no\n modifications need to be made then `op` will be returned unchanged.\n \"\"\"\n op_type = op.node_def.op\n if op_type in (\"IsVariableInitialized\", \"VarIsInitializedOp\",\n \"ReadVariableOp\", \"If\"):\n return op\n\n # Attempt to find the initialized_value of any variable reference / handles.\n # TODO(b/70206927): Fix handling of ResourceVariables.\n if op_type in (\"Variable\", \"VariableV2\", \"VarHandleOp\"):\n initialized_value = _find_initialized_value_for_variable(op)\n return op if initialized_value is None else initialized_value.op\n\n # Recursively build initializer expressions for inputs.\n modified = False\n new_op_inputs = []\n for op_input in op.inputs:\n new_op_input = _safe_initial_value_from_tensor(name, op_input, op_cache)\n new_op_inputs.append(new_op_input)\n modified = modified or (new_op_input != op_input)\n\n # If at least one input was modified, replace the op.\n if modified:\n new_op_type = op_type\n if new_op_type == \"RefSwitch\":\n new_op_type = \"Switch\"\n new_op_name = op.node_def.name + \"_\" + name\n new_op_name = new_op_name.replace(\":\", \"_\")\n return op.graph.create_op(\n new_op_type,\n new_op_inputs,\n op._output_types, # pylint: disable=protected-access\n name=new_op_name,\n attrs=op.node_def.attr)\n\n return op\n\n\ndef _find_initialized_value_for_variable(variable_op):\n \"\"\"Find the initialized value for a variable op.\n\n To do so, lookup the variable op in the variables collection.\n\n Args:\n variable_op: A variable `Operation`.\n\n Returns:\n A `Tensor` representing the initialized value for the variable or `None`\n if the initialized value could not be found.\n \"\"\"\n try:\n var_names = [variable_op.node_def.name, variable_op.node_def.name + \":0\"]\n for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES):\n for var in variable_op.graph.get_collection(collection_name):\n if var.name in var_names:\n return var.initialized_value()\n except AttributeError:\n # Return None when an incomplete user-defined variable type was put in\n # the collection.\n return None\n return None\n\n\nclass PartitionedVariable(object):\n \"\"\"A container for partitioned `Variable` objects.\n\n @compatibility(eager) `tf.PartitionedVariable` is not compatible with\n eager execution. Use `tf.Variable` instead which is compatible\n with both eager execution and graph construction. See [the\n TensorFlow Eager Execution\n guide](https://www.tensorflow.org/guide/eager#variables_and_optimizers)\n for details on how variables work in eager execution.\n @end_compatibility\n \"\"\"\n\n def __init__(self, name, shape, dtype, variable_list, partitions):\n \"\"\"Creates a new partitioned variable wrapper.\n\n Variables passed via the variable_list must contain a save_slice_info\n field. Concatenation and iteration is in lexicographic order according\n to the var_offset property of the save_slice_info.\n\n Args:\n name: String. Overall name of the variables.\n shape: List of integers. Overall shape of the variables.\n dtype: Type of the variables.\n variable_list: List of `Variable` that comprise this partitioned variable.\n partitions: List of integers. 
Number of partitions for each dimension.\n\n Raises:\n TypeError: If `variable_list` is not a list of `Variable` objects, or\n `partitions` is not a list.\n ValueError: If `variable_list` is empty, or the `Variable` shape\n information does not match `shape`, or `partitions` has invalid values.\n \"\"\"\n if not isinstance(variable_list, (list, tuple)):\n raise TypeError(\"variable_list is not a list or tuple: %s\" %\n variable_list)\n if not isinstance(partitions, (list, tuple)):\n raise TypeError(\"partitions is not a list or tuple: %s\" % partitions)\n if not all(p >= 1 for p in partitions):\n raise ValueError(\"partition values must be positive: %s\" % partitions)\n if not variable_list:\n raise ValueError(\"variable_list may not be empty\")\n # pylint: disable=protected-access\n for v in variable_list:\n # Sort the variable_list lexicographically according to var offset value.\n if not all(v._get_save_slice_info() is not None for v in variable_list):\n raise ValueError(\n \"All variables must have a save_slice_info available: %s\" %\n [v.name for v in variable_list])\n if len(shape) != len(partitions):\n raise ValueError(\"len(shape) != len(partitions): %s vs. %s\" %\n (shape, partitions))\n if v._get_save_slice_info().full_shape != shape:\n raise ValueError(\"All variables' full shapes must match shape: %s; \"\n \"but full shapes were: %s\" %\n (shape, str([v._get_save_slice_info().full_shape])))\n self._variable_list = sorted(\n variable_list, key=lambda v: v._get_save_slice_info().var_offset)\n # pylint: enable=protected-access\n\n self._name = name\n self._shape = shape\n self._dtype = dtype\n self._partitions = partitions\n self._as_tensor = None\n\n def __iter__(self):\n \"\"\"Return an iterable for accessing the underlying partition Variables.\"\"\"\n return iter(self._variable_list)\n\n def __len__(self):\n num_partition_axes = len(self._partition_axes())\n if num_partition_axes > 1:\n raise ValueError(\"Cannot get a length for %d > 1 partition axes\" %\n num_partition_axes)\n return len(self._variable_list)\n\n def _partition_axes(self):\n if all(p == 1 for p in self._partitions):\n return [0]\n else:\n return [i for i, p in enumerate(self._partitions) if p > 1]\n\n def _concat(self):\n \"\"\"Returns the overall concatenated value as a `Tensor`.\n\n This is different from using the partitioned variable directly as a tensor\n (through tensor conversion and `as_tensor`) in that it creates a new set of\n operations that keeps the control dependencies from its scope.\n\n Returns:\n `Tensor` containing the concatenated value.\n \"\"\"\n if len(self._variable_list) == 1:\n with ops.name_scope(None):\n return array_ops.identity(self._variable_list[0], name=self._name)\n\n partition_axes = self._partition_axes()\n\n if len(partition_axes) > 1:\n raise NotImplementedError(\n \"Cannot concatenate along more than one dimension: %s. 
\"\n \"Multi-axis partition concat is not supported\" % str(partition_axes))\n partition_ix = partition_axes[0]\n\n with ops.name_scope(self._name + \"/ConcatPartitions/\"):\n concatenated = array_ops.concat(self._variable_list, partition_ix)\n\n with ops.name_scope(None):\n return array_ops.identity(concatenated, name=self._name)\n\n def as_tensor(self):\n \"\"\"Returns the overall concatenated value as a `Tensor`.\n\n The returned tensor will not inherit the control dependencies from the scope\n where the value is used, which is similar to getting the value of\n `Variable`.\n\n Returns:\n `Tensor` containing the concatenated value.\n \"\"\"\n with ops.control_dependencies(None):\n return self._concat()\n\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):\n # pylint: disable=invalid-name\n _ = name\n if dtype is not None and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n raise NotImplementedError(\n \"PartitionedVariable doesn't support being used as a reference.\")\n else:\n return v.as_tensor()\n\n @property\n def name(self):\n return self._name\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def shape(self):\n return self.get_shape()\n\n @property\n def _distribute_strategy(self):\n \"\"\"The `tf.distribute.Strategy` that this variable was created under.\"\"\"\n # NOTE(yuefengz): Today, no partitioned variables in a distribute strategy.\n return None\n\n def get_shape(self):\n return self._shape\n\n def _get_variable_list(self):\n return self._variable_list\n\n def _get_partitions(self):\n return self._partitions\n\n def _apply_assign_fn(self, assign_fn, value):\n partition_axes = self._partition_axes()\n if len(partition_axes) > 1:\n raise NotImplementedError(\n \"Cannot do assign action along more than one dimension: %s. 
\"\n \"Multi-axis partition assign action is not supported \" %\n str(partition_axes))\n if isinstance(value, list):\n assert len(value) == len(self._variable_list)\n value_list = value\n elif isinstance(value, PartitionedVariable):\n value_list = [var_part for var_part in value]\n else:\n partition_ix = partition_axes[0]\n size_splits_list = [\n tensor_shape.dimension_value(var.shape[partition_ix])\n for var in self._variable_list\n ]\n value_list = array_ops.split(value, size_splits_list, axis=partition_ix)\n\n op_list = [\n assign_fn(var, value_list[idx])\n for idx, var in enumerate(self._variable_list)\n ]\n return op_list\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n assign_fn = lambda var, r_value: var.assign(\n r_value, use_locking=use_locking, name=name, read_value=read_value)\n assign_list = self._apply_assign_fn(assign_fn, value)\n if read_value:\n return assign_list\n return [assign.op for assign in assign_list]\n\n def assign_add(self, value, use_locking=False, name=None, read_value=True):\n assign_fn = lambda var, r_value: var.assign_add(\n r_value, use_locking=use_locking, name=name, read_value=read_value)\n assign_list = self._apply_assign_fn(assign_fn, value)\n if read_value:\n return assign_list\n return [assign.op for assign in assign_list]\n\n def assign_sub(self, value, use_locking=False, name=None, read_value=True):\n assign_fn = lambda var, r_value: var.assign_sub(\n r_value, use_locking=use_locking, name=name, read_value=read_value)\n assign_list = self._apply_assign_fn(assign_fn, value)\n if read_value:\n return assign_list\n return [assign.op for assign in assign_list]\n\n\n# Register a conversion function which reads the value of the variable,\n# allowing instances of the class to be used as tensors.\nops.register_tensor_conversion_function(RefVariable,\n RefVariable._TensorConversionFunction) # pylint: disable=protected-access\nops.register_dense_tensor_like_type(RefVariable)\n\n\n@tf_export(v1=[\"global_variables\"])\ndef global_variables(scope=None):\n \"\"\"Returns global variables.\n\n Global variables are variables that are shared across machines in a\n distributed environment. The `Variable()` constructor or `get_variable()`\n automatically adds new variables to the graph collection\n `GraphKeys.GLOBAL_VARIABLES`.\n This convenience function returns the contents of that collection.\n\n An alternative to global variables are local variables. See\n `tf.compat.v1.local_variables`\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered to\n include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a scope\n is supplied. The choice of `re.match` means that a `scope` without special\n tokens filters by prefix.\n\n Returns:\n A list of `Variable` objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)\n\n\n@tf_export(v1=[\"all_variables\"])\n@deprecated(\"2017-03-02\", \"Please use tf.global_variables instead.\")\ndef all_variables():\n \"\"\"Use `tf.compat.v1.global_variables` instead.\"\"\"\n return global_variables()\n\n\ndef _all_saveable_objects(scope=None):\n \"\"\"Returns all variables and `SaveableObject`s that must be checkpointed.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered to\n include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a scope\n is supplied. 
The choice of `re.match` means that a `scope` without special\n tokens filters by prefix.\n\n Returns:\n A list of `Variable` and `SaveableObject` to be checkpointed\n \"\"\"\n # TODO(andreasst): make this function public once things are settled.\n return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) +\n ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope))\n\n\n@tf_export(v1=[\"local_variables\"])\ndef local_variables(scope=None):\n \"\"\"Returns local variables.\n\n Local variables - per process variables, usually not saved/restored to\n checkpoint and used for temporary or intermediate values.\n For example, they can be used as counters for metrics computation or\n number of epochs this machine has read data.\n The `tf.contrib.framework.local_variable()` function automatically adds the\n new variable to `GraphKeys.LOCAL_VARIABLES`.\n This convenience function returns the contents of that collection.\n\n An alternative to local variables are global variables. See\n `tf.compat.v1.global_variables`\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered to\n include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a scope\n is supplied. The choice of `re.match` means that a `scope` without special\n tokens filters by prefix.\n\n Returns:\n A list of local `Variable` objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)\n\n\n@tf_export(v1=[\"model_variables\"])\ndef model_variables(scope=None):\n \"\"\"Returns all variables in the MODEL_VARIABLES collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered to\n include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a scope\n is supplied. The choice of `re.match` means that a `scope` without special\n tokens filters by prefix.\n\n Returns:\n A list of local Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)\n\n\n@tf_export(v1=[\"trainable_variables\"])\ndef trainable_variables(scope=None):\n \"\"\"Returns all variables created with `trainable=True`.\n\n When passed `trainable=True`, the `Variable()` constructor automatically\n adds new variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the\n contents of that collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered to\n include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a scope\n is supplied. The choice of `re.match` means that a `scope` without special\n tokens filters by prefix.\n\n Returns:\n A list of Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)\n\n\n@tf_export(v1=[\"moving_average_variables\"])\ndef moving_average_variables(scope=None):\n \"\"\"Returns all variables that maintain their moving averages.\n\n If an `ExponentialMovingAverage` object is created and the `apply()`\n method is called on a list of variables, these variables will\n be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.\n This convenience function returns the contents of that collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered to\n include only items whose `name` attribute matches `scope` using\n `re.match`. 
Items without a `name` attribute are never returned if a scope\n is supplied. The choice of `re.match` means that a `scope` without special\n tokens filters by prefix.\n\n Returns:\n A list of Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)\n\n\n@tf_export(v1=[\"initializers.variables\", \"variables_initializer\"])\ndef variables_initializer(var_list, name=\"init\"):\n \"\"\"Returns an Op that initializes a list of variables.\n\n After you launch the graph in a session, you can run the returned Op to\n initialize all the variables in `var_list`. This Op runs all the\n initializers of the variables in `var_list` in parallel.\n\n Calling `initialize_variables()` is equivalent to passing the list of\n initializers to `Group()`.\n\n If `var_list` is empty, however, the function still returns an Op that can\n be run. That Op just has no effect.\n\n Args:\n var_list: List of `Variable` objects to initialize.\n name: Optional name for the returned operation.\n\n Returns:\n An Op that run the initializers of all the specified variables.\n \"\"\"\n if var_list and not context.executing_eagerly():\n return control_flow_ops.group(*[v.initializer for v in var_list], name=name)\n return control_flow_ops.no_op(name=name)\n\n\n@tf_export(v1=[\"initialize_variables\"])\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.variables_initializer` instead.\")\ndef initialize_variables(var_list, name=\"init\"):\n \"\"\"See `tf.compat.v1.variables_initializer`.\"\"\"\n return variables_initializer(var_list, name=name)\n\n\n@tf_export(v1=[\"initializers.global_variables\", \"global_variables_initializer\"])\ndef global_variables_initializer():\n \"\"\"Returns an Op that initializes global variables.\n\n This is just a shortcut for `variables_initializer(global_variables())`\n\n Returns:\n An Op that initializes global variables in the graph.\n \"\"\"\n if context.executing_eagerly():\n return control_flow_ops.no_op(name=\"global_variables_initializer\")\n return variables_initializer(global_variables())\n\n\n@tf_export(v1=[\"initialize_all_variables\"])\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.global_variables_initializer` instead.\")\ndef initialize_all_variables():\n \"\"\"See `tf.compat.v1.global_variables_initializer`.\"\"\"\n return global_variables_initializer()\n\n\n@tf_export(v1=[\"initializers.local_variables\", \"local_variables_initializer\"])\ndef local_variables_initializer():\n \"\"\"Returns an Op that initializes all local variables.\n\n This is just a shortcut for `variables_initializer(local_variables())`\n\n Returns:\n An Op that initializes all local variables in the graph.\n \"\"\"\n if context.executing_eagerly():\n return control_flow_ops.no_op(name=\"local_variables_initializer\")\n return variables_initializer(local_variables())\n\n\n@tf_export(v1=[\"initialize_local_variables\"])\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.local_variables_initializer` instead.\")\ndef initialize_local_variables():\n \"\"\"See `tf.compat.v1.local_variables_initializer`.\"\"\"\n return local_variables_initializer()\n\n\n@tf_export(v1=[\"is_variable_initialized\"])\n@tf_should_use.should_use_result\ndef is_variable_initialized(variable):\n \"\"\"Tests if a variable has been initialized.\n\n Args:\n variable: A `Variable`.\n\n Returns:\n Returns a scalar boolean Tensor, `True` if the variable has been\n initialized, `False` otherwise.\n \"\"\"\n return 
state_ops.is_variable_initialized(variable)\n\n\n@tf_export(v1=[\"assert_variables_initialized\"])\n@tf_should_use.should_use_result\ndef assert_variables_initialized(var_list=None):\n \"\"\"Returns an Op to check if variables are initialized.\n\n NOTE: This function is obsolete and will be removed in 6 months. Please\n change your implementation to use `report_uninitialized_variables()`.\n\n When run, the returned Op will raise the exception `FailedPreconditionError`\n if any of the variables has not yet been initialized.\n\n Note: This function is implemented by trying to fetch the values of the\n variables. If one of the variables is not initialized a message may be\n logged by the C++ runtime. This is expected.\n\n Args:\n var_list: List of `Variable` objects to check. Defaults to the value of\n `global_variables().`\n\n Returns:\n An Op, or None if there are no variables.\n \"\"\"\n if var_list is None:\n var_list = global_variables() + local_variables()\n # Backwards compatibility for old-style variables. TODO(touts): remove.\n if not var_list:\n var_list = []\n for op in ops.get_default_graph().get_operations():\n if op.type in [\"Variable\", \"VariableV2\", \"AutoReloadVariable\"]:\n var_list.append(op.outputs[0])\n if not var_list:\n return None\n else:\n ranks = []\n for var in var_list:\n with ops.colocate_with(var.op):\n ranks.append(array_ops.rank_internal(var, optimize=False))\n if len(ranks) == 1:\n return ranks[0]\n else:\n return array_ops.stack(ranks)\n\n\n@tf_export(v1=[\"report_uninitialized_variables\"])\n@tf_should_use.should_use_result\ndef report_uninitialized_variables(var_list=None,\n name=\"report_uninitialized_variables\"):\n \"\"\"Adds ops to list the names of uninitialized variables.\n\n When run, it returns a 1-D tensor containing the names of uninitialized\n variables if there are any, or an empty array if there are none.\n\n Args:\n var_list: List of `Variable` objects to check. Defaults to the value of\n `global_variables() + local_variables()`\n name: Optional name of the `Operation`.\n\n Returns:\n A 1-D tensor containing names of the uninitialized variables, or an empty\n 1-D tensor if there are no variables or no uninitialized variables.\n \"\"\"\n if var_list is None:\n var_list = global_variables() + local_variables()\n # Backwards compatibility for old-style variables. 
TODO(touts): remove.\n if not var_list:\n var_list = []\n for op in ops.get_default_graph().get_operations():\n if op.type in [\"Variable\", \"VariableV2\", \"AutoReloadVariable\"]:\n var_list.append(op.outputs[0])\n with ops.name_scope(name):\n # Run all operations on CPU\n if var_list:\n init_vars = [state_ops.is_variable_initialized(v) for v in var_list]\n local_device = os.environ.get(\n \"TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING\", \"/cpu:0\")\n with ops.device(local_device):\n if not var_list:\n # Return an empty tensor so we only need to check for returned tensor\n # size being 0 as an indication of model ready.\n return array_ops.constant([], dtype=dtypes.string)\n else:\n # Get a 1-D boolean tensor listing whether each variable is initialized.\n variables_mask = math_ops.logical_not(array_ops.stack(init_vars))\n # Get a 1-D string tensor containing all the variable names.\n variable_names_tensor = array_ops.constant(\n [s.op.name for s in var_list])\n # Return a 1-D tensor containing all the names of\n # uninitialized variables.\n return array_ops.boolean_mask(variable_names_tensor, variables_mask)\n\n\nops.register_tensor_conversion_function(\n PartitionedVariable, PartitionedVariable._TensorConversionFunction) # pylint: disable=protected-access\n\n\nclass AbstractVariableMetaclass(VariableMetaclass, abc.ABCMeta):\n \"\"\"Metaclass combining `VariableMetaclass` and `abc.ABCMeta`.\"\"\"\n pass\n\n\[email protected]_metaclass(AbstractVariableMetaclass)\nclass AbstractVariable(Variable):\n \"\"\"`Variable`, but abstract.\"\"\"\n pass\n"
] | [
[
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.util.deprecation.deprecated",
"tensorflow.python.framework.ops.colocate_with",
"tensorflow.python.framework.ops.register_tensor_conversion_function",
"tensorflow.python.ops.state_ops.assign_sub",
"tensorflow.python.framework.ops.get_default_session",
"tensorflow.python.ops.array_ops.rank_internal",
"tensorflow.python.ops.state_ops.variable_op_v2",
"tensorflow.python.ops.gen_state_ops.scatter_update",
"tensorflow.python.framework.tensor_shape.dimension_value",
"tensorflow.python.framework.ops.add_to_collections",
"tensorflow.python.ops.state_ops.is_variable_initialized",
"tensorflow.python.ops.gen_state_ops.scatter_sub",
"tensorflow.python.ops.gen_state_ops.scatter_div",
"tensorflow.python.framework.ops.name_from_scope_name",
"tensorflow.python.util.object_identity.Reference",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.core.framework.variable_pb2.SaveSliceInfoDef",
"tensorflow.python.ops.gen_state_ops.scatter_min",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.framework.ops.register_dense_tensor_like_type",
"tensorflow.python.framework.ops.strip_name_scope",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.ops.gen_state_ops.scatter_nd_update",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gen_state_ops.scatter_nd_sub",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.framework.ops.prepend_name_scope",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.state_ops.count_up_to",
"tensorflow.python.ops.gen_math_ops.equal",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python._pywrap_utils.RegisterType",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.gen_state_ops.scatter_nd_add",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.gen_state_ops.scatter_add",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.gen_math_ops.not_equal",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.python.ops.gen_state_ops.scatter_max",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.ops.array_ops.split",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.ops.state_ops.batch_scatter_update",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.gen_state_ops.scatter_mul",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.core.framework.variable_pb2.VariableDef",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions"
]
] |
vitorenesduarte/planet_sim | [
"aea6e324fdce7802976cbafa88a516ed40609ce9"
] | [
"fantoch_bote/plot/plot.py"
] | [
"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfile = open(\"out.dat\", \"r\")\n\n# parse data in file\nfor (id, line) in enumerate(file):\n min_n = 3\n max_n = 0\n # a2 and f2 are initialized with a latency 0 since for n = 3 we don't have a latency value\n data = {\"a1\": [], \"a2\": [0], \"f1\": [], \"f2\": [0], \"e\": [],\n \"a1C\": [], \"a2C\": [0], \"f1C\": [], \"f2C\": [0], \"eC\": []}\n\n # drop last \" \" and split by \"|\"\n for n_entry in line.strip()[:-1].split(\"|\"):\n # now we have an entry like:\n # - \"[n=3] a1=(330, 0.19, 52.91) f1=(442, 0.21, 75.15) e=(330, 0.19, 52.91)\"\n\n # if we split by \" \" we can get n\n splitted = n_entry.strip().split(\" \")\n n = int(splitted[0][1:-1].split(\"=\")[1])\n\n # update max n\n max_n = max(max_n, n)\n\n # join the remaining back to have something like:\n # - \"a1=(330, 0.19, 52.91) f1=(442, 0.21, 75.15) e=(330, 0.19, 52.91)\"\n n_entry = \" \".join(splitted[1:])\n\n # replace \") \" with \"|\" and split by it\n for protocol_stats in n_entry.replace(\") \", \"|\").split(\"|\"):\n # now we have something like:\n # - \"a1=(330, 0.19, 52.91\"\n\n # if we split by \"=(\" we can get protocol name and its latency (1st entry in the tuple)\n splitted = protocol_stats.split(\"=(\")\n protocol = splitted[0]\n latency = int(splitted[1].split(\",\")[0])\n\n # save it\n data[protocol].append(latency)\n\n # computes xs\n labels = list(range(min_n, max_n + 1, 2))\n xs = np.arange(len(labels))\n\n # style stuff\n width = 0.12\n ylimits = [0, 500]\n\n for (suffix, title) in [(\"\", \"everywhere\"), (\"C\", \"colocated\")]:\n # create plot\n fig, ax = plt.subplots()\n\n ax.bar(xs - 4*width/2, data[\"a1\" + suffix], width, label='Atlas f=1')\n ax.bar(xs - 2*width/2, data[\"a2\" + suffix], width, label='Atlas f=2')\n ax.bar(xs, data[\"e\" + suffix], width, label='EPaxos')\n ax.bar(xs + 2*width/2, data[\"f1\" + suffix], width, label='FPaxos f=1')\n ax.bar(xs + 4*width/2, data[\"f2\" + suffix], width, label='FPaxos f=2')\n\n # set title, y limits, legend and labels\n ax.set_title(title)\n ax.set_ylim(ylimits)\n ax.legend(loc='upper right', shadow=True)\n ax.set(xlabel='#sites', ylabel='(ms)')\n ax.grid()\n\n # tickcs\n ax.set_xticks(xs)\n ax.set_xticklabels(labels)\n\n # save figure in png\n fig.savefig(str(id) + title + \".png\", dpi=400, format='png')\n"
] | [
[
"matplotlib.pyplot.subplots"
]
] |
QIU023/LifeLong-Segmentation | [
"f479d1641f461e9344dcf661d0ada7484fb80896"
] | [
"seg-part/train.py"
] | [
"import argparse\nimport os\nimport numpy as np\nfrom tqdm import tqdm\nfrom ipdb import set_trace\nfrom mypath import Path\nfrom dataloaders import make_data_loader\nfrom modeling.sync_batchnorm.replicate import patch_replication_callback\nfrom modeling.deeplab import *\nfrom modeling.unet import *\nfrom utils.loss import SegmentationLosses\nfrom utils.calculate_weights import calculate_weigths_labels\nfrom utils.lr_scheduler import LR_Scheduler\nfrom utils.saver import Saver\nfrom utils.summaries import TensorboardSummary\nfrom utils.metrics import Evaluator\nfrom dataloaders.utils import encode_segmap\n\nclass Trainer(object):\n def __init__(self, args):\n self.args = args\n\n # Define Saver\n self.saver = Saver(args)\n self.saver.save_experiment_config()\n # Define Tensorboard Summary\n self.summary = TensorboardSummary(self.saver.experiment_dir)\n self.writer = self.summary.create_summary()\n\n # Define Dataloader\n kwargs = {'num_workers': args.workers, 'pin_memory': True}\n self.train_loader, self.val_loader, self.test_loader, self.nclass = make_data_loader(args, **kwargs)\n\n # Define network\n if args.model == 'deeplab':\n model = DeepLab(num_classes=self.nclass,\n backbone=args.backbone,\n output_stride=args.out_stride,\n sync_bn=args.sync_bn,\n freeze_bn=args.freeze_bn)\n model_params = [{'params': model.get_1x_lr_params(), 'lr': args.lr},\n {'params': model.get_10x_lr_params(), 'lr': args.lr * 10}]\n elif args.model == 'unet':\n model = UNet(n_channels=3,\n n_classes=self.nclass)\n model_params = model.parameters()\n\n # Define Optimizer\n optimizer = torch.optim.SGD(model_params, lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n nesterov=args.nesterov)\n\n # Define Criterion\n # whether to use class balanced weights\n if args.use_balanced_weights:\n classes_weights_path = os.path.join(Path.db_root_dir(args.dataset), args.dataset+'_classes_weights.npy')\n if os.path.isfile(classes_weights_path):\n weight = np.load(classes_weights_path)\n else:\n weight = calculate_weigths_labels(args.dataset, self.train_loader, self.nclass)\n weight = torch.from_numpy(weight.astype(np.float32))\n else:\n weight = None\n self.criterion = SegmentationLosses(weight=weight, cuda=args.cuda).build_loss(mode=args.loss_type)\n self.model, self.optimizer = model, optimizer\n\n # Define Evaluator\n self.evaluator = Evaluator(self.nclass)\n # Define lr scheduler\n self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr,\n args.epochs, len(self.train_loader))\n\n # Using cuda\n if args.cuda:\n self.model = torch.nn.DataParallel(self.model, device_ids=self.args.gpu_ids)\n patch_replication_callback(self.model)\n self.model = self.model.cuda()\n\n # Resuming checkpoint\n self.best_pred = 0.0\n if args.resume is not None:\n if not os.path.isfile(args.resume):\n raise RuntimeError(\"=> no checkpoint found at '{}'\" .format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n if args.cuda:\n self.model.module.load_state_dict(checkpoint['state_dict'])\n else:\n self.model.load_state_dict(checkpoint['state_dict'])\n if not args.ft:\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.best_pred = checkpoint['best_pred']\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n\n # Clear start epoch if fine-tuning\n if args.ft:\n args.start_epoch = 0\n\n def training(self, epoch):\n train_loss = 0.0\n self.model.train()\n tbar = tqdm(self.train_loader)\n num_img_tr = len(self.train_loader)\n 
#$set_trace()\n for i, sample in enumerate(tbar):\n image, target = sample['image'], sample['label']\n if self.args.cuda:\n image, target = image.cuda(), target.cuda()\n self.scheduler(self.optimizer, i, epoch, self.best_pred)\n self.optimizer.zero_grad()\n if self.args.model == 'deeplab':\n output, fuse = self.model(image)\n elif self.args.model == 'unet':\n output = self.model(image)\n #print(\"lalala\",output.shape)\n loss = self.criterion(output, target)\n loss.backward()\n self.optimizer.step()\n train_loss += loss.item()\n tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))\n self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)\n\n # Show 10 * 3 inference results each epoch\n if i % (num_img_tr // 10) == 0:\n global_step = i + num_img_tr * epoch\n self.summary.visualize_image(self.writer, self.args.dataset, image, target, output, global_step)\n\n self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)\n print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))\n print('Loss: %.3f' % train_loss)\n\n if self.args.no_val == 'True':\n # save checkpoint every epoch\n is_best = False\n self.saver.save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': self.model.module.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_pred': self.best_pred,\n }, is_best)\n\n\n def validation(self, epoch):\n self.model.eval()\n self.evaluator.reset()\n tbar = tqdm(self.val_loader, desc='\\r')\n test_loss = 0.0\n for i, sample in enumerate(tbar):\n image, target = sample['image'], sample['label']\n if self.args.cuda:\n image, target = image.cuda(), target.cuda()\n with torch.no_grad():\n if self.args.model == 'deeplab':\n output, fuse = self.model(image)\n else:\n output = self.model(image)\n loss = self.criterion(output, target)\n test_loss += loss.item()\n tbar.set_description('Test loss: %.3f' % (test_loss / (i + 1)))\n pred = output.data.cpu().numpy()\n target = target.cpu().numpy()\n pred = np.argmax(pred, axis=1)\n # Add batch sample into evaluator\n self.evaluator.add_batch(target, pred)\n\t\n # Fast test during the training\n Acc = self.evaluator.Pixel_Accuracy()\n Acc_class = self.evaluator.Pixel_Accuracy_Class()\n mIoU = self.evaluator.Mean_Intersection_over_Union()\n FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()\n Jaccard = self.evaluator.Jaccard()\n Dice = self.evaluator.Dice()\n self.writer.add_scalar('val/total_loss_epoch', test_loss, epoch)\n self.writer.add_scalar('val/mIoU', mIoU, epoch)\n self.writer.add_scalar('val/Acc', Acc, epoch)\n self.writer.add_scalar('val/Acc_class', Acc_class, epoch)\n self.writer.add_scalar('val/fwIoU', FWIoU, epoch)\n self.writer.add_scalar('val/dice', Dice, epoch)\n self.writer.add_scalar('val/jaccard', Jaccard, epoch)\n print('Validation:')\n print('[Epoch: %d, numImages: %5d]' % (epoch, i * self.args.batch_size + image.data.shape[0]))\n print(\"Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}\".format(Acc, Acc_class, mIoU, FWIoU))\n print('Loss: %.3f' % test_loss)\n\n new_pred = mIoU\n if new_pred > self.best_pred:\n is_best = True\n self.best_pred = new_pred\n self.saver.save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': self.model.module.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'best_pred': self.best_pred,\n }, is_best)\n\ndef main():\n parser = argparse.ArgumentParser(description=\"PyTorch DeeplabV3Plus Training\")\n parser.add_argument('--model', type=str, default='deeplab',\n choices=['deeplab', 'unet'], 
help='model name (default:deeplab)')\n parser.add_argument('--backbone', type=str, default='resnet',\n choices=['resnet', 'xception', 'drn', 'mobilenet'],\n help='backbone name (default: resnet)')\n parser.add_argument('--out-stride', type=int, default=16,\n help='network output stride (default: 8)')\n parser.add_argument('--dataset', type=str, default='pascal',\n choices=['pascal', 'coco', 'cityscapes', 'isic','chaos1','chaos2'],\n help='dataset name (default: pascal)')\n parser.add_argument('--train-percent',type=float,default=0.8,help='the percent of train_dataset and total_dataset')\n parser.add_argument('--use-sbd', action='store_true', default=False,\n help='whether to use SBD dataset (default: True)')\n parser.add_argument('--high-confidence', action='store_true', default=False,\n help='whether use high-confidence (default: Frue)')\n parser.add_argument('--workers', type=int, default=4,\n metavar='N', help='dataloader threads')\n parser.add_argument('--base-size', type=int, default=513,\n help='base image size')\n parser.add_argument('--crop-size', type=int, default=256,\n help='crop image size')\n parser.add_argument('--sync-bn', type=bool, default=None,\n help='whether to use sync bn (default: auto)')\n parser.add_argument('--freeze-bn', type=bool, default=False,\n help='whether to freeze bn parameters (default: False)')\n parser.add_argument('--loss-type', type=str, default='ce',\n choices=['ce', 'focal'],\n help='loss func type (default: ce)')\n # training hyper params\n parser.add_argument('--epochs', type=int, default=None, metavar='N',\n help='number of epochs to train (default: auto)')\n parser.add_argument('--start_epoch', type=int, default=0,\n metavar='N', help='start epochs (default:0)')\n parser.add_argument('--batch-size', type=int, default=None,\n metavar='N', help='input batch size for \\\n training (default: auto)')\n parser.add_argument('--test-batch-size', type=int, default=None,\n metavar='N', help='input batch size for \\\n testing (default: auto)')\n parser.add_argument('--use-balanced-weights', action='store_true', default=False,\n help='whether to use balanced weights (default: False)')\n # optimizer params\n parser.add_argument('--lr', type=float, default=None, metavar='LR',\n help='learning rate (default: auto)')\n parser.add_argument('--lr-scheduler', type=str, default='poly',\n choices=['poly', 'step', 'cos'],\n help='lr scheduler mode: (default: poly)')\n parser.add_argument('--momentum', type=float, default=0.9,\n metavar='M', help='momentum (default: 0.9)')\n parser.add_argument('--percent', type=float, default=0.6,\n metavar='M', help='percent of pseudo labels (default: 0.6)')\n parser.add_argument('--label-percent', type=float, default=0.6,\n metavar='M', help='percent of pseudo labels (default: 0.6)')\n parser.add_argument('--weight-decay', type=float, default=5e-4,\n metavar='M', help='w-decay (default: 5e-4)')\n parser.add_argument('--nesterov', action='store_true', default=False,\n help='whether use nesterov (default: False)')\n # cuda, seed and logging\n parser.add_argument('--no-cuda', action='store_true', default=\n False, help='disables CUDA training')\n parser.add_argument('--gpu-ids', type=str, default='0',\n help='use which gpu to train, must be a \\\n comma-separated list of integers only (default=0)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n # checking point\n parser.add_argument('--resume', type=str, default=None,\n help='put the path to resuming file if needed')\n 
parser.add_argument('--checkname', type=str, default=None,\n help='set the checkpoint name')\n # finetuning pre-trained models\n parser.add_argument('--ft', action='store_true', default=False,\n help='finetuning on a different dataset')\n # evaluation option\n parser.add_argument('--eval-interval', type=int, default=1,\n help='evaluuation interval (default: 1)')\n parser.add_argument('--no-val', type=str, default=False,\n help='skip validation during training')\n\n args = parser.parse_args()\n args.cuda = not args.no_cuda and torch.cuda.is_available()\n if args.cuda:\n try:\n args.gpu_ids = [int(s) for s in args.gpu_ids.split(',')]\n except ValueError:\n raise ValueError('Argument --gpu_ids must be a comma-separated list of integers only')\n\n if args.sync_bn is None:\n if args.cuda and len(args.gpu_ids) > 1:\n args.sync_bn = True\n else:\n args.sync_bn = False\n\n # default settings for epochs, batch_size and lr\n if args.epochs is None:\n epoches = {\n 'coco': 30,\n 'cityscapes': 200,\n 'pascal': 50,\n }\n args.epochs = epoches[args.dataset.lower()]\n\n if args.batch_size is None:\n args.batch_size = 4 * len(args.gpu_ids)\n\n if args.test_batch_size is None:\n args.test_batch_size = args.batch_size\n\n if args.lr is None:\n lrs = {\n 'coco': 0.1,\n 'cityscapes': 0.01,\n 'pascal': 0.007,\n }\n args.lr = lrs[args.dataset.lower()] / (4 * len(args.gpu_ids)) * args.batch_size\n\n\n if args.checkname is None:\n args.checkname = 'deeplab-'+str(args.backbone)\n print(args)\n #torch.manual_seed(args.seed)\n trainer = Trainer(args)\n print('Starting Epoch:', trainer.args.start_epoch)\n print('Total Epoches:', trainer.args.epochs)\n for epoch in range(trainer.args.start_epoch, trainer.args.epochs):\n trainer.training(epoch)\n if not trainer.args.no_val and epoch % args.eval_interval == (args.eval_interval - 1):\n trainer.validation(epoch)\n\n trainer.writer.close()\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.load",
"numpy.argmax"
]
] |
EmilRyberg/marathon_ros2 | [
"a7a979e322de41c868e94e91fa7a8188dede6acb"
] | [
"marathon_ros2_csv/marathon_ros2_csv/plot_csv.py"
] | [
"from IPython.core.display import HTML, SVG\nimport pandas as pd\nimport numpy as np\nimport xport\nimport IPython\nfrom ipywidgets import Layout\nfrom ipywidgets import widgets\nfrom IPython.display import display\n\nimport matplotlib.ticker as ticker\nimport matplotlib.cm as cm\nimport matplotlib as mpl\nfrom matplotlib.gridspec import GridSpec\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ninput = pd.read_csv('/home/jgines/Desktop/topics_to_csv_12_02_2020_17_21_44.csv')\nsource = []\n#rango dinámico\nfor i in range(0,1857):\n for l in input:\n #if l == 'time':\n # continue\n #if l == 'distance':\n # continue\n #if l == 'recovery_behavior_executed':\n # continue\n #if l == 'vel_x':\n # continue\n #if l == 'vel_theta':\n # continue\n\n new_reg = {\n 'time':i,\n 'distance':input['distance'][i],\n 'recovery_behavior_executed':input['recovery_behavior_executed'][i],\n 'vel_x':input['vel_x'][i],\n 'vel_theta':input['vel_theta'][i]\n }\n source.append(new_reg)\n\ndata = pd.DataFrame(source)\n#print(data)\nsns.relplot(x=\"time\", y=\"distance\", kind=\"line\", data=data)\nplt.show()"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.show",
"pandas.DataFrame"
]
] |
elementary-robotics/atom | [
"36aea078c0e029f03e7b9b4768729a683fb32a88"
] | [
"languages/python/tests/test_atom.py"
] | [
"import copy\nimport gc\nimport os\nimport random\nimport time\nfrom multiprocessing import Process, Queue\nfrom threading import Thread\n\nimport numpy as np\nimport pytest\nimport redis\nfrom atom import AtomError, Element, MetricsLevel, SetEmptyError\nfrom atom.config import (\n ATOM_CALLBACK_FAILED,\n ATOM_COMMAND_NO_ACK,\n ATOM_COMMAND_NO_RESPONSE,\n ATOM_COMMAND_UNSUPPORTED,\n ATOM_NO_ERROR,\n ATOM_USER_ERRORS_BEGIN,\n COMMAND_LIST_COMMAND,\n DEFAULT_REDIS_PORT,\n DEFAULT_REDIS_SOCKET,\n HEALTHCHECK_COMMAND,\n HEALTHCHECK_RETRY_INTERVAL,\n LANG,\n REDIS_PIPELINE_POOL_SIZE,\n VERSION,\n VERSION_COMMAND,\n)\nfrom atom.element import ElementConnectionTimeoutError\nfrom atom.messages import Response, StreamHandler\nfrom msgpack import unpackb\nfrom redistimeseries.client import Client as RedisTimeSeries\n\npytest.caller_incrementor = 0\npytest.responder_incrementor = 0\n\nTEST_REDIS_SOCKET = os.getenv(\"TEST_REDIS_SOCKET\", DEFAULT_REDIS_SOCKET)\nTEST_REDIS_HOST = os.getenv(\"TEST_REDIS_HOST\", None)\nTEST_REDIS_PORT = os.getenv(\"TEST_REDIS_PORT\", DEFAULT_REDIS_PORT)\n\n\nclass TestAtom:\n def _assert_cleaned_up(self, element):\n for s in element.streams:\n private_sn = element._make_stream_id(element.name, s)\n exists_val = element._rclient.exists(private_sn)\n assert not exists_val, \"private redis stream key %s should not exist\" % (\n private_sn,\n )\n\n def _element_create(\n self,\n name,\n host=TEST_REDIS_HOST,\n port=TEST_REDIS_PORT,\n socket_path=TEST_REDIS_SOCKET,\n conn_timeout_ms=2000,\n data_timeout_ms=5000,\n ):\n # Make sure metrics is enabled. Some tests turn it off\n os.environ[\"ATOM_USE_METRICS\"] = \"TRUE\"\n return Element(\n name,\n host=host,\n port=port,\n socket_path=socket_path,\n conn_timeout_ms=conn_timeout_ms,\n data_timeout_ms=data_timeout_ms,\n )\n\n def _element_start(\n self,\n element,\n caller,\n read_block_ms=500,\n do_healthcheck=True,\n healthcheck_interval=0.5,\n ):\n element.command_loop(block=False, read_block_ms=read_block_ms)\n if do_healthcheck:\n caller.wait_for_elements_healthy(\n [element.name], retry_interval=healthcheck_interval\n )\n\n def _element_cleanup(self, element):\n element.command_loop_shutdown(block=True)\n element._clean_up()\n\n def _get_redis_client(self):\n if TEST_REDIS_HOST is not None:\n client = redis.StrictRedis(host=TEST_REDIS_HOST, port=TEST_REDIS_PORT)\n else:\n client = redis.StrictRedis(unix_socket_path=TEST_REDIS_SOCKET)\n\n return client\n\n @pytest.fixture(autouse=True)\n def client(self):\n \"\"\"\n Run at setup, creates a redis client and flushes\n all existing keys in the DB to ensure no interaction\n between the tests and a fresh startup state between the\n tests\n \"\"\"\n\n client = self._get_redis_client()\n client.flushall()\n keys = client.keys()\n assert keys == []\n yield client\n\n del client\n\n @pytest.fixture\n def caller(self, client, check_redis_end, metrics):\n \"\"\"\n Sets up the caller before each test function is run.\n Tears down the caller after each test is run.\n \"\"\"\n # Want to be at the highest log level for testing\n os.environ[\"ATOM_LOG_LEVEL\"] = \"DEBUG\"\n\n caller_name = \"test_caller_%s\" % (pytest.caller_incrementor,)\n caller = self._element_create(caller_name)\n yield caller, caller_name\n pytest.caller_incrementor += 1\n\n # Need to manually call the delete method to\n # clean up the object since garbage collection\n # won't get to it until all fixtures have run and\n # then the check_redis_end fixture won't be able\n # to see how well we cleaned up\n 
caller._clean_up()\n\n @pytest.fixture\n def responder(self, client, check_redis_end, metrics):\n \"\"\"\n Sets up the responder before each test function is run.\n Tears down the responder after each test is run.\n \"\"\"\n responder_name = \"test_responder_%s\" % (pytest.responder_incrementor,)\n responder = self._element_create(responder_name)\n yield responder, responder_name\n pytest.responder_incrementor += 1\n\n # Need to manually call the delete method to\n # clean up the object since garbage collection\n # won't get to it until all fixtures have run and\n # then the check_redis_end fixture won't be able\n # to see how well we cleaned up\n responder._clean_up()\n\n @pytest.fixture(autouse=True)\n def check_redis_end(self):\n \"\"\"\n Runs at end -- IMPORTANT: must depend on caller and responder\n in order to ensure it runs after the caller and responder\n cleanup.\n \"\"\"\n\n client = self._get_redis_client()\n yield client\n\n keys = client.keys()\n assert keys == [] or keys == [b\"log\"]\n\n del client\n\n @pytest.fixture\n def metrics(self):\n metrics = RedisTimeSeries(unix_socket_path=\"/shared/metrics.sock\")\n metrics.redis.flushall()\n\n yield metrics\n\n del metrics\n\n def test_caller_responder_exist(self, caller, responder):\n \"\"\"\n Ensures that the caller and responder were created with the proper\n names.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n print(caller.get_all_elements())\n assert responder_name in caller.get_all_elements()\n assert caller_name in responder.get_all_elements()\n\n def test_id_generation(self, caller):\n \"\"\"\n Ensures id generation functions are working with expected input.\n \"\"\"\n caller, caller_name = caller\n\n assert caller._make_response_id(\"abc\") == \"response:abc\"\n assert caller._make_command_id(\"abc\") == \"command:abc\"\n assert caller._make_stream_id(\"abc\", \"123\") == \"stream:abc:123\"\n\n def test_command_in_redis(self, caller, responder):\n \"\"\"\n Tests caller sending command and verifies that command was sent properly\n in Redis.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n proc = Process(\n target=caller.command_send,\n args=(\n responder_name,\n \"test_cmd\",\n 0,\n ),\n )\n proc.start()\n data = caller._rclient.xread(\n {caller._make_command_id(responder_name): \"$\"}, block=1000\n )\n proc.join()\n stream, msgs = data[0] # since there's only one stream\n assert stream.decode() == \"command:%s\" % (responder_name,)\n _id, msg = msgs[0]\n assert msg[b\"element\"].decode() == caller_name\n assert msg[b\"cmd\"] == b\"test_cmd\"\n assert msg[b\"data\"] == b\"0\"\n\n def test_add_entry_and_get_n_most_recent(self, caller, responder):\n \"\"\"\n Adds 10 entries to the responder's stream and makes sure that the\n proper values are returned from get_n_most_recent.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n for i in range(10):\n responder.entry_write(\"test_stream\", {\"data\": i})\n entries = caller.entry_read_n(responder_name, \"test_stream\", 5)\n assert len(entries) == 5\n assert entries[0][\"data\"] == b\"9\"\n assert entries[-1][\"data\"] == b\"5\"\n\n def test_add_entry_with_override_element_name(self, caller, responder):\n \"\"\"\n Adds an entry to the responder stream with a fake element name and\n makes sure that entry is on correct stream.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.entry_write(\n \"test_stream\", {\"data\": \"fake\"}, 
element_name=\"fake_element\"\n )\n\n # assert that entries are on override element stream\n entries = caller.entry_read_since(\"fake_element\", \"test_stream\", last_id=0)\n assert len(entries) == 1\n assert entries[0][\"data\"] == b\"fake\"\n\n # assert that writing element stream is empty\n entries = caller.entry_read_since(responder_name, \"test_stream\", last_id=0)\n assert len(entries) == 0\n\n # clean up stream (necessary since it doesn't belong to a real element)\n caller._rclient.unlink(\"stream:fake_element:test_stream\")\n\n def test_add_entry_and_get_n_most_recent_legacy_serialize(self, caller, responder):\n \"\"\"\n Adds 10 entries to the responder's stream with legacy serialization\n and makes sure that the proper values are returned from\n get_n_most_recent.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n for i in range(10):\n data = {\"data\": i}\n responder.entry_write(\"test_stream_serialized\", data, serialize=True)\n # Ensure that serialization keeps the original data in tact\n assert data[\"data\"] == i\n entries = caller.entry_read_n(\n responder_name, \"test_stream_serialized\", 5, deserialize=True\n )\n assert len(entries) == 5\n assert entries[0][\"data\"] == 9\n assert entries[-1][\"data\"] == 5\n\n def test_add_entry_and_get_n_most_recent_arrow_serialized(self, caller, responder):\n \"\"\"\n Adds 10 entries to the responder's stream with Apache Arrow\n serialization and makes sure that the proper values are returned\n from get_n_most_recent without specifying deserialization\n method in method call, instead relying on serialization key embedded\n within entry.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n for i in range(10):\n data = {\"data\": i}\n responder.entry_write(\n \"test_stream_arrow_serialized\", data, serialization=\"arrow\"\n )\n # Ensure that serialization keeps the original data in tact\n assert data[\"data\"] == i\n entries = caller.entry_read_n(responder_name, \"test_stream_arrow_serialized\", 5)\n assert len(entries) == 5\n assert entries[0][\"data\"] == 9\n assert entries[-1][\"data\"] == 5\n\n def test_add_entry_and_get_n_most_recent_arrow_numpy_serialized(\n self, caller, responder\n ):\n \"\"\"\n Adds 10 entries to the responder's stream with Apache Arrow\n serialization and makes sure the proper values are returned from\n get_n_most_recent without specifying deserialization method in\n method call, instead relying on serialization key embedded within\n entry.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n for i in range(10):\n data = {\"data\": np.ones((3, 3)) * i}\n responder.entry_write(\n \"test_stream_arrow_numpy_serialized\", data, serialization=\"arrow\"\n )\n entries = caller.entry_read_n(\n responder_name, \"test_stream_arrow_numpy_serialized\", 5\n )\n assert len(entries) == 5\n assert np.array_equal(entries[0][\"data\"], np.ones((3, 3)) * 9)\n assert np.array_equal(entries[-1][\"data\"], np.ones((3, 3)) * 5)\n\n def test_add_entry_arrow_serialize_custom_type(self, caller, responder):\n \"\"\"\n Attempts to add an arrow-serialized entry of a custom\n (not Python built-in) type. 
Ensures that TypeError is raised.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n class CustomClass:\n pass\n\n inst = CustomClass()\n\n with pytest.raises(TypeError) as excinfo:\n responder.entry_write(\n \"test_arrow_custom_type\", {\"data\": inst}, serialization=\"arrow\"\n )\n\n print(excinfo.value)\n assert \"not serializeable by pyarrow without pickling\" in str(excinfo.value)\n\n # Test collection containing non-serializeable type\n with pytest.raises(TypeError) as excinfo:\n responder.entry_write(\n \"test_arrow_custom_type\", {\"data\": [inst]}, serialization=\"arrow\"\n )\n\n print(excinfo.value)\n assert \"not serializeable by pyarrow without pickling\" in str(excinfo.value)\n\n def test_add_command(self, responder):\n \"\"\"\n Ensures that a command can be added to a responder.\n \"\"\"\n responder, responder_name = responder\n\n responder.command_add(\"test_command\", lambda x: x, timeout=123)\n assert \"test_command\" in responder.handler_map\n assert responder.timeouts[\"test_command\"] == 123\n\n def test_clean_up_stream(self, responder):\n \"\"\"\n Ensures that a stream can be removed from Redis and removed from\n responder's streams set.\n \"\"\"\n responder, responder_name = responder\n\n responder.entry_write(\"clean_me\", {\"data\": 0})\n\n assert \"stream:%s:clean_me\" % (responder_name,) in responder.get_all_streams()\n responder.clean_up_stream(\"clean_me\")\n\n assert (\n \"stream:%s:clean_me\" % (responder_name,) not in responder.get_all_streams()\n )\n assert \"clean_me\" not in responder.streams\n self._assert_cleaned_up(responder)\n\n def test_clean_up_stream_element_name(self, caller, responder):\n \"\"\"\n Ensures an element can clean up a stream with a different element\n name.\n \"\"\"\n responder, responder_name = responder\n responder.entry_write(\"clean_me\", {\"data\": 0}, element_name=\"fake\")\n\n # have responder element clean up stream with fake element name\n responder.clean_up_stream(\"clean_me\", element_name=\"fake\")\n\n stream_exists = responder._rclient.exists(\"stream:clean_me:fake\")\n assert not stream_exists\n\n def test_clean_up(self, responder):\n \"\"\"\n Ensures that a responder can be removed from Redis\n \"\"\"\n responder, responder_name = responder\n\n new_responder = self._element_create(\"new_responder\")\n assert \"new_responder\" in responder.get_all_elements()\n del new_responder\n # Explicitly invoke collection after ref count set to 0\n gc.collect()\n assert \"new_responder\" not in responder.get_all_elements()\n\n def test_command_response(self, caller, responder):\n \"\"\"\n Element sends command and responder returns response.\n Tests expected use case of command response.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.command_add(\"add_1\", add_1)\n self._element_start(responder, caller)\n response = caller.command_send(responder_name, \"add_1\", 42)\n self._element_cleanup(responder)\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == b\"43\"\n\n def test_log_fail_in_command_loop(self, caller, responder):\n caller, caller_name = caller\n responder, responder_name = responder\n\n def fail(x):\n raise ValueError(\"oh no\")\n\n responder.command_add(\"fail\", fail)\n\n # this should be a non-blocking call\n responder.command_loop(n_workers=1, block=False)\n caller.command_send(responder_name, \"fail\", 42)\n responder.command_loop_shutdown()\n del responder\n\n def test_command_response_n_workers_2_no_fork(self, 
caller, responder):\n \"\"\"\n Element sends command and responder returns response.\n Tests expected use case of command response.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.command_add(\"add_1\", add_1)\n\n # this should be a non-blocking call\n responder.command_loop(n_workers=2, block=False)\n\n response = caller.command_send(responder_name, \"add_1\", 42)\n response2 = caller.command_send(responder_name, \"add_1\", 43)\n response3 = caller.command_send(responder_name, \"add_1\", 44)\n\n responder.command_loop_shutdown()\n\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == b\"43\"\n\n assert response2[\"err_code\"] == ATOM_NO_ERROR\n assert response2[\"data\"] == b\"44\"\n\n assert response3[\"err_code\"] == ATOM_NO_ERROR\n assert response3[\"data\"] == b\"45\"\n time.sleep(0.5)\n del responder\n\n def test_command_response_n_workers_2_threads(self, caller, responder):\n \"\"\"\n Element sends command and responder returns response.\n Tests expected use case of command response.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.command_add(\"add_1\", add_1)\n\n thread = Thread(target=responder.command_loop, kwargs={\"n_workers\": 2})\n thread.start()\n\n response = caller.command_send(responder_name, \"add_1\", 42)\n response2 = caller.command_send(responder_name, \"add_1\", 43)\n response3 = caller.command_send(responder_name, \"add_1\", 44)\n\n responder.command_loop_shutdown()\n\n thread.join()\n\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == b\"43\"\n\n assert response2[\"err_code\"] == ATOM_NO_ERROR\n assert response2[\"data\"] == b\"44\"\n\n assert response3[\"err_code\"] == ATOM_NO_ERROR\n assert response3[\"data\"] == b\"45\"\n\n def test_command_response_n_workers_2(self, caller, responder):\n \"\"\"\n Element sends command and responder returns response.\n Tests expected use case of command response.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.command_add(\"add_1\", add_1)\n\n proc = Process(target=responder.command_loop, kwargs={\"n_workers\": 2})\n proc.start()\n\n response = caller.command_send(responder_name, \"add_1\", 42)\n response2 = caller.command_send(responder_name, \"add_1\", 43)\n response3 = caller.command_send(responder_name, \"add_1\", 44)\n\n responder.command_loop_shutdown()\n\n proc.join()\n\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == b\"43\"\n\n assert response2[\"err_code\"] == ATOM_NO_ERROR\n assert response2[\"data\"] == b\"44\"\n\n assert response3[\"err_code\"] == ATOM_NO_ERROR\n assert response3[\"data\"] == b\"45\"\n\n def test_command_response_n_workers_2_use_threads(self, caller, responder):\n \"\"\"\n Element sends command and responder returns response if we use threads\n for workers instead of processes.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.command_add(\"add_1\", add_1)\n\n proc = Process(\n target=responder.command_loop, kwargs={\"n_workers\": 2, \"use_procs\": False}\n )\n proc.start()\n\n response = caller.command_send(responder_name, \"add_1\", 42)\n response2 = caller.command_send(responder_name, \"add_1\", 43)\n response3 = caller.command_send(responder_name, \"add_1\", 44)\n\n responder.command_loop_shutdown()\n\n proc.join()\n\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == b\"43\"\n\n assert 
response2[\"err_code\"] == ATOM_NO_ERROR\n assert response2[\"data\"] == b\"44\"\n\n assert response3[\"err_code\"] == ATOM_NO_ERROR\n assert response3[\"data\"] == b\"45\"\n\n def test_command_response_legacy_serialized(self, caller, responder):\n \"\"\"\n Element sends command and responder returns response.\n Tests expected use case of command response.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n def add_1_serialized(data):\n return Response(data + 1, serialize=True)\n\n responder.command_add(\"add_1_3\", add_1_serialized, deserialize=True)\n self._element_start(responder, caller)\n response = caller.command_send(\n responder_name, \"add_1_3\", 0, serialize=True, deserialize=True\n )\n self._element_cleanup(responder)\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == 1\n\n def test_command_response_mixed_serialization(self, caller, responder):\n \"\"\"\n Ensures that command and response are serialized correctly based on\n serialization specified.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n def add_1_arrow_serialized(data):\n return Response(data + 1, serialization=\"arrow\")\n\n responder.command_add(\n \"test_command\", add_1_arrow_serialized, serialization=\"msgpack\"\n )\n assert \"test_command\" in responder.handler_map\n assert responder.handler_map[\"test_command\"][\"serialization\"] == \"msgpack\"\n self._element_start(responder, caller)\n response = caller.command_send(\n responder_name, \"test_command\", 123, serialization=\"msgpack\"\n )\n self._element_cleanup(responder)\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == 124\n\n def test_listen_on_streams(self, caller, responder):\n \"\"\"\n Creates two responders publishing entries on their respective streams\n with a caller listening on those streams and publishing data to a\n new stream. 
This test ensures that the new stream contains all the\n data from the responders.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder_0_name = responder_name + \"_0\"\n responder_1_name = responder_name + \"_1\"\n\n responder_0 = self._element_create(responder_0_name)\n responder_1 = self._element_create(responder_1_name)\n entries = set()\n\n def entry_write_loop(responder, stream_name, data):\n # Wait until both responders and the caller are ready\n while -1 not in entries or -2 not in entries:\n responder.entry_write(\n stream_name, {\"value\": data - 2}, serialization=\"msgpack\"\n )\n for i in range(10):\n responder.entry_write(\n stream_name, {\"value\": data}, serialization=\"msgpack\"\n )\n data += 2\n\n def add_entries(data):\n entries.add(data[\"value\"])\n\n proc_responder_0 = Thread(\n target=entry_write_loop,\n args=(\n responder_0,\n \"stream_0\",\n 0,\n ),\n )\n proc_responder_1 = Thread(\n target=entry_write_loop,\n args=(\n responder_1,\n \"stream_1\",\n 1,\n ),\n )\n\n stream_handlers = [\n StreamHandler(responder_0_name, \"stream_0\", add_entries),\n StreamHandler(responder_1_name, \"stream_1\", add_entries),\n ]\n thread_caller = Thread(\n target=caller.entry_read_loop,\n args=(\n stream_handlers,\n None,\n 1000,\n True,\n ),\n daemon=True,\n )\n thread_caller.start()\n proc_responder_0.start()\n proc_responder_1.start()\n proc_responder_0.join()\n proc_responder_1.join()\n # Wait to give the caller time to handle all the data from the streams\n thread_caller.join(5.0)\n caller._rclient.delete(f\"stream:{responder_0_name}:stream_0\")\n caller._rclient.delete(f\"stream:{responder_1_name}:stream_1\")\n for i in range(20):\n assert i in entries\n\n self._element_cleanup(responder_0)\n self._element_cleanup(responder_1)\n\n def test_read_since(self, caller, responder):\n \"\"\"\n Sets the current timestamp as last_id and writes 5 entries to a stream.\n Ensures that we can get 5 entries since the last id using\n entry_read_since.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.entry_write(\"test_stream\", {\"data\": None})\n\n # Sleep so that last_id is later than the first entry\n time.sleep(0.01)\n last_id = responder._get_redis_timestamp()\n\n # Sleep so that the entries are later than last_id\n time.sleep(0.01)\n\n for i in range(5):\n responder.entry_write(\"test_stream\", {\"data\": i})\n\n # Ensure this doesn't get an entry (because it's waiting for new entries\n # nd they never come)\n entries = caller.entry_read_since(responder_name, \"test_stream\")\n assert len(entries) == 0\n\n # Ensure this gets all entries\n entries = caller.entry_read_since(responder_name, \"test_stream\", last_id=\"0\")\n assert len(entries) == 6\n\n # Ensure we get the correct number of entries since the last_id\n entries = caller.entry_read_since(responder_name, \"test_stream\", last_id)\n assert len(entries) == 5\n\n # Ensure that if we pass n, we get the n earliest entries since last_id\n entries = caller.entry_read_since(responder_name, \"test_stream\", last_id, 2)\n assert len(entries) == 2\n assert entries[-1][\"data\"] == b\"1\"\n\n # Ensure that last_id=='$' only gets new entries arriving after the call\n q = Queue()\n\n def wrapped_read(q):\n q.put(caller.entry_read_since(responder_name, \"test_stream\", block=500))\n\n proc = Process(target=wrapped_read, args=(q,))\n proc.start()\n time.sleep(\n 0.1\n ) # sleep to give the process time to start listening for new entries\n 
responder.entry_write(\"test_stream\", {\"data\": None})\n entries = q.get()\n responder.command_loop_shutdown()\n proc.join()\n proc.terminate()\n assert len(entries) == 1\n\n def test_parallel_read_write(self, caller, responder):\n \"\"\"\n Has the same responder class receiving commands on 1 thread,\n while publishing to a stream on a 2nd thread at high volume.\n Meanwhile, a caller quickly sends a series of commands to the responder\n and verifies we get valid results back.\n Ensures that we can safely send and receive using the same element class\n without concurrency issues.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n responder_0_name = responder_name + \"_0\"\n responder_0 = self._element_create(responder_0_name)\n\n def no_op_serialized(data):\n \"\"\"\n NO_OP command responds with whatever data it receives\n \"\"\"\n return Response(data, serialization=\"msgpack\")\n\n responder_0.command_add(\"no_op\", no_op_serialized, serialization=\"msgpack\")\n\n # Entry write loop mimics high volume publisher\n def entry_write_loop(responder):\n for i in range(3000):\n responder.entry_write(\"stream_0\", {\"value\": 0}, serialization=\"msgpack\")\n time.sleep(0.0001)\n\n # Command loop thread to handle incoming commands\n self._element_start(responder_0, caller)\n # Entry write thread to publish a whole bunch to a stream\n entry_write_thread = Thread(\n target=entry_write_loop, args=(responder_0,), daemon=True\n )\n entry_write_thread.start()\n\n # Send a bunch of commands to responder and you should get valid\n # responses back, even while its busy publishing to a stream\n try:\n for i in range(20):\n response = caller.command_send(\n responder_0_name, \"no_op\", 1, serialization=\"msgpack\"\n )\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == 1\n finally:\n # Cleanup threads\n entry_write_thread.join()\n self._element_cleanup(responder_0)\n del responder_0\n\n def test_healthcheck_default(self, caller, responder):\n \"\"\"\n Verify default healthcheck\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n self._element_start(responder, caller)\n response = caller.command_send(responder_name, HEALTHCHECK_COMMAND)\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == b\"\"\n self._element_cleanup(responder)\n\n def test_healthcheck_success(self, caller, responder):\n \"\"\"\n Verify a successful response from a custom healthcheck\n \"\"\"\n caller, caller_name = caller\n responder = self._element_create(\"healthcheck_success_responder\")\n\n responder.healthcheck_set(lambda: Response(err_code=0, err_str=\"We're good\"))\n self._element_start(responder, caller)\n response = caller.command_send(\n \"healthcheck_success_responder\", HEALTHCHECK_COMMAND\n )\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == b\"\"\n assert response[\"err_str\"] == \"We're good\"\n self._element_cleanup(responder)\n\n def test_healthcheck_failure(self, caller, responder):\n \"\"\"\n Verify a failed response from a custom healthcheck\n \"\"\"\n responder = self._element_create(\"healthcheck_failure_responder\")\n caller, caller_name = caller\n\n responder.healthcheck_set(\n lambda: Response(err_code=5, err_str=\"Camera is unplugged\")\n )\n self._element_start(responder, caller, do_healthcheck=False)\n response = caller.command_send(\n \"healthcheck_failure_responder\", HEALTHCHECK_COMMAND\n )\n assert response[\"err_code\"] == 5 + ATOM_USER_ERRORS_BEGIN\n assert 
response[\"data\"] == b\"\"\n assert response[\"err_str\"] == \"Camera is unplugged\"\n self._element_cleanup(responder)\n\n def test_wait_for_elements_healthy(self, caller, responder):\n \"\"\"\n Verify wait_for_elements_healthy success/failure cases\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n self._element_start(responder, caller)\n\n def wait_for_elements_check(caller, elements_to_check):\n caller.wait_for_elements_healthy(elements_to_check)\n\n wait_for_elements_thread = Thread(\n target=wait_for_elements_check, args=(caller, [responder_name]), daemon=True\n )\n wait_for_elements_thread.start()\n # If elements reported healthy, call should have returned quickly and\n # thread should exit\n wait_for_elements_thread.join(0.5)\n assert not wait_for_elements_thread.is_alive()\n\n wait_for_elements_thread = Thread(\n target=wait_for_elements_check,\n args=(caller, [responder_name, \"test_responder_2\"]),\n daemon=True,\n )\n wait_for_elements_thread.start()\n # 1 of these elements is missing, so thread is busy and this join call\n # should timeout retrying\n wait_for_elements_thread.join(0.5)\n assert wait_for_elements_thread.is_alive()\n\n try:\n responder_2 = self._element_create(\"test_responder_2\")\n self._element_start(responder_2, caller, do_healthcheck=False)\n\n # test_responder_2 is alive now, so both healthchecks should succeed\n # and thread should exit roughly within the retry interval\n wait_for_elements_thread.join(HEALTHCHECK_RETRY_INTERVAL + 1.0)\n assert not wait_for_elements_thread.is_alive()\n finally:\n # Cleanup threads\n self._element_cleanup(responder_2)\n del responder_2\n\n self._element_cleanup(responder)\n\n def test_version_command(self, caller, responder):\n \"\"\"\n Verify the response from the get_element_version command\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n self._element_start(responder, caller)\n response = caller.command_send(\n responder_name, VERSION_COMMAND, serialization=\"msgpack\"\n )\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == {\n \"version\": float(\".\".join(VERSION.split(\".\")[:-1])),\n \"language\": LANG,\n }\n response2 = caller.get_element_version(responder_name)\n assert response == response2\n self._element_cleanup(responder)\n\n def test_command_list_command(self, caller, responder):\n \"\"\"\n Verify the response from the COMMAND_LIST_COMMAND command\n \"\"\"\n\n caller, caller_name = caller\n responder, responder_name = responder\n\n # Test with no commands\n no_command_responder = self._element_create(\"no_command_responder\")\n self._element_start(no_command_responder, caller)\n assert (\n caller.command_send(\n no_command_responder.name, COMMAND_LIST_COMMAND, serialization=\"msgpack\"\n )[\"data\"]\n == []\n )\n self._element_cleanup(no_command_responder)\n del no_command_responder\n\n responder = self._element_create(\"responder_with_commands\")\n # Add commands to responder\n responder.command_add(\"foo_func1\", lambda data: data)\n responder.command_add(\n \"foo_func2\", lambda: None, timeout=500, serialization=\"msgpack\"\n )\n responder.command_add(\n \"foo_func3\", lambda x, y: x + y, timeout=1, serialization=\"msgpack\"\n )\n self._element_start(responder, caller)\n\n # Test with three commands\n response = caller.command_send(\n responder.name, COMMAND_LIST_COMMAND, serialization=\"msgpack\"\n )\n assert response[\"err_code\"] == ATOM_NO_ERROR\n assert response[\"data\"] == [\"foo_func1\", \"foo_func2\", 
\"foo_func3\"]\n\n        self._element_cleanup(responder)\n\n    def test_get_all_commands_with_version(self, caller, responder):\n        \"\"\"\n        Ensure get_all_commands only queries elements with supported versions.\n        \"\"\"\n        caller, caller_name = caller\n        responder, responder_name = responder\n\n        # Change responder reported version\n        responder.handler_map[VERSION_COMMAND][\"handler\"] = lambda: Response(\n            data={\"language\": \"Python\", \"version\": 0.2}, serialization=\"msgpack\"\n        )\n        # Create element with normal, supported version\n        responder2_name = responder_name + \"_2\"\n        responder2 = self._element_create(responder2_name)\n\n        # Add commands to both responders and start command loop\n        responder.command_add(\"foo_func0\", lambda data: data)\n        responder2.command_add(\n            \"foo_func0\", lambda: None, timeout=500, serialization=\"msgpack\"\n        )\n        responder2.command_add(\n            \"foo_func1\", lambda x, y: x + y, timeout=1, serialization=\"msgpack\"\n        )\n        self._element_start(responder, caller)\n        self._element_start(responder2, caller)\n\n        # Retrieve commands\n        commands = caller.get_all_commands(\n            element_name=[responder_name, responder2_name]\n        )\n        # Do not include responder's commands as the version is too low\n        desired_commands = [\n            f\"{responder2_name}:foo_func0\",\n            f\"{responder2_name}:foo_func1\",\n        ]\n        assert commands == desired_commands\n\n        self._element_cleanup(responder)\n        self._element_cleanup(responder2)\n        del responder2\n\n    def test_get_all_commands(self, caller, responder):\n        \"\"\"\n        Verify the response from the get_all_commands command\n        \"\"\"\n        caller, caller_name = caller\n        responder, responder_name = responder\n\n        # Test with no available commands\n        assert caller.get_all_commands() == []\n\n        # Set up two responders\n        test_name_1, test_name_2 = responder_name + \"_1\", responder_name + \"_2\"\n        responder1, responder2 = (\n            self._element_create(test_name_1),\n            self._element_create(test_name_2),\n        )\n\n        proc1_function_data = [\n            (\"foo_func0\", lambda x: x + 3),\n            (\"foo_func1\", lambda: None, 10, \"arrow\"),\n            (\"foo_func2\", lambda x: None),\n        ]\n        proc2_function_data = [\n            (\"foo_func0\", lambda y: y * 3, 10),\n            (\"other_foo0\", lambda y: None, 3, \"msgpack\"),\n            (\"other_foo1\", lambda: 5),\n        ]\n\n        # Add functions\n        for data in proc1_function_data:\n            responder1.command_add(*data)\n        for data in proc2_function_data:\n            responder2.command_add(*data)\n\n        self._element_start(responder1, caller)\n        self._element_start(responder2, caller)\n\n        # True function names\n        responder1_function_names = [f\"{test_name_1}:foo_func{i}\" for i in range(3)]\n        responder2_function_names = [\n            f\"{test_name_2}:foo_func0\",\n            f\"{test_name_2}:other_foo0\",\n            f\"{test_name_2}:other_foo1\",\n        ]\n\n        # Either order of function names is fine for testing all function names\n        command_list = caller.get_all_commands()\n        assert (\n            command_list == responder1_function_names + responder2_function_names\n            or command_list == responder2_function_names + responder1_function_names\n        )\n\n        # Test just functions for 1\n        command_list = caller.get_all_commands(test_name_1)\n        assert command_list == responder1_function_names\n\n        # Test just functions for 2\n        command_list = caller.get_all_commands(test_name_2)\n        assert command_list == responder2_function_names\n\n        self._element_cleanup(responder1)\n        self._element_cleanup(responder2)\n        del responder1\n        del responder2\n\n    def test_no_ack(self, caller, responder):\n        \"\"\"\n        Element sends command and responder does not acknowledge.\n        \"\"\"\n        caller, caller_name = caller\n        responder, responder_name = 
responder\n\n responder.command_add(\"add_1\", add_1)\n response = caller.command_send(responder_name, \"add_1\", 0)\n assert response[\"err_code\"] == ATOM_COMMAND_NO_ACK\n\n def test_unsupported_command(self, caller, responder):\n \"\"\"\n Element sends command that responder does not have.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n self._element_start(responder, caller)\n response = caller.command_send(responder_name, \"add_1\", 0)\n\n self._element_cleanup(responder)\n assert response[\"err_code\"] == ATOM_COMMAND_UNSUPPORTED\n\n def test_command_timeout(self, caller, responder):\n \"\"\"\n Element sends command to responder that does not return data within the\n timeout.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n # Set a timeout of 10 ms\n responder.command_add(\"sleep_ms\", sleep_ms, 10, serialization=\"msgpack\")\n self._element_start(responder, caller)\n response = caller.command_send(\n responder_name, \"sleep_ms\", 1000, serialization=\"msgpack\"\n )\n self._element_cleanup(responder)\n assert response[\"err_code\"] == ATOM_COMMAND_NO_RESPONSE\n\n def test_handler_returns_not_response(self, caller, responder):\n \"\"\"\n Element calls command from responder that does not return an object of\n type Response.\n \"\"\"\n caller, caller_name = caller\n responder, responder_name = responder\n\n responder.command_add(\"ret_not_response\", lambda x: 0)\n self._element_start(responder, caller)\n response = caller.command_send(responder_name, \"ret_not_response\", None)\n self._element_cleanup(responder)\n assert response[\"err_code\"] == ATOM_CALLBACK_FAILED\n\n # TODO: come back and fix logging tests once that's sorted\n # def test_log(self, caller):\n # \"\"\"\n # Writes a log with each severity level and ensures that all the logs\n # exist.\n # \"\"\"\n # caller, caller_name = caller\n # for i, severity in enumerate(LogLevel):\n # caller.log(severity, f\"severity {i}\", stdout=False)\n # logs = caller._rclient.xread({\"log\": 0})[0][1]\n # logs = logs[-8:]\n # for i in range(8):\n # assert logs[i][1][b\"msg\"].decode() == f\"severity {i}\"\n\n def test_parameter_write(self, caller):\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data)\n param_data = caller.parameter_read(key)\n assert param_data == data\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_read_field(self, caller):\n \"\"\"\n Writes parameter with multiple fields; verifies that\n a single field can be successfully read.\n \"\"\"\n caller, caller_name = caller\n data = {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data)\n param_data = caller.parameter_read(key, fields=\"str2\")\n assert param_data == {b\"str2\": b\"goodbye\"}\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_write_msgpack(self, caller):\n \"\"\"\n Writes parameter with msgpack serialization.\n Verifies it is successfully deserialized when read.\n \"\"\"\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, serialization=\"msgpack\")\n param_data = caller.parameter_read(key)\n assert param_data == data\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_read_msgpack_field(self, caller):\n \"\"\"\n Writes parameter with multiple fields 
serialized with msgpack.\n        Verifies that a single field is successfully read and deserialized.\n        \"\"\"\n        caller, caller_name = caller\n        data = {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye\"}\n        key = \"my_param\"\n        _ = caller.parameter_write(key, data, serialization=\"msgpack\")\n        param_data = caller.parameter_read(key, fields=[\"str2\"])\n        assert param_data == {b\"str2\": b\"goodbye\"}\n        success = caller.parameter_delete(key)\n        assert success == True\n\n    def test_parameter_write_override_true(self, caller):\n        \"\"\"\n        Writes parameter with override allowed. Updates one existing\n        field. Reads parameter and checks field was updated.\n        \"\"\"\n        caller, caller_name = caller\n        data = {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye\"}\n        key = \"my_param\"\n        _ = caller.parameter_write(key, data)\n        new_data = {b\"str2\": b\"goodbye again\"}\n        updated = caller.parameter_write(key, new_data)\n        assert updated == [b\"str2\"]\n        new_data = caller.parameter_read(key)\n        assert new_data == {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye again\"}\n        success = caller.parameter_delete(key)\n        assert success == True\n\n    def test_parameter_write_override_false(self, caller):\n        \"\"\"\n        Writes parameter with override not allowed. Tries updating\n        existing field and verifies that exception is raised. Reads\n        parameter and checks field was not updated.\n        \"\"\"\n        caller, caller_name = caller\n        data = {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye\"}\n        key = \"my_param\"\n        _ = caller.parameter_write(key, data, override=False)\n        new_data = {b\"str2\": b\"goodbye again\"}\n        with pytest.raises(Exception):\n            _ = caller.parameter_write(key, new_data)\n\n        current_data = caller.parameter_read(key)\n        assert current_data == data\n        success = caller.parameter_delete(key)\n        assert success == True\n\n    def test_parameter_write_override_false_allows_new_key(self, caller):\n        \"\"\"\n        Writes parameter with override not allowed. Tries adding new field\n        to parameter and verifies new field was successfully added.\n        \"\"\"\n        caller, caller_name = caller\n        data = {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye\"}\n        key = \"my_param\"\n        _ = caller.parameter_write(key, data, override=False)\n        new_data = {b\"str3\": b\"goodbye again\"}\n        new_fields = caller.parameter_write(key, new_data)\n        assert new_fields == [b\"str3\"]\n        new_data = caller.parameter_read(key)\n        assert new_data == {\n            b\"str1\": b\"hello, world!\",\n            b\"str2\": b\"goodbye\",\n            b\"str3\": b\"goodbye again\",\n        }\n        success = caller.parameter_delete(key)\n        assert success == True\n\n    def test_parameter_write_new_serialization_raises_error(self, caller):\n        \"\"\"\n        Writes parameter with msgpack serialization. 
Attempts to add\n new field with no serialization; verifies that exception is raised\n and existing parameter is not changed.\n \"\"\"\n caller, caller_name = caller\n data = {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, serialization=\"msgpack\")\n new_data = {b\"str3\": b\"goodbye again\"}\n with pytest.raises(Exception):\n _ = caller.parameter_write(key, new_data)\n\n current_data = caller.parameter_read(key)\n assert current_data == data\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_get_override(self, caller):\n caller, caller_name = caller\n data = {b\"str1\": b\"hello, world!\", b\"str2\": b\"goodbye\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, override=False)\n override = caller.parameter_get_override(key)\n assert override == \"false\"\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_get_override_doesnt_exist(self, caller):\n caller, caller_name = caller\n key = \"my_param\"\n with pytest.raises(Exception):\n _ = caller.parameter_get_override(key)\n\n def test_parameter_default_timeout_is_none(self, caller):\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data)\n remaining_ms = caller.parameter_get_timeout_ms(key)\n assert remaining_ms == -1\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_get_timeout_ms(self, caller):\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, timeout_ms=1000)\n remaining_ms = caller.parameter_get_timeout_ms(key)\n assert remaining_ms > 0 and remaining_ms <= 1000\n time.sleep(0.1)\n still_remaining_ms = caller.parameter_get_timeout_ms(key)\n assert (still_remaining_ms < remaining_ms) and (still_remaining_ms > 0)\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_update_timeout_ms(self, caller):\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, timeout_ms=1000)\n remaining_ms = caller.parameter_get_timeout_ms(key)\n assert remaining_ms > 0 and remaining_ms <= 1000\n\n caller.parameter_update_timeout_ms(key, 10000)\n updated_ms = caller.parameter_get_timeout_ms(key)\n assert (updated_ms > 1000) and (updated_ms <= 10000)\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_remove_timeout(self, caller):\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, timeout_ms=1000)\n remaining_ms = caller.parameter_get_timeout_ms(key)\n assert remaining_ms > 0 and remaining_ms <= 1000\n\n caller.parameter_update_timeout_ms(key, 0)\n updated_ms = caller.parameter_get_timeout_ms(key)\n assert updated_ms == -1\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_parameter_delete(self, caller):\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, timeout_ms=0)\n param_data = caller.parameter_read(key)\n assert param_data == data\n\n timeout_ms = caller.parameter_get_timeout_ms(key)\n assert timeout_ms == -1\n\n success = caller.parameter_delete(key)\n assert success == True\n del_data = caller.parameter_read(key)\n assert del_data is None\n\n def 
test_parameter_delete_missing(self, caller):\n caller, caller_name = caller\n data = {b\"my_str\": b\"hello, world!\"}\n key = \"my_param\"\n _ = caller.parameter_write(key, data, timeout_ms=0)\n success = caller.parameter_delete(key)\n assert success == True\n del_data = caller.parameter_read(key)\n assert del_data is None\n success = caller.parameter_delete(key)\n assert success == False\n\n def test_parameter_list(self, caller):\n \"\"\"\n Writes parameters, verifies that parameter_list lists exactly\n the parameters that exist at any point in time, and then cleans\n up written parameters\n \"\"\"\n caller, caller_name = caller\n assert len(caller.parameter_list()) == 0\n keys = [\"str1\", \"str2\", \"other\"]\n data = [\n {b\"k1\": b\"hello, world\"},\n {b\"k1\": b\"hello, world!\", b\"str2\": b\"goodbye\"},\n {b\"k3\": b\"hello\"},\n ]\n _ = caller.parameter_write(keys[0], data[0], serialization=\"msgpack\")\n assert set([keys[0]]) == set(caller.parameter_list())\n assert [] == caller.parameter_list(\"str2\")\n assert [] == caller.parameter_list(\"other\")\n\n _ = caller.parameter_write(\n keys[1],\n data[1],\n serialization=\"msgpack\",\n )\n assert set(keys[0:2]) == set(caller.parameter_list())\n assert [] == caller.parameter_list(\"other\")\n\n _ = caller.parameter_write(keys[2], data[2], serialization=\"msgpack\")\n\n assert set(keys) == set(caller.parameter_list())\n\n for i, key in enumerate(keys):\n success = caller.parameter_delete(key)\n assert success == True\n assert set(keys[i + 1 :]) == set(caller.parameter_list())\n\n def test_parameter_list_pattern_matching(self, caller):\n \"\"\"\n Writes 3 parameters, tests that parameter_list can correctly\n return parameters matching a few patterns, as described in\n https://redis.io/commands/KEYS, then deletes the parameters.\n \"\"\"\n caller, caller_name = caller\n keys = [\"str1\", \"str2\", \"spr2\", \"sppr2\"]\n data = [\n {b\"k1\": b\"hello, world\"},\n {b\"k1\": b\"hello, world!\", b\"str2\": b\"goodbye\"},\n {b\"k3\": b\"hello\"},\n {b\"k1\": b\"hello, world!\", b\"str2\": b\"goodbye\"},\n ]\n for i, key in enumerate(keys):\n _ = caller.parameter_write(key, data[i], serialization=\"msgpack\")\n\n assert set(keys) == set(caller.parameter_list())\n assert set(keys[0:2]) == set(caller.parameter_list(\"str*\"))\n assert [\"spr2\"] == caller.parameter_list(\"spr2\")\n assert [\"str1\"] == caller.parameter_list(\"str1\")\n assert [\"str2\"] == caller.parameter_list(\"str2\")\n assert [] == caller.parameter_list(\"str\")\n assert set([\"str2\", \"spr2\"]) == set(caller.parameter_list(\"s?r2\"))\n assert set([\"str2\", \"spr2\", \"sppr2\"]) == set(caller.parameter_list(\"s*r2\"))\n assert [\"str1\"] == caller.parameter_list(\"str[^2]\")\n assert [] == caller.parameter_list(\"str[4-9]\")\n\n for key in keys:\n success = caller.parameter_delete(key)\n assert success == True\n\n def test_reference_basic(self, caller):\n caller, caller_name = caller\n data = b\"hello, world!\"\n ref_id = caller.reference_create(data)[0]\n ref_data = caller.reference_get(ref_id)[0]\n assert ref_data == data\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_user_key(self, caller):\n caller, caller_name = caller\n data = b\"hello, world!\"\n key = \"my_string\"\n ref_id = caller.reference_create(data, keys=key)[0]\n ref_data = caller.reference_get(ref_id)[0]\n assert ref_data == data\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) 
== 0\n\n def test_reference_doesnt_exist(self, caller):\n caller, caller_name = caller\n ref_id = \"nonexistent\"\n ref_data = caller.reference_get(ref_id)[0]\n assert ref_data is None\n\n def test_reference_legacy_serialization(self, caller):\n caller, caller_name = caller\n data = {\n \"hello\": \"world\",\n \"atom\": 123456,\n \"some_obj\": {\"references\": \"are fun!\"},\n }\n ref_id = caller.reference_create(data, serialize=True)[0]\n ref_data = caller.reference_get(ref_id, deserialize=True)[0]\n assert ref_data == data\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_arrow(self, caller):\n \"\"\"\n Creates references serialized with Apache Arrow; gets references and\n deserializes based on serialization method embedded within reference\n key.\n \"\"\"\n caller, caller_name = caller\n data = {\n \"hello\": \"world\",\n \"atom\": 123456,\n \"some_obj\": {\"references\": \"are fun!\"},\n }\n ref_id = caller.reference_create(data, serialization=\"arrow\")[0]\n ref_data = caller.reference_get(ref_id)[0]\n assert ref_data == data\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_msgpack_dne(self, caller):\n caller, caller_name = caller\n ref_id = \"nonexistent\"\n ref_data = caller.reference_get(ref_id, serialization=\"msgpack\")[0]\n assert ref_data is None\n\n def test_reference_multiple(self, caller):\n caller, caller_name = caller\n data = [b\"hello, world!\", b\"robots are fun!\"]\n ref_ids = caller.reference_create(*data)\n ref_data = caller.reference_get(*ref_ids)\n for i in range(len(data)):\n assert ref_data[i] == data[i]\n success, failed = caller.reference_delete(*ref_ids)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_multiple_user_keys(self, caller):\n caller, caller_name = caller\n data = [b\"hello, world!\", b\"robots are fun!\"]\n ref_ids = caller.reference_create(*data, keys=[\"ref1\", \"ref2\"])\n assert \"ref1\" in ref_ids[0] and \"ref2\" in ref_ids[1]\n ref_data = caller.reference_get(*ref_ids)\n for i in range(len(data)):\n assert ref_data[i] == data[i]\n\n success, failed = caller.reference_delete(*ref_ids)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_multiple_mismatch_keys(self, caller):\n caller, caller_name = caller\n data = [b\"hello, world!\", b\"robots are fun!\"]\n with pytest.raises(Exception):\n _ = caller.reference_create(*data, keys=[\"ref1\"])\n\n def test_reference_multiple_mismatch_keys_2(self, caller):\n caller, caller_name = caller\n data = [b\"hello, world!\"]\n with pytest.raises(Exception):\n _ = caller.reference_create(*data, keys=[\"ref1\", \"ref2\"])\n\n def test_reference_multiple_msgpack(self, caller):\n caller, caller_name = caller\n data = [\n {\"hello\": \"world\", \"atom\": 123456, \"some_obj\": {\"references\": \"are fun!\"}},\n True,\n ]\n ref_ids = caller.reference_create(*data, serialization=\"msgpack\")\n ref_data = caller.reference_get(*ref_ids)\n for i in range(len(data)):\n assert ref_data[i] == data[i]\n success, failed = caller.reference_delete(*ref_ids)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_multiple_mixed_serialization(self, caller):\n caller, caller_name = caller\n data = [{\"hello\": \"world\"}, b\"123456\"]\n ref_ids = []\n ref_ids.extend(caller.reference_create(data[0], serialization=\"msgpack\"))\n ref_ids.extend(caller.reference_create(data[1], serialization=\"none\"))\n ref_data = 
caller.reference_get(*ref_ids)\n for ref, orig in zip(ref_data, data):\n assert ref == orig\n success, failed = caller.reference_delete(*ref_ids)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_get_timeout_ms(self, caller):\n caller, caller_name = caller\n data = b\"hello, world!\"\n ref_id = caller.reference_create(data, timeout_ms=1000)[0]\n ref_remaining_ms = caller.reference_get_timeout_ms(ref_id)\n assert ref_remaining_ms > 0 and ref_remaining_ms <= 1000\n time.sleep(0.1)\n ref_still_remaining_ms = caller.reference_get_timeout_ms(ref_id)\n assert (ref_still_remaining_ms < ref_remaining_ms) and (\n ref_still_remaining_ms > 0\n )\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_update_timeout_ms(self, caller):\n caller, caller_name = caller\n data = b\"hello, world!\"\n ref_id = caller.reference_create(data, timeout_ms=1000)[0]\n ref_remaining_ms = caller.reference_get_timeout_ms(ref_id)\n assert ref_remaining_ms > 0 and ref_remaining_ms <= 1000\n\n caller.reference_update_timeout_ms(ref_id, 10000)\n ref_updated_ms = caller.reference_get_timeout_ms(ref_id)\n assert (ref_updated_ms > 1000) and (ref_updated_ms <= 10000)\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_remove_timeout(self, caller):\n caller, caller_name = caller\n data = b\"hello, world!\"\n ref_id = caller.reference_create(data, timeout_ms=1000)[0]\n ref_remaining_ms = caller.reference_get_timeout_ms(ref_id)\n assert ref_remaining_ms > 0 and ref_remaining_ms <= 1000\n\n caller.reference_update_timeout_ms(ref_id, 0)\n ref_updated_ms = caller.reference_get_timeout_ms(ref_id)\n assert ref_updated_ms == -1\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n\n def test_reference_delete(self, caller):\n caller, caller_name = caller\n data = b\"hello, world!\"\n ref_id = caller.reference_create(data, timeout_ms=0)[0]\n ref_data = caller.reference_get(ref_id)[0]\n assert ref_data == data\n\n ref_ms = caller.reference_get_timeout_ms(ref_id)\n assert ref_ms == -1\n\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n del_data = caller.reference_get(ref_id)[0]\n assert del_data is None\n\n def test_reference_delete_multiple(self, caller):\n caller, caller_name = caller\n\n data = [b\"hello, world!\", b\"test\"]\n ref_ids = caller.reference_create(*data, timeout_ms=0)\n ref_data = caller.reference_get(*ref_ids)\n assert ref_data[0] == data[0]\n assert ref_data[1] == data[1]\n\n ref_ms = caller.reference_get_timeout_ms(ref_ids[0])\n assert ref_ms == -1\n ref_ms = caller.reference_get_timeout_ms(ref_ids[1])\n assert ref_ms == -1\n\n success, failed = caller.reference_delete(*ref_ids)\n assert success == True\n assert len(failed) == 0\n del_data = caller.reference_get(*ref_ids)\n assert del_data[0] is None\n assert del_data[1] is None\n\n def test_reference_delete_single_missing(self, caller):\n caller, caller_name = caller\n\n data = [b\"hello, world!\", b\"test\"]\n ref_ids = caller.reference_create(*data, timeout_ms=0)\n ref_data = caller.reference_get(*ref_ids)\n assert ref_data[0] == data[0]\n assert ref_data[1] == data[1]\n\n ref_ms = caller.reference_get_timeout_ms(ref_ids[0])\n assert ref_ms == -1\n ref_ms = caller.reference_get_timeout_ms(ref_ids[1])\n assert ref_ms == -1\n\n missing_str = \"bad-reference\"\n ref_ids.append(missing_str)\n success, 
failed = caller.reference_delete(*ref_ids)\n assert success == False\n assert failed == [missing_str]\n\n def test_reference_delete_all_missing(self, caller):\n caller, caller_name = caller\n\n missing_references = [\"ref-a\", \"ref-b\", \"ref-c\", \"ref-\"]\n success, failed = caller.reference_delete(*missing_references)\n assert success == False\n assert failed == missing_references\n\n def test_reference_delete_msgpack(self, caller):\n caller, caller_name = caller\n\n data = {\"msgpack\": \"data\"}\n ref_id = caller.reference_create(data, timeout_ms=0, serialization=\"msgpack\")[0]\n ref_data = caller.reference_get(ref_id)[0]\n assert ref_data == data\n\n ref_ms = caller.reference_get_timeout_ms(ref_id)\n assert ref_ms == -1\n\n success, failed = caller.reference_delete(ref_id)\n assert success == True\n assert len(failed) == 0\n del_data = caller.reference_get(ref_id)[0]\n assert del_data is None\n\n def test_reference_expire(self, caller):\n caller, caller_name = caller\n\n data = {\"msgpack\": \"data\"}\n ref_id = caller.reference_create(data, serialization=\"msgpack\", timeout_ms=500)[\n 0\n ]\n ref_data = caller.reference_get(ref_id)[0]\n assert ref_data == data\n\n time.sleep(0.5)\n expired_data = caller.reference_get(ref_id)[0]\n assert expired_data is None\n\n def test_reference_create_from_stream_single_key(self, caller):\n caller, caller_name = caller\n\n stream_name = \"test_ref\"\n stream_data = {\"data\": b\"test reference!\"}\n caller.entry_write(stream_name, stream_data)\n key_dict = caller.reference_create_from_stream(\n caller.name, stream_name, timeout_ms=0\n )\n ref_data = caller.reference_get(key_dict[\"data\"])[0]\n assert ref_data == stream_data[\"data\"]\n success, failed = caller.reference_delete(key_dict[\"data\"])\n assert success == True\n assert len(failed) == 0\n\n def test_reference_create_from_stream_multiple_keys(self, caller):\n caller, caller_name = caller\n\n stream_name = \"test_ref_multiple_keys\"\n stream_data = {\"key1\": b\"value 1!\", \"key2\": b\"value 2!\"}\n caller.entry_write(stream_name, stream_data)\n key_dict = caller.reference_create_from_stream(\n caller.name, stream_name, timeout_ms=0\n )\n for key in key_dict:\n ref_data = caller.reference_get(key_dict[key])[0]\n assert ref_data == stream_data[key]\n success, failed = caller.reference_delete(*key_dict.values())\n assert success == True\n assert len(failed) == 0\n\n def test_reference_create_from_stream_multiple_keys_legacy_serialization(\n self, caller\n ):\n caller, caller_name = caller\n\n stream_name = \"test_ref_multiple_keys\"\n stream_data = {\"key1\": {\"nested1\": \"val1\"}, \"key2\": {\"nested2\": \"val2\"}}\n orig_stream_data = copy.deepcopy(stream_data)\n caller.entry_write(stream_name, stream_data, serialize=True)\n key_dict = caller.reference_create_from_stream(\n caller.name, stream_name, timeout_ms=0\n )\n for key in key_dict:\n ref_data = caller.reference_get(key_dict[key], deserialize=True)[0]\n assert ref_data == orig_stream_data[key]\n success, failed = caller.reference_delete(*key_dict.values())\n assert success == True\n assert len(failed) == 0\n\n def test_reference_create_from_stream_multiple_keys_arrow(self, caller):\n caller, caller_name = caller\n\n stream_name = \"test_ref_multiple_keys\"\n stream_data = {\"key1\": {\"nested1\": \"val1\"}, \"key2\": {\"nested2\": \"val2\"}}\n orig_stream_data = copy.deepcopy(stream_data)\n caller.entry_write(stream_name, stream_data, serialization=\"arrow\")\n key_dict = caller.reference_create_from_stream(\n caller.name, 
stream_name, timeout_ms=0\n )\n for key in key_dict:\n ref_data = caller.reference_get(key_dict[key])[0]\n assert ref_data == orig_stream_data[key]\n success, failed = caller.reference_delete(*key_dict.values())\n assert success == True\n assert len(failed) == 0\n\n def test_reference_create_from_stream_multiple_keys_persist(self, caller):\n caller, caller_name = caller\n\n stream_name = \"test_ref_multiple_keys\"\n stream_data = {\"key1\": b\"value 1!\", \"key2\": b\"value 2!\"}\n caller.entry_write(stream_name, stream_data)\n key_dict = caller.reference_create_from_stream(\n caller.name, stream_name, timeout_ms=0\n )\n for key in key_dict:\n assert caller.reference_get_timeout_ms(key_dict[key]) == -1\n success, failed = caller.reference_delete(*key_dict.values())\n assert success == True\n assert len(failed) == 0\n\n def test_reference_create_from_stream_multiple_keys_timeout(self, caller):\n caller, caller_name = caller\n\n stream_name = \"test_ref_multiple_keys\"\n stream_data = {\"key1\": b\"value 1!\", \"key2\": b\"value 2!\"}\n caller.entry_write(stream_name, stream_data)\n key_dict = caller.reference_create_from_stream(\n caller.name, stream_name, timeout_ms=500\n )\n for key in key_dict:\n ref_data = caller.reference_get(key_dict[key])[0]\n assert ref_data == stream_data[key]\n time.sleep(0.5)\n for key in key_dict:\n assert caller.reference_get(key_dict[key])[0] is None\n\n def test_reference_create_from_stream_multiple_keys_latest(self, caller):\n caller, caller_name = caller\n\n def get_data(i):\n return {\"key1\": f\"value {i}!\", \"key2\": f\"value {i}!\"}\n\n stream_name = \"test_ref_multiple_keys\"\n\n # Write all of the keys and get IDs back\n ids = []\n for i in range(10):\n stream_data = get_data(i)\n ids.append(\n caller.entry_write(stream_name, stream_data, serialization=\"msgpack\")\n )\n\n # Check that we can get each of them individually\n for i, id_val in enumerate(ids):\n\n # Make the reference to the particular ID\n key_dict = caller.reference_create_from_stream(\n caller.name, stream_name, stream_id=id_val, timeout_ms=0\n )\n\n # Loop over the references and check the data\n for key in key_dict:\n\n ref_data = caller.reference_get(key_dict[key])[0]\n correct_data = get_data(i)\n assert ref_data == correct_data[key]\n success, failed = caller.reference_delete(*key_dict.values())\n assert success == True\n assert len(failed) == 0\n\n # Now, check the final piece and make sure it's the most recent\n key_dict = caller.reference_create_from_stream(\n caller.name, stream_name, timeout_ms=0\n )\n\n # Loop over the references and check the data\n for key in key_dict:\n\n ref_data = caller.reference_get(key_dict[key])[0]\n correct_data = get_data(9)\n assert ref_data == correct_data[key]\n\n success, failed = caller.reference_delete(*key_dict.values())\n assert success == True\n assert len(failed) == 0\n\n def test_entry_read_n_ignore_serialization(self, caller):\n caller, caller_name = caller\n\n test_data = {\"some_key\": \"some_val\"}\n caller.entry_write(\"test_stream\", {\"data\": test_data}, serialization=\"msgpack\")\n entries = caller.entry_read_n(\n caller_name, \"test_stream\", 1, serialization=None, force_serialization=True\n )\n assert test_data == unpackb(entries[0][\"data\"], raw=False)\n\n def test_entry_read_since_ignore_serialization(self, caller):\n caller, caller_name = caller\n\n test_data_1 = {\"some_key\": \"some_val\"}\n test_data_2 = {\"some_other_key\": \"some_other_val\"}\n data_1_id = caller.entry_write(\n \"test_stream\", {\"data\": 
test_data_1}, serialization=\"msgpack\"\n        )\n        caller.entry_write(\n            \"test_stream\", {\"data\": test_data_2}, serialization=\"msgpack\"\n        )\n\n        entries = caller.entry_read_since(\n            caller_name,\n            \"test_stream\",\n            last_id=data_1_id,\n            serialization=None,\n            force_serialization=True,\n        )\n        assert test_data_2 == unpackb(entries[0][\"data\"], raw=False)\n\n    def test_reference_ignore_serialization(self, caller):\n        caller, caller_name = caller\n\n        data = [\n            {\"hello\": \"world\", \"atom\": 123456, \"some_obj\": {\"references\": \"are fun!\"}},\n            True,\n        ]\n        ref_ids = caller.reference_create(*data, serialization=\"msgpack\")\n        ref_data = caller.reference_get(\n            *ref_ids, serialization=None, force_serialization=True\n        )\n        for i in range(len(data)):\n            assert unpackb(ref_data[i], raw=False) == data[i]\n        success, failed = caller.reference_delete(*ref_ids)\n        assert success == True\n        assert len(failed) == 0\n\n    def test_command_response_wrong_n_workers(self, caller, responder):\n        \"\"\"\n        Ensures that command_loop raises a ValueError when given an invalid\n        number of workers.\n        \"\"\"\n        caller, caller_name = caller\n        responder, responder_name = responder\n\n        responder.command_add(\"add_1\", add_1)\n        # this should be a non-blocking call\n        with pytest.raises(ValueError):\n            responder.command_loop(n_workers=-1)\n\n    def test_timeout_ms(self):\n        then = time.time()\n\n        with pytest.raises(ElementConnectionTimeoutError):\n            e = self._element_create(\n                \"timeout-element-1\", host=\"10.255.255.1\", conn_timeout_ms=2000\n            )\n            assert e._redis_connection_timeout == 2.0\n            e._rclient.keys()\n\n        now = time.time()\n        diff = now - then\n\n        assert int(round(diff, 2)) == 2\n\n    def test_metrics_create_basic(self, caller, metrics):\n        caller, caller_name = caller\n        data = caller.metrics_create_custom(\n            MetricsLevel.INFO, \"some_metric\", retention=10000\n        )\n        assert data == \"some_metric\"\n\n        data = metrics.info(\"some_metric\")\n        assert data.retention_msecs == 10000\n\n    def test_metrics_create_label(self, caller, metrics):\n        caller, caller_name = caller\n        label_dict = {\"single\": \"label\"}\n        data = caller.metrics_create_custom(\n            MetricsLevel.INFO, \"some_metric\", labels=label_dict\n        )\n        assert data == \"some_metric\"\n\n        data = metrics.info(\"some_metric\")\n        assert data.labels == label_dict\n\n    def test_metrics_create_labels(self, caller, metrics):\n        caller, caller_name = caller\n        label_dict = {\"label1\": \"hello\", \"label2\": \"world\"}\n        data = caller.metrics_create_custom(\n            MetricsLevel.INFO, \"some_metric\", labels=label_dict\n        )\n        assert data == \"some_metric\"\n\n        data = metrics.info(\"some_metric\")\n        assert data.labels == label_dict\n\n    def test_validate_metrics_labels_enforced(self, caller, metrics):\n        enforced = False\n        caller, caller_name = caller\n        label_dict = {\"label1\": \"\", \"label2\": \"valid\"}\n\n        try:\n            caller.metrics_create_custom(\n                MetricsLevel.INFO, \"some_metric\", labels=label_dict\n            )\n        except AtomError as e:\n            print(e)\n            enforced = True\n\n        assert enforced is True\n\n    def test_metrics_create_rule(self, caller, metrics):\n        caller, caller_name = caller\n        rules = [(\"sum\", 10000, 200000)]\n        data = caller.metrics_create_custom(\n            MetricsLevel.INFO, \"some_metric\", rules=rules\n        )\n        assert data == \"some_metric\"\n\n        data = metrics.info(\"some_metric\")\n        print(data.rules)\n        assert len(data.rules) == 1\n        assert data.rules[0][0] == b\"some_metric_SUM_10000\"\n        assert data.rules[0][1] == 10000\n        assert data.rules[0][2] == b\"SUM\"\n\n        data = 
metrics.info(\"some_metric_SUM_10000\")\n assert data.retention_msecs == 200000\n\n def test_metrics_create_rules(self, caller, metrics):\n caller, caller_name = caller\n rules = [\n (\"sum\", 10000, 200000),\n (\"avg\", 86400, 604800),\n ]\n\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", rules=rules\n )\n assert data == \"some_metric\"\n\n data = metrics.info(\"some_metric\")\n assert len(data.rules) == 2\n sum_idx = 0 if data.rules[0][0] == b\"some_metric_SUM_10000\" else 1\n avg_idx = 1 if sum_idx == 0 else 0\n assert data.rules[sum_idx][0] == b\"some_metric_SUM_10000\"\n assert data.rules[sum_idx][1] == 10000\n assert data.rules[sum_idx][2] == b\"SUM\"\n assert data.rules[avg_idx][0] == b\"some_metric_AVG_86400\"\n assert data.rules[avg_idx][1] == 86400\n assert data.rules[avg_idx][2] == b\"AVG\"\n\n data = metrics.info(\"some_metric_SUM_10000\")\n assert data.retention_msecs == 200000\n\n data = metrics.info(\"some_metric_AVG_86400\")\n assert data.retention_msecs == 604800\n\n def test_metrics_create_already_created(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n\n def test_metrics_create_update(self, caller, metrics):\n caller, caller_name = caller\n rules = [\n (\"sum\", 10000, 200000),\n (\"avg\", 86400, 604800),\n ]\n label_dict = {\"label1\": \"hello\", \"label2\": \"world\"}\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", rules=rules, labels=label_dict\n )\n assert data == \"some_metric\"\n\n data = metrics.info(\"some_metric\")\n assert data.labels == label_dict\n assert len(data.rules) == 2\n sum_idx = 0 if data.rules[0][0] == b\"some_metric_SUM_10000\" else 1\n avg_idx = 1 if sum_idx == 0 else 0\n assert data.rules[sum_idx][0] == b\"some_metric_SUM_10000\"\n assert data.rules[sum_idx][1] == 10000\n assert data.rules[sum_idx][2] == b\"SUM\"\n assert data.rules[avg_idx][0] == b\"some_metric_AVG_86400\"\n assert data.rules[avg_idx][1] == 86400\n assert data.rules[avg_idx][2] == b\"AVG\"\n\n data = caller.metrics_create_custom(\n MetricsLevel.INFO,\n \"some_metric\",\n rules=rules,\n labels=label_dict,\n update=True,\n )\n assert data == \"some_metric\"\n rules = [\n (\"min\", 6000, 1000),\n (\"max\", 5000, 10000),\n ]\n label_dict = {\"label1\": \"elementary\", \"label2\": \"robotics\"}\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", rules=rules, labels=label_dict\n )\n assert data == \"some_metric\"\n data = caller.metrics_create_custom(\n MetricsLevel.INFO,\n \"some_metric\",\n rules=rules,\n labels=label_dict,\n update=True,\n )\n assert data == \"some_metric\"\n data = metrics.info(\"some_metric\")\n assert data.labels == label_dict\n assert len(data.rules) == 2\n max_idx = 0 if data.rules[0][0] == b\"some_metric_MAX_5000\" else 1\n min_idx = 1 if max_idx == 0 else 0\n assert data.rules[max_idx][0] == b\"some_metric_MAX_5000\"\n assert data.rules[max_idx][1] == 5000\n assert data.rules[max_idx][2] == b\"MAX\"\n assert data.rules[min_idx][0] == b\"some_metric_MIN_6000\"\n assert data.rules[min_idx][1] == 6000\n assert data.rules[min_idx][2] == b\"MIN\"\n\n def test_metrics_add(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n 
)\n assert data == \"some_metric\"\n\n data = caller.metrics_add(\"some_metric\", 42)\n print(data)\n assert (\n len(data) == 1\n and type(data[0]) == list\n and len(data[0]) == 1\n and type(data[0][0]) == int\n )\n\n # make a metric and have the timestamp auto-created\n data = metrics.get(\"some_metric\")\n assert data[1] == 42\n # Make sure the auto-generated timestamp is within 1s of the unix time\n assert (time.time() * 1000) - data[0] <= 1000\n\n def test_metrics_add_set_timestamp_int(self, caller, metrics):\n caller, caller_name = caller\n\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n\n data = caller.metrics_add(\"some_metric\", 42, timestamp=1)\n assert (\n len(data) == 1\n and type(data[0]) == list\n and len(data[0]) == 1\n and type(data[0][0]) == int\n )\n\n # make a metric and have the timestamp auto-created\n data = metrics.get(\"some_metric\")\n assert data[1] == 42\n assert data[0] == 1\n\n def test_metrics_add_set_timestamp_time(self, caller, metrics):\n caller, caller_name = caller\n curr_time = int(time.time() * 1000)\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n\n data = caller.metrics_add(\"some_metric\", 42, timestamp=curr_time)\n assert (\n len(data) == 1\n and type(data[0]) == list\n and len(data[0]) == 1\n and type(data[0][0]) == int\n )\n\n # make a metric and have the timestamp auto-created\n data = metrics.get(\"some_metric\")\n assert data[1] == 42\n assert data[0] == curr_time\n\n def test_metrics_add_multiple(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n\n data = caller.metrics_add(\"some_metric\", 42)\n assert (\n len(data) == 1\n and type(data[0]) == list\n and len(data[0]) == 1\n and type(data[0][0]) == int\n )\n\n time.sleep(0.001)\n data = caller.metrics_add(\"some_metric\", 2020)\n assert (\n len(data) == 1\n and type(data[0]) == list\n and len(data[0]) == 1\n and type(data[0][0]) == int\n )\n\n # make a metric and have the timestamp auto-created\n data = metrics.range(\"some_metric\", 1, -1)\n assert data[0][1] == 42\n assert data[1][1] == 2020\n\n def test_metrics_add_multiple_handle_same_timestamp(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n data = caller.metrics_add(\"some_metric\", 42, timestamp=1234)\n assert len(data) == 1 and type(data[0]) == list and data[0][0] == 1234\n\n data = caller.metrics_add(\"some_metric\", 2020, timestamp=1234)\n assert len(data) == 1 and type(data[0]) == list and data[0][0] == 1234\n\n # Behavior should be update\n data = metrics.range(\"some_metric\", 1, -1)\n assert len(data) == 1\n # Default behavior is MAX\n assert data[0][1] == 2020\n assert data[0][0] == 1234\n\n def test_metrics_async(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n pipeline = caller.metrics_get_pipeline()\n assert pipeline is not None\n pipeline = caller.metrics_get_pipeline()\n assert pipeline is not None\n data = caller.metrics_add(\"some_metric\", 42, pipeline=pipeline)\n assert data is None\n\n data = metrics.get(\"some_metric\")\n assert data == (0, 0)\n data 
= caller.metrics_write_pipeline(pipeline)\n assert data is not None\n data = metrics.get(\"some_metric\")\n assert type(data[0]) == int and data[1] == 42\n\n def test_metrics_add_multiple_simultaneous(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_other_metric\", retention=10000\n )\n assert data == \"some_other_metric\"\n data = caller.metrics_add(\"some_metric\", 42)\n assert data is not None\n data = caller.metrics_add(\"some_other_metric\", 2020)\n assert data is not None\n\n # make a metric and have the timestamp auto-created\n data = metrics.range(\"some_metric\", 1, -1)\n assert len(data) == 1 and data[0][1] == 42\n data = metrics.range(\"some_other_metric\", 1, -1)\n assert len(data) == 1 and data[0][1] == 2020\n\n def test_metrics_add_multiple_simultaneous_async(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_other_metric\", retention=10000\n )\n assert data == \"some_other_metric\"\n pipeline = caller.metrics_get_pipeline()\n assert pipeline is not None\n data = caller.metrics_add(\"some_metric\", 42, pipeline=pipeline)\n assert data is None\n data = caller.metrics_add(\"some_other_metric\", 2020, pipeline=pipeline)\n assert data is None\n\n time.sleep(0.001)\n data = caller.metrics_write_pipeline(pipeline)\n assert data is not None\n\n # make a metric and have the timestamp auto-created\n data = metrics.range(\"some_metric\", 1, -1)\n assert len(data) == 1 and data[0][1] == 42\n data = metrics.range(\"some_other_metric\", 1, -1)\n assert len(data) == 1 and data[0][1] == 2020\n\n def test_metrics_add_multiple_async(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n pipeline = caller.metrics_get_pipeline()\n assert pipeline is not None\n pipeline = caller.metrics_get_pipeline()\n assert pipeline is not None\n data = caller.metrics_add(\"some_metric\", 42, pipeline=pipeline)\n assert data is None\n time.sleep(0.001)\n data = caller.metrics_add(\"some_metric\", 2020, pipeline=pipeline)\n assert data is None\n data = caller.metrics_write_pipeline(pipeline)\n assert data is not None\n # make a metric and have the timestamp auto-created\n data = metrics.range(\"some_metric\", 1, -1)\n assert len(data) == 2 and data[0][1] == 42 and data[1][1] == 2020\n\n def test_metrics_add_multiple_async_handle_same_timestamp(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n pipeline = caller.metrics_get_pipeline()\n assert pipeline is not None\n data = caller.metrics_add(\"some_metric\", 42, timestamp=1234, pipeline=pipeline)\n assert data is None\n data = caller.metrics_add(\n \"some_metric\", 2020, timestamp=1234, pipeline=pipeline\n )\n assert data is None\n\n data = metrics.get(\"some_metric\")\n assert data == (0, 0)\n\n data = caller.metrics_write_pipeline(pipeline)\n assert data is not None\n\n # make a metric and have the timestamp auto-created\n data = metrics.range(\"some_metric\", 1, -1)\n\n # There's a super-slim chance 
this makes it through if the\n # calls are on a millisecond boundary\n assert len(data) == 1 or (len(data) == 2)\n\n # If there's only one piece of data, behavior should be MAX by default\n if len(data) == 1:\n assert data[0][1] == 2020\n else:\n assert data[0][1] == 42\n assert data[1][1] == 2020\n\n def test_metrics_async_timestamp_no_jitter(self, caller, metrics):\n caller, caller_name = caller\n data = caller.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n pipeline = caller.metrics_get_pipeline()\n assert pipeline is not None\n data = caller.metrics_add(\"some_metric\", 42, pipeline=pipeline)\n assert data is None\n add_time = time.time()\n\n data = metrics.get(\"some_metric\")\n assert data == (0, 0)\n\n time.sleep(2.0)\n flush_time = time.time()\n\n data = caller.metrics_write_pipeline(pipeline)\n assert data is not None\n\n data = metrics.get(\"some_metric\")\n assert data[1] == 42\n\n # Make sure the timestamp gets set at the flush and\n # not the add\n assert (int(1000 * add_time) - data[0]) <= 1000\n assert (int(1000 * flush_time) - data[0]) >= 1900\n\n def test_metrics_remote(self, caller, metrics):\n my_elem = Element(\n \"test_metrics_no_redis\", metrics_host=\"127.0.0.1\", metrics_port=6380\n )\n assert my_elem is not None\n\n data = my_elem.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data == \"some_metric\"\n\n my_elem._clean_up()\n\n def test_metrics_remote_nonexist(self, caller, metrics):\n my_elem = Element(\n \"test_metrics_no_redis\", metrics_host=\"127.0.0.1\", metrics_port=6381\n )\n assert my_elem is not None\n\n data = my_elem.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data is None\n\n my_elem._clean_up()\n\n def test_metrics_remote_nonexist_enforced(self, caller, metrics):\n enforced = False\n\n try:\n Element(\n \"test_metrics_no_redis\",\n metrics_host=\"127.0.0.1\",\n metrics_port=6381,\n enforce_metrics=True,\n )\n except AtomError as e:\n print(e)\n enforced = True\n\n assert enforced is True\n\n def test_metrics_socket_nonexist(self, caller, metrics):\n my_elem = Element(\n \"test_metrics_no_redis\", metrics_socket_path=\"/shared/nonexistent.sock\"\n )\n assert my_elem is not None\n\n data = my_elem.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data is None\n\n my_elem._clean_up()\n\n def test_metrics_socket_nonexist_enforced(self, caller, metrics):\n enforced = False\n\n try:\n Element(\n \"test_metrics_no_redis\",\n metrics_socket_path=\"/shared/nonexistent.sock\",\n enforce_metrics=True,\n )\n except AtomError as e:\n print(e)\n enforced = True\n\n assert enforced is True\n\n def test_metrics_turned_off(self, caller, metrics):\n os.environ[\"ATOM_USE_METRICS\"] = \"FALSE\"\n my_elem = Element(\"test_metrics_turned_off\")\n assert my_elem is not None\n\n pipeline = my_elem.metrics_get_pipeline()\n assert pipeline is None\n data = my_elem.metrics_create_custom(\n MetricsLevel.INFO, \"some_metric\", retention=10000\n )\n assert data is None\n data = my_elem.metrics_add(\"some_metric\", 42)\n assert data is None\n data = my_elem.metrics_write_pipeline(pipeline)\n assert data is None\n\n my_elem._clean_up()\n\n def test_counter_set(self, caller):\n\n caller, caller_name = caller\n\n for i in range(10):\n counter_val = caller.counter_set(\"some_counter\", i)\n assert counter_val == i\n\n success = caller.counter_delete(\"some_counter\")\n assert success == 
True\n\n def test_counter_get(self, caller):\n\n caller, caller_name = caller\n\n for i in range(10):\n counter_val = caller.counter_set(\"some_counter\", i)\n assert counter_val == i\n assert caller.counter_get(\"some_counter\") == i\n\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n\n def test_counter_delete(self, caller):\n\n caller, caller_name = caller\n\n counter_val = caller.counter_set(\"some_counter\", 32)\n assert counter_val == 32\n assert caller.counter_get(\"some_counter\") == 32\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n assert caller.counter_get(\"some_counter\") is None\n\n def test_counter_update(self, caller):\n\n caller, caller_name = caller\n\n counter_sum = 0\n\n for i in range(20):\n\n # Test 10 positive and 10 negative numbers\n rand_val = random.randint(0, 1000)\n if i % 2 == 0:\n rand_val *= -1\n\n # Add the value to the sum\n counter_sum += rand_val\n\n # Update the counter\n counter_val = caller.counter_update(\"some_counter\", rand_val)\n\n # Make sure our sum matches the counter's\n assert counter_sum == counter_val\n\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n\n def test_counter_set_update(self, caller):\n\n caller, caller_name = caller\n\n counter_val = caller.counter_set(\"some_counter\", 40)\n assert counter_val == 40\n\n counter_val = caller.counter_update(\"some_counter\", 2)\n assert counter_val == 42\n\n counter_val = caller.counter_update(\"some_counter\", 0)\n assert counter_val == 42\n\n counter_val = caller.counter_update(\"some_counter\", -1)\n assert counter_val == 41\n\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n\n def test_counter_expire(self, caller):\n\n caller, caller_name = caller\n\n counter_val = caller.counter_set(\"some_counter\", -27, timeout_ms=50)\n assert counter_val == -27\n\n time.sleep(0.1)\n\n counter_val = caller.counter_get(\"some_counter\")\n assert counter_val is None\n\n def test_multiple_counters(self, caller):\n\n caller, caller_name = caller\n\n counter1_sum = 0\n counter2_sum = 0\n\n for i in range(20):\n\n # Test 10 positive and 10 negative numbers\n rand_val_1 = random.randint(0, 1000)\n rand_val_2 = random.randint(0, 1000)\n if i % 2 == 0:\n rand_val_1 *= -1\n rand_val_2 *= -1\n\n # Add the value to the sum\n counter1_sum += rand_val_1\n counter2_sum += rand_val_2\n\n # Update the counter\n counter1_val = caller.counter_update(\"some_counter1\", rand_val_1)\n assert counter1_sum == counter1_val\n counter2_val = caller.counter_update(\"some_counter2\", rand_val_2)\n assert counter2_sum == counter2_val\n\n success = caller.counter_delete(\"some_counter1\")\n assert success == True\n success = caller.counter_delete(\"some_counter2\")\n assert success == True\n\n def test_counter_set_pipelines(self, caller):\n \"\"\"\n Tests to make sure we're properly releasing pipelines. This should\n raise a pipeline error if we're having issues and will check that the\n pipeline pools for both redis and metrics are the proper size at the end\n \"\"\"\n\n caller, caller_name = caller\n for i in range(2 * REDIS_PIPELINE_POOL_SIZE):\n caller.counter_set(\"some_counter\", 0)\n\n assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n\n def test_counter_update_pipelines(self, caller):\n \"\"\"\n Tests to make sure we're properly releasing pipelines. 
This should\n raise a pipeline error if we're having issues and will check that the\n pipeline pools for both redis and metrics are the proper size at the end\n \"\"\"\n\n caller, caller_name = caller\n for i in range(2 * REDIS_PIPELINE_POOL_SIZE):\n caller.counter_update(\"some_counter\", 1)\n\n assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n\n def test_counter_get_pipelines(self, caller):\n \"\"\"\n Tests to make sure we're properly releasing pipelines. This should\n raise a pipeline error if we're having issues and will check that the\n pipeline pools for both redis and metrics are the proper size at the end\n \"\"\"\n caller, caller_name = caller\n\n caller.counter_set(\"some_counter\", 239829)\n\n for i in range(2 * REDIS_PIPELINE_POOL_SIZE):\n caller.counter_get(\"some_counter\")\n\n assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n\n def test_counter_delete_pipelines(self, caller):\n \"\"\"\n Tests to make sure we're properly releasing pipelines. This should\n raise a pipeline error if we're having issues and will check that the\n pipeline pools for both redis and metrics are the proper size at the end\n \"\"\"\n\n caller, caller_name = caller\n for i in range(2 * REDIS_PIPELINE_POOL_SIZE):\n caller.counter_set(\"some_counter\", i)\n success = caller.counter_delete(\"some_counter\")\n assert success == True\n\n assert caller._rpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n assert caller._mpipeline_pool.qsize() == REDIS_PIPELINE_POOL_SIZE\n\n def test_set_add(self, caller):\n\n caller, caller_name = caller\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n value = caller.sorted_set_read(\"some_set\", member)\n assert value == i\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_size(self, caller):\n\n caller, caller_name = caller\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n add_cardinality = caller.sorted_set_add(\"some_set\", member, i)\n size_cardinality = caller.sorted_set_size(\"some_set\")\n assert add_cardinality == size_cardinality\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_size_no_set(self, caller):\n\n caller, caller_name = caller\n\n size = caller.sorted_set_size(\"some_set\")\n assert size == 0\n\n def test_set_update(self, caller):\n\n caller, caller_name = caller\n n_items = 10\n\n for i in range(n_items):\n member = \"same_value\"\n caller.sorted_set_add(\"some_set\", member, i)\n value = caller.sorted_set_read(\"some_set\", member)\n\n assert value == i\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_range_min_withvalues(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n values.append((member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1)\n assert set_range == values\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_range_min_slice_withvalues(self, caller):\n\n caller, caller_name = caller\n\n values = []\n slice_start = 3\n slice_end = 5\n 
n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n if i >= slice_start and i <= slice_end:\n values.append((member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", slice_start, slice_end)\n assert set_range == values\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_range_min_novalues(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.append(member.encode(\"utf-8\"))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1, withvalues=False)\n assert set_range == values\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_range_max_withvalues(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.insert(0, (member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1, maximum=True)\n assert set_range == values\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_range_max_slice_withvalues(self, caller):\n\n caller, caller_name = caller\n\n values = []\n slice_start = 1\n slice_end = 7\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n if i <= (n_items - 1 - slice_start) and i >= (n_items - 1 - slice_end):\n values.insert(0, (member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\n \"some_set\", slice_start, slice_end, maximum=True\n )\n assert set_range == values\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_range_max_novalues(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.insert(0, member.encode(\"utf-8\"))\n\n set_range = caller.sorted_set_range(\n \"some_set\", 0, -1, maximum=True, withvalues=False\n )\n assert set_range == values\n\n caller.sorted_set_delete(\"some_set\")\n\n def test_set_pop_min(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.append((member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1)\n assert set_range == values\n\n for i in range(n_items):\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\")\n assert values[0] == pop_val\n assert cardinality == n_items - i - 1\n values.pop(0)\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_min_blocking(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.append((member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1)\n assert set_range == values\n\n for i in range(n_items):\n pop_val, cardinality = caller.sorted_set_pop(\n \"some_set\", 
block=True, timeout=0.1\n )\n assert values[0] == pop_val\n assert cardinality == n_items - i - 1\n values.pop(0)\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_no_exist(self, caller):\n\n caller, caller_name = caller\n passed = False\n\n try:\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\")\n except SetEmptyError:\n passed = True\n\n assert passed == True\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_no_exist_blocking(self, caller):\n\n caller, caller_name = caller\n passed = False\n block_time = 0.1\n\n start_time = time.time()\n try:\n pop_val, cardinality = caller.sorted_set_pop(\n \"some_set\", block=True, timeout=block_time\n )\n except SetEmptyError:\n passed = True\n end_time = time.time()\n\n assert passed == True\n assert end_time - start_time >= block_time\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_empty(self, caller):\n\n caller, caller_name = caller\n cardinality = caller.sorted_set_add(\"some_set\", \"member\", 23)\n assert cardinality == 1\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\")\n assert pop_val == (b\"member\", 23)\n assert cardinality == 0\n\n passed = False\n\n try:\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\")\n except SetEmptyError:\n passed = True\n\n assert passed == True\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_max(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.insert(0, (member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1, maximum=True)\n assert set_range == values\n\n for i in range(n_items):\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\", maximum=True)\n assert values[0] == pop_val\n assert cardinality == n_items - i - 1\n values.pop(0)\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_max_blocking(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.insert(0, (member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1, maximum=True)\n assert set_range == values\n\n for i in range(n_items):\n pop_val, cardinality = caller.sorted_set_pop(\n \"some_set\", maximum=True, block=True, timeout=0.1\n )\n assert values[0] == pop_val\n assert cardinality == n_items - i - 1\n values.pop(0)\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_max_no_exist(self, caller):\n\n caller, caller_name = caller\n passed = False\n\n try:\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\", maximum=True)\n except SetEmptyError:\n passed = True\n\n assert passed == True\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_max_no_exist_blocking(self, caller):\n\n caller, caller_name = caller\n passed = False\n block_time = 1.0\n\n start_time = time.time()\n try:\n pop_val, cardinality = caller.sorted_set_pop(\n \"some_set\", maximum=True, block=True, timeout=block_time\n )\n except SetEmptyError:\n passed = True\n end_time = time.time()\n\n assert passed == True\n assert 
end_time - start_time >= block_time\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_maximum_empty(self, caller):\n\n caller, caller_name = caller\n cardinality = caller.sorted_set_add(\"some_set\", \"member\", 23)\n assert cardinality == 1\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\", maximum=True)\n assert pop_val == (b\"member\", 23)\n assert cardinality == 0\n\n passed = False\n\n try:\n pop_val, cardinality = caller.sorted_set_pop(\"some_set\", maximum=True)\n except SetEmptyError:\n passed = True\n\n assert passed == True\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_remove(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.append((member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1)\n assert set_range == values\n\n for i in range(n_items):\n member = f\"key{i}\"\n caller.sorted_set_remove(\"some_set\", member)\n values.pop(0)\n if values:\n set_range = caller.sorted_set_range(\"some_set\", 0, -1)\n assert set_range == values\n\n # No delete -- set disappears on its own when final member popped\n\n def test_set_pop_n(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.append((member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1)\n assert set_range == values\n\n # We'll pop in 2 chunks, once and then the rest\n pop_chunk_size = 3\n\n pop_vals, cardinality = caller.sorted_set_pop_n(\"some_set\", pop_chunk_size)\n assert values[0:pop_chunk_size] == pop_vals\n assert cardinality == n_items - pop_chunk_size\n\n pop_vals, cardinality = caller.sorted_set_pop_n(\"some_set\", n_items)\n assert values[pop_chunk_size:n_items] == pop_vals\n assert cardinality == 0\n\n passed = False\n try:\n pop_vals, cardinality = caller.sorted_set_pop_n(\"some_set\", 1)\n except SetEmptyError:\n passed = True\n assert passed == True\n\n def test_set_pop_n_max(self, caller):\n\n caller, caller_name = caller\n\n values = []\n n_items = 10\n\n for i in range(n_items):\n member = f\"key{i}\"\n cardinality = caller.sorted_set_add(\"some_set\", member, i)\n assert cardinality == i + 1\n\n values.insert(0, (member.encode(\"utf-8\"), float(i)))\n\n set_range = caller.sorted_set_range(\"some_set\", 0, -1, maximum=True)\n assert set_range == values\n\n # We'll pop in 2 chunks, once and then the rest\n pop_chunk_size = 3\n\n pop_vals, cardinality = caller.sorted_set_pop_n(\n \"some_set\", pop_chunk_size, maximum=True\n )\n assert values[0:pop_chunk_size] == pop_vals\n assert cardinality == n_items - pop_chunk_size\n\n pop_vals, cardinality = caller.sorted_set_pop_n(\n \"some_set\", n_items, maximum=True\n )\n assert values[pop_chunk_size:n_items] == pop_vals\n assert cardinality == 0\n\n passed = False\n try:\n pop_vals, cardinality = caller.sorted_set_pop_n(\"some_set\", 1)\n except SetEmptyError:\n passed = True\n assert passed == True\n\n\ndef add_1(x):\n return Response(int(x) + 1)\n\n\ndef sleep_ms(x):\n time.sleep(x / 1000.0)\n"
] | [
[
"numpy.ones"
]
] |
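The test module stored in the record above exercises a blocking sorted-set pop API (`sorted_set_pop` with `block=True`/`timeout`, raising `SetEmptyError` when nothing is available). Below is a minimal sketch of that pattern written directly against redis-py's BZPOPMIN/BZPOPMAX and ZPOPMIN/ZPOPMAX commands; the `SetEmptyError` class and the `((member, score), cardinality)` return convention are assumptions inferred from the asserts in the tests, not the tested library's actual implementation.

```python
# Minimal sketch of the blocking sorted-set pop pattern exercised by the tests
# above, written against redis-py. `SetEmptyError` and the return convention
# ((member, score), remaining_cardinality) are assumptions inferred from the
# test asserts, not the tested library's real implementation.
import redis


class SetEmptyError(Exception):
    """Raised when popping from an empty or missing sorted set."""


def sorted_set_pop(client, key, maximum=False, block=False, timeout=0.0):
    """Pop the min (or max) member of `key`, optionally blocking up to `timeout` s."""
    if block:
        # BZPOPMIN/BZPOPMAX return (key, member, score), or None on timeout.
        raw = (client.bzpopmax if maximum else client.bzpopmin)(key, timeout=timeout)
        item = None if raw is None else (raw[1], raw[2])
    else:
        # ZPOPMIN/ZPOPMAX return a list of (member, score) pairs; empty if no set.
        raw = (client.zpopmax if maximum else client.zpopmin)(key, count=1)
        item = raw[0] if raw else None
    if item is None:
        raise SetEmptyError(f"sorted set {key!r} is empty")
    return item, client.zcard(key)


if __name__ == "__main__":
    r = redis.Redis()
    r.zadd("some_set", {"key0": 0, "key1": 1})
    print(sorted_set_pop(r, "some_set"))                 # ((b'key0', 0.0), 1)
    print(sorted_set_pop(r, "some_set", maximum=True))   # ((b'key1', 1.0), 0)
```

The blocking commands signal an empty set by returning None after the timeout rather than raising, so the sketch converts that case into the exception the tests wait for.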
wangkuiyi/torchrec | [
"1318d91608f40f56fb20b2bd8cd97114e30902bf"
] | [
"torchrec/distributed/planner/planners.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom functools import reduce\nfrom typing import Tuple, Dict, Optional, List, cast, Union\n\nimport torch\nimport torch.distributed as dist\nfrom torch import nn\nfrom torchrec.distributed.collective_utils import (\n invoke_on_rank_and_broadcast_result,\n)\nfrom torchrec.distributed.planner.constants import MAX_SIZE\nfrom torchrec.distributed.planner.enumerators import EmbeddingEnumerator\nfrom torchrec.distributed.planner.partitioners import GreedyPerfPartitioner\nfrom torchrec.distributed.planner.perf_models import NoopPerfModel\nfrom torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\nfrom torchrec.distributed.planner.stats import EmbeddingStats\nfrom torchrec.distributed.planner.storage_reservations import (\n HeuristicalStorageReservation,\n)\nfrom torchrec.distributed.planner.types import (\n ParameterConstraints,\n Partitioner,\n Topology,\n Stats,\n Shard,\n Storage,\n ShardingOption,\n StorageReservation,\n Enumerator,\n Proposer,\n PerfModel,\n PlannerError,\n)\nfrom torchrec.distributed.types import (\n EnumerableShardingSpec,\n ShardMetadata,\n)\nfrom torchrec.distributed.types import (\n ShardingPlan,\n ShardingPlanner,\n ModuleSharder,\n ShardingType,\n ParameterSharding,\n)\n\n\ndef _merge_shards_by_dim(shards: List[Shard], dim: int) -> List[Shard]:\n # merges shards down to one per rank along dimension.\n # Will recompute shard offsets\n merged_shards = []\n shards = sorted(shards, key=lambda x: x.rank)\n\n current_rank = -1\n current_shard: Optional[Shard] = None\n current_dim_offset = 0\n for shard in shards:\n if shard.rank != current_rank:\n current_shard = copy.deepcopy(shard)\n current_shard.offset[dim] = current_dim_offset\n merged_shards.append(current_shard)\n current_rank = shard.rank\n else:\n # pyre-ignore [16]\n current_shard.size[dim] += shard.size[dim]\n # pyre-ignore [16]\n current_shard.storage += shard.storage\n # pyre-ignore [16]\n current_shard.perf += shard.perf\n current_dim_offset += shard.size[dim]\n return merged_shards\n\n\ndef _to_sharding_plan(\n sharding_options: List[ShardingOption],\n topology: Topology,\n) -> ShardingPlan:\n def _placement(\n compute_device: str,\n rank: int,\n local_size: int,\n ) -> str:\n param_device = compute_device\n if compute_device == \"cuda\":\n param_device = torch.device(\"cuda\", rank % local_size)\n return f\"rank:{rank}/{param_device}\"\n\n compute_device = topology.compute_device\n local_size = topology.local_world_size\n\n plan = {}\n for sharding_option in sharding_options:\n shards = sharding_option.shards\n sharding_type = sharding_option.sharding_type\n\n module_plan = plan.get(sharding_option.path, {})\n module_plan[sharding_option.name] = ParameterSharding(\n sharding_spec=None\n if sharding_type == ShardingType.DATA_PARALLEL.value\n else EnumerableShardingSpec(\n [\n ShardMetadata(\n shard_sizes=shard.size,\n shard_offsets=shard.offset,\n placement=_placement(\n compute_device, cast(int, shard.rank), local_size\n ),\n )\n for shard in shards\n ]\n ),\n sharding_type=sharding_type,\n compute_kernel=sharding_option.compute_kernel,\n ranks=[cast(int, shard.rank) for shard in shards],\n )\n plan[sharding_option.path] = module_plan\n return ShardingPlan(plan)\n\n\nclass EmbeddingShardingPlanner(ShardingPlanner):\n def __init__(\n 
self,\n topology: Topology,\n enumerator: Optional[Enumerator] = None,\n storage_reservation: Optional[StorageReservation] = None,\n proposer: Optional[Union[Proposer, List[Proposer]]] = None,\n partitioner: Optional[Partitioner] = None,\n performance_model: Optional[PerfModel] = None,\n stats: Optional[Stats] = None,\n constraints: Optional[Dict[str, ParameterConstraints]] = None,\n debug: bool = False,\n ) -> None:\n self._topology = topology\n self._constraints = constraints\n self._enumerator: Enumerator = (\n enumerator\n if enumerator\n else EmbeddingEnumerator(\n topology=topology,\n constraints=constraints,\n )\n )\n self._storage_reservation: StorageReservation = (\n storage_reservation\n if storage_reservation\n else HeuristicalStorageReservation(percentage=0.15)\n )\n self._partitioner: Partitioner = (\n partitioner if partitioner else GreedyPerfPartitioner()\n )\n if proposer:\n self._proposers: List[Proposer] = (\n [proposer] if not isinstance(proposer, list) else proposer\n )\n else:\n self._proposers = [\n GreedyProposer(),\n GreedyProposer(use_depth=False),\n UniformProposer(),\n ]\n self._perf_model: PerfModel = (\n performance_model if performance_model else NoopPerfModel(topology=topology)\n )\n self._stats: Stats = stats if stats else EmbeddingStats()\n self._debug = debug\n self._num_proposals: int = 0\n self._num_plans: int = 0\n\n def collective_plan(\n self,\n module: nn.Module,\n sharders: List[ModuleSharder[nn.Module]],\n # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.\n pg: dist.ProcessGroup,\n ) -> ShardingPlan:\n \"\"\"\n Call self.plan(...) on rank 0 and broadcast\n \"\"\"\n return invoke_on_rank_and_broadcast_result(\n pg,\n 0,\n self.plan,\n module,\n sharders,\n )\n\n def plan(\n self,\n module: nn.Module,\n sharders: List[ModuleSharder[nn.Module]],\n ) -> ShardingPlan:\n\n best_plan = None\n lowest_storage = Storage(MAX_SIZE, MAX_SIZE)\n best_perf_rating = MAX_SIZE\n\n storage_constraint: Topology = self._storage_reservation.reserve(\n topology=self._topology,\n module=module,\n sharders=sharders,\n constraints=self._constraints,\n )\n\n search_space = self._enumerator.enumerate(\n module=module,\n sharders=sharders,\n )\n if not search_space:\n # No shardable parameters\n return ShardingPlan({})\n\n proposal_cache: Dict[\n Tuple[int, ...],\n Tuple[bool, Optional[List[ShardingOption]], Optional[float]],\n ] = {}\n\n for proposer in self._proposers:\n proposer.load(search_space=search_space)\n\n for proposer in self._proposers:\n proposal = proposer.propose()\n\n while proposal:\n proposal_key = tuple(sorted(map(hash, proposal)))\n if proposal_key in proposal_cache:\n partitionable, plan, perf_rating = proposal_cache[proposal_key]\n proposer.feedback(\n partitionable=partitionable,\n plan=plan,\n perf_rating=perf_rating,\n )\n proposal = proposer.propose()\n continue\n\n self._num_proposals += 1\n try:\n plan = self._partitioner.partition(\n proposal=proposal,\n storage_constraint=storage_constraint,\n )\n self._num_plans += 1\n perf_rating = self._perf_model.rate(plan=plan)\n if perf_rating < best_perf_rating:\n best_perf_rating = perf_rating\n best_plan = plan\n proposal_cache[proposal_key] = (True, plan, perf_rating)\n proposer.feedback(\n partitionable=True, plan=plan, perf_rating=perf_rating\n )\n except PlannerError:\n current_storage = cast(\n Storage,\n reduce(\n lambda x, y: x + y,\n [\n shard.storage\n for option in proposal\n for shard in option.shards\n ],\n ),\n )\n if current_storage < lowest_storage:\n lowest_storage = 
current_storage\n proposal_cache[proposal_key] = (False, None, None)\n proposer.feedback(partitionable=False)\n\n proposal = proposer.propose()\n\n if best_plan:\n sharding_plan = _to_sharding_plan(best_plan, self._topology)\n\n self._stats.log(\n sharding_plan=sharding_plan,\n topology=self._topology,\n num_proposals=self._num_proposals,\n num_plans=self._num_plans,\n best_plan=best_plan,\n constraints=self._constraints,\n debug=self._debug,\n )\n return sharding_plan\n else:\n global_storage_capacity = reduce(\n lambda x, y: x + y,\n [device.storage for device in self._topology.devices],\n )\n global_storge_constraints = reduce(\n lambda x, y: x + y,\n [device.storage for device in storage_constraint.devices],\n )\n raise PlannerError(\n f\"Unable to find a plan for this model are evaluating {self._num_proposals} proposals.\"\n \"\\nPossible solutions:\"\n f\"\\n 1) Increase the number of devices ({self._topology.world_size})\"\n f\"\\n 2) Reduce the model size (\"\n f\"\\n\\t Global storage: {global_storage_capacity.hbm}, \"\n f\"\\n\\t Available for model parallel: {global_storge_constraints},\"\n f\"\\n\\t Requirement for model parallel: {lowest_storage})\"\n f\"\\n 3) Reduce local batch size ({self._topology.batch_size})\"\n \"\\n 4) Remove planner constraints that might be reducing search space or available storage\\n\"\n )\n"
] | [
[
"torch.device"
]
] |
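The planner stored in the record above searches over sharding proposals by looping propose, partition, rate, feedback, memoizing each proposal under `tuple(sorted(map(hash, proposal)))` so equivalent proposals are not re-partitioned. The sketch below reproduces just that control flow with invented stand-ins (`partition`, `rate`, a scalar `budget`); it is not torchrec's Partitioner/PerfModel API.

```python
# Minimal sketch of the propose -> partition -> rate -> feedback loop with a
# proposal cache, mirroring the structure of EmbeddingShardingPlanner.plan()
# above. `partition`, `rate`, and the integer `budget` are invented stand-ins
# for illustration, not torchrec's actual interfaces.
from typing import Dict, List, Optional, Tuple


class PartitionError(Exception):
    """Stand-in for PlannerError: the proposal does not fit the budget."""


def partition(proposal: List[int], budget: int) -> List[int]:
    if sum(proposal) > budget:
        raise PartitionError("proposal exceeds storage budget")
    return proposal  # here the "plan" is just the proposal itself


def rate(plan: List[int]) -> float:
    return float(max(plan))  # pretend perf cost = largest shard (lower is better)


def plan_best(proposals: List[List[int]], budget: int) -> Optional[List[int]]:
    cache: Dict[Tuple[int, ...], Tuple[bool, Optional[float]]] = {}
    best_plan, best_rating = None, float("inf")
    for proposal in proposals:
        key = tuple(sorted(map(hash, proposal)))  # order-insensitive memo key
        if key in cache:
            continue  # an equivalent proposal was already evaluated
        try:
            plan = partition(proposal, budget)
            rating = rate(plan)
            cache[key] = (True, rating)
            if rating < best_rating:
                best_plan, best_rating = plan, rating
        except PartitionError:
            cache[key] = (False, None)
    return best_plan


if __name__ == "__main__":
    # [8, 8] exceeds the budget; [2, 2, 2, 2] rates better than [4, 4, 4].
    print(plan_best([[8, 8], [4, 4, 4], [2, 2, 2, 2]], budget=14))  # [2, 2, 2, 2]
```

Keying the cache on the sorted hashes makes the lookup insensitive to proposal ordering, which is why the planner above can skip re-partitioning when different proposers emit the same set of sharding options.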
mshader/mne-python | [
"4a932cbb76006329e62fb4901628db68b471774e"
] | [
"mne/filter.py"
] | [
"\"\"\"IIR and FIR filtering and resampling functions.\"\"\"\n\nfrom copy import deepcopy\nfrom functools import partial\n\nimport numpy as np\n\nfrom .annotations import _annotations_starts_stops\nfrom .io.pick import _picks_to_idx\nfrom .cuda import (_setup_cuda_fft_multiply_repeated, _fft_multiply_repeated,\n _setup_cuda_fft_resample, _fft_resample, _smart_pad)\nfrom .fixes import irfft, ifftshift, fftfreq\nfrom .parallel import parallel_func, check_n_jobs\nfrom .time_frequency.multitaper import _mt_spectra, _compute_mt_params\nfrom .utils import (logger, verbose, sum_squared, check_version, warn, _pl,\n _check_preload, _validate_type, _check_option)\n\n# These values from Ifeachor and Jervis.\n_length_factors = dict(hann=3.1, hamming=3.3, blackman=5.0)\n\n\ndef is_power2(num):\n \"\"\"Test if number is a power of 2.\n\n Parameters\n ----------\n num : int\n Number.\n\n Returns\n -------\n b : bool\n True if is power of 2.\n\n Examples\n --------\n >>> is_power2(2 ** 3)\n True\n >>> is_power2(5)\n False\n \"\"\"\n num = int(num)\n return num != 0 and ((num & (num - 1)) == 0)\n\n\ndef next_fast_len(target):\n \"\"\"Find the next fast size of input data to `fft`, for zero-padding, etc.\n\n SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this\n returns the next composite of the prime factors 2, 3, and 5 which is\n greater than or equal to `target`. (These are also known as 5-smooth\n numbers, regular numbers, or Hamming numbers.)\n\n Parameters\n ----------\n target : int\n Length to start searching from. Must be a positive integer.\n\n Returns\n -------\n out : int\n The first 5-smooth number greater than or equal to `target`.\n\n Notes\n -----\n Copied from SciPy with minor modifications.\n \"\"\"\n from bisect import bisect_left\n hams = (8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48,\n 50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128,\n 135, 144, 150, 160, 162, 180, 192, 200, 216, 225, 240, 243, 250,\n 256, 270, 288, 300, 320, 324, 360, 375, 384, 400, 405, 432, 450,\n 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, 729,\n 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125,\n 1152, 1200, 1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536,\n 1600, 1620, 1728, 1800, 1875, 1920, 1944, 2000, 2025, 2048, 2160,\n 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, 2700, 2880, 2916,\n 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840,\n 3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000,\n 5120, 5184, 5400, 5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400,\n 6480, 6561, 6750, 6912, 7200, 7290, 7500, 7680, 7776, 8000, 8100,\n 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000)\n\n if target <= 6:\n return target\n\n # Quickly check if it's already a power of 2\n if not (target & (target - 1)):\n return target\n\n # Get result quickly for small sizes, since FFT itself is similarly fast.\n if target <= hams[-1]:\n return hams[bisect_left(hams, target)]\n\n match = float('inf') # Anything found will be smaller\n p5 = 1\n while p5 < target:\n p35 = p5\n while p35 < target:\n # Ceiling integer division, avoiding conversion to float\n # (quotient = ceil(target / p35))\n quotient = -(-target // p35)\n\n p2 = 2 ** int(quotient - 1).bit_length()\n\n N = p2 * p35\n if N == target:\n return N\n elif N < match:\n match = N\n p35 *= 3\n if p35 == target:\n return p35\n if p35 < match:\n match = p35\n p5 *= 5\n if p5 == target:\n return p5\n if p5 < match:\n match = p5\n return match\n\n\ndef 
_overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None,\n n_jobs=1, copy=True, pad='reflect_limited'):\n \"\"\"Filter the signal x using h with overlap-add FFTs.\n\n Parameters\n ----------\n x : array, shape (n_signals, n_times)\n Signals to filter.\n h : 1d array\n Filter impulse response (FIR filter coefficients). Must be odd length\n if phase == 'linear'.\n n_fft : int\n Length of the FFT. If None, the best size is determined automatically.\n phase : str\n If 'zero', the delay for the filter is compensated (and it must be\n an odd-length symmetric filter). If 'linear', the response is\n uncompensated. If 'zero-double', the filter is applied in the\n forward and reverse directions. If 'minimum', a minimum-phase\n filter will be used.\n picks : list | None\n See calling functions.\n n_jobs : int | str\n Number of jobs to run in parallel. Can be 'cuda' if ``cupy``\n is installed properly.\n copy : bool\n If True, a copy of x, filtered, is returned. Otherwise, it operates\n on x in place.\n pad : str\n Padding type for ``_smart_pad``.\n\n Returns\n -------\n x : array, shape (n_signals, n_times)\n x filtered.\n \"\"\"\n n_jobs = check_n_jobs(n_jobs, allow_cuda=True)\n # set up array for filtering, reshape to 2D, operate on last axis\n x, orig_shape, picks = _prep_for_filtering(x, copy, picks)\n # Extend the signal by mirroring the edges to reduce transient filter\n # response\n _check_zero_phase_length(len(h), phase)\n if len(h) == 1:\n return x * h ** 2 if phase == 'zero-double' else x * h\n n_edge = max(min(len(h), x.shape[1]) - 1, 0)\n logger.debug('Smart-padding with: %s samples on each edge' % n_edge)\n n_x = x.shape[1] + 2 * n_edge\n\n if phase == 'zero-double':\n h = np.convolve(h, h[::-1])\n\n # Determine FFT length to use\n min_fft = 2 * len(h) - 1\n if n_fft is None:\n max_fft = n_x\n if max_fft >= min_fft:\n # cost function based on number of multiplications\n N = 2 ** np.arange(np.ceil(np.log2(min_fft)),\n np.ceil(np.log2(max_fft)) + 1, dtype=int)\n cost = (np.ceil(n_x / (N - len(h) + 1).astype(np.float)) *\n N * (np.log2(N) + 1))\n\n # add a heuristic term to prevent too-long FFT's which are slow\n # (not predicted by mult. cost alone, 4e-5 exp. 
determined)\n cost += 4e-5 * N * n_x\n\n n_fft = N[np.argmin(cost)]\n else:\n # Use only a single block\n n_fft = next_fast_len(min_fft)\n logger.debug('FFT block length: %s' % n_fft)\n if n_fft < min_fft:\n raise ValueError('n_fft is too short, has to be at least '\n '2 * len(h) - 1 (%s), got %s' % (min_fft, n_fft))\n\n # Figure out if we should use CUDA\n n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated(\n n_jobs, h, n_fft)\n\n # Process each row separately\n picks = _picks_to_idx(len(x), picks)\n if n_jobs == 1:\n for p in picks:\n x[p] = _1d_overlap_filter(x[p], len(h), n_edge, phase,\n cuda_dict, pad, n_fft)\n else:\n parallel, p_fun, _ = parallel_func(_1d_overlap_filter, n_jobs)\n data_new = parallel(p_fun(x[p], len(h), n_edge, phase,\n cuda_dict, pad, n_fft) for p in picks)\n for pp, p in enumerate(picks):\n x[p] = data_new[pp]\n\n x.shape = orig_shape\n return x\n\n\ndef _1d_overlap_filter(x, n_h, n_edge, phase, cuda_dict, pad, n_fft):\n \"\"\"Do one-dimensional overlap-add FFT FIR filtering.\"\"\"\n # pad to reduce ringing\n x_ext = _smart_pad(x, (n_edge, n_edge), pad)\n n_x = len(x_ext)\n x_filtered = np.zeros_like(x_ext)\n\n n_seg = n_fft - n_h + 1\n n_segments = int(np.ceil(n_x / float(n_seg)))\n shift = ((n_h - 1) // 2 if phase.startswith('zero') else 0) + n_edge\n\n # Now the actual filtering step is identical for zero-phase (filtfilt-like)\n # or single-pass\n for seg_idx in range(n_segments):\n start = seg_idx * n_seg\n stop = (seg_idx + 1) * n_seg\n seg = x_ext[start:stop]\n seg = np.concatenate([seg, np.zeros(n_fft - len(seg))])\n\n prod = _fft_multiply_repeated(seg, cuda_dict)\n\n start_filt = max(0, start - shift)\n stop_filt = min(start - shift + n_fft, n_x)\n start_prod = max(0, shift - start)\n stop_prod = start_prod + stop_filt - start_filt\n x_filtered[start_filt:stop_filt] += prod[start_prod:stop_prod]\n\n # Remove mirrored edges that we added and cast (n_edge can be zero)\n x_filtered = x_filtered[:n_x - 2 * n_edge].astype(x.dtype)\n return x_filtered\n\n\ndef _filter_attenuation(h, freq, gain):\n \"\"\"Compute minimum attenuation at stop frequency.\"\"\"\n from scipy.signal import freqz\n _, filt_resp = freqz(h.ravel(), worN=np.pi * freq)\n filt_resp = np.abs(filt_resp) # use amplitude response\n filt_resp[np.where(gain == 1)] = 0\n idx = np.argmax(filt_resp)\n att_db = -20 * np.log10(np.maximum(filt_resp[idx], 1e-20))\n att_freq = freq[idx]\n return att_db, att_freq\n\n\ndef _prep_for_filtering(x, copy, picks=None):\n \"\"\"Set up array as 2D for filtering ease.\"\"\"\n x = _check_filterable(x)\n if copy is True:\n x = x.copy()\n orig_shape = x.shape\n x = np.atleast_2d(x)\n picks = _picks_to_idx(x.shape[-2], picks)\n x.shape = (np.prod(x.shape[:-1]), x.shape[-1])\n if len(orig_shape) == 3:\n n_epochs, n_channels, n_times = orig_shape\n offset = np.repeat(np.arange(0, n_channels * n_epochs, n_channels),\n len(picks))\n picks = np.tile(picks, n_epochs) + offset\n elif len(orig_shape) > 3:\n raise ValueError('picks argument is not supported for data with more'\n ' than three dimensions')\n assert all(0 <= pick < x.shape[0] for pick in picks) # guaranteed by above\n\n return x, orig_shape, picks\n\n\ndef _firwin_design(N, freq, gain, window, sfreq):\n \"\"\"Construct a FIR filter using firwin.\"\"\"\n from scipy.signal import firwin\n assert freq[0] == 0\n assert len(freq) > 1\n assert len(freq) == len(gain)\n h = np.zeros(N)\n prev_freq = freq[-1]\n prev_gain = gain[-1]\n if gain[-1] == 1:\n h[N // 2] = 1 # start with \"all up\"\n assert prev_gain in (0, 
1)\n for this_freq, this_gain in zip(freq[::-1][1:], gain[::-1][1:]):\n assert this_gain in (0, 1)\n if this_gain != prev_gain:\n # Get the correct N to satistify the requested transition bandwidth\n transition = (prev_freq - this_freq) / 2.\n this_N = int(round(_length_factors[window] / transition))\n this_N += (1 - this_N % 2) # make it odd\n if this_N > N:\n raise ValueError('The requested filter length %s is too short '\n 'for the requested %0.2f Hz transition band, '\n 'which requires %s samples'\n % (N, transition * sfreq / 2., this_N))\n # Construct a lowpass\n this_h = firwin(this_N, (prev_freq + this_freq) / 2.,\n window=window, pass_zero=True, nyq=freq[-1])\n offset = (N - this_N) // 2\n if this_gain == 0:\n h[offset:N - offset] -= this_h\n else:\n h[offset:N - offset] += this_h\n prev_gain = this_gain\n prev_freq = this_freq\n return h\n\n\ndef _construct_fir_filter(sfreq, freq, gain, filter_length, phase, fir_window,\n fir_design):\n \"\"\"Filter signal using gain control points in the frequency domain.\n\n The filter impulse response is constructed from a Hann window (window\n used in \"firwin2\" function) to avoid ripples in the frequency response\n (windowing is a smoothing in frequency domain).\n\n If x is multi-dimensional, this operates along the last dimension.\n\n Parameters\n ----------\n sfreq : float\n Sampling rate in Hz.\n freq : 1d array\n Frequency sampling points in Hz.\n gain : 1d array\n Filter gain at frequency sampling points.\n Must be all 0 and 1 for fir_design==\"firwin\".\n filter_length : int\n Length of the filter to use. Must be odd length if phase == \"zero\".\n phase : str\n If 'zero', the delay for the filter is compensated (and it must be\n an odd-length symmetric filter). If 'linear', the response is\n uncompensated. If 'zero-double', the filter is applied in the\n forward and reverse directions. If 'minimum', a minimum-phase\n filter will be used.\n fir_window : str\n The window to use in FIR design, can be \"hamming\" (default),\n \"hann\", or \"blackman\".\n fir_design : str\n Can be \"firwin2\" or \"firwin\".\n\n Returns\n -------\n h : array\n Filter coefficients.\n \"\"\"\n assert freq[0] == 0\n if fir_design == 'firwin2':\n from scipy.signal import firwin2 as fir_design\n else:\n assert fir_design == 'firwin'\n fir_design = partial(_firwin_design, sfreq=sfreq)\n from scipy.signal import minimum_phase\n\n # issue a warning if attenuation is less than this\n min_att_db = 12 if phase == 'minimum' else 20\n\n # normalize frequencies\n freq = np.array(freq) / (sfreq / 2.)\n if freq[0] != 0 or freq[-1] != 1:\n raise ValueError('freq must start at 0 and end an Nyquist (%s), got %s'\n % (sfreq / 2., freq))\n gain = np.array(gain)\n\n # Use overlap-add filter with a fixed length\n N = _check_zero_phase_length(filter_length, phase, gain[-1])\n # construct symmetric (linear phase) filter\n if phase == 'minimum':\n h = fir_design(N * 2 - 1, freq, gain, window=fir_window)\n h = minimum_phase(h)\n else:\n h = fir_design(N, freq, gain, window=fir_window)\n assert h.size == N\n att_db, att_freq = _filter_attenuation(h, freq, gain)\n if phase == 'zero-double':\n att_db += 6\n if att_db < min_att_db:\n att_freq *= sfreq / 2.\n warn('Attenuation at stop frequency %0.2f Hz is only %0.2f dB. 
'\n 'Increase filter_length for higher attenuation.'\n % (att_freq, att_db))\n return h\n\n\ndef _check_zero_phase_length(N, phase, gain_nyq=0):\n N = int(N)\n if N % 2 == 0:\n if phase == 'zero':\n raise RuntimeError('filter_length must be odd if phase=\"zero\", '\n 'got %s' % N)\n elif phase == 'zero-double' and gain_nyq == 1:\n N += 1\n return N\n\n\ndef _check_coefficients(system):\n \"\"\"Check for filter stability.\"\"\"\n if isinstance(system, tuple):\n from scipy.signal import tf2zpk\n z, p, k = tf2zpk(*system)\n else: # sos\n from scipy.signal import sos2zpk\n z, p, k = sos2zpk(system)\n if np.any(np.abs(p) > 1.0):\n raise RuntimeError('Filter poles outside unit circle, filter will be '\n 'unstable. Consider using different filter '\n 'coefficients.')\n\n\ndef _filtfilt(x, iir_params, picks, n_jobs, copy):\n \"\"\"Call filtfilt.\"\"\"\n # set up array for filtering, reshape to 2D, operate on last axis\n from scipy.signal import filtfilt, sosfiltfilt\n padlen = min(iir_params['padlen'], x.shape[-1] - 1)\n n_jobs = check_n_jobs(n_jobs)\n x, orig_shape, picks = _prep_for_filtering(x, copy, picks)\n if 'sos' in iir_params:\n fun = partial(sosfiltfilt, sos=iir_params['sos'], padlen=padlen,\n axis=-1)\n _check_coefficients(iir_params['sos'])\n else:\n fun = partial(filtfilt, b=iir_params['b'], a=iir_params['a'],\n padlen=padlen, axis=-1)\n _check_coefficients((iir_params['b'], iir_params['a']))\n if n_jobs == 1:\n for p in picks:\n x[p] = fun(x=x[p])\n else:\n parallel, p_fun, _ = parallel_func(fun, n_jobs)\n data_new = parallel(p_fun(x=x[p]) for p in picks)\n for pp, p in enumerate(picks):\n x[p] = data_new[pp]\n x.shape = orig_shape\n return x\n\n\ndef estimate_ringing_samples(system, max_try=100000):\n \"\"\"Estimate filter ringing.\n\n Parameters\n ----------\n system : tuple | ndarray\n A tuple of (b, a) or ndarray of second-order sections coefficients.\n max_try : int\n Approximate maximum number of samples to try.\n This will be changed to a multiple of 1000.\n\n Returns\n -------\n n : int\n The approximate ringing.\n \"\"\"\n from scipy import signal\n if isinstance(system, tuple): # TF\n kind = 'ba'\n b, a = system\n zi = [0.] * (len(a) - 1)\n else:\n kind = 'sos'\n sos = system\n zi = [[0.] * 2] * len(sos)\n n_per_chunk = 1000\n n_chunks_max = int(np.ceil(max_try / float(n_per_chunk)))\n x = np.zeros(n_per_chunk)\n x[0] = 1\n last_good = n_per_chunk\n thresh_val = 0\n for ii in range(n_chunks_max):\n if kind == 'ba':\n h, zi = signal.lfilter(b, a, x, zi=zi)\n else:\n h, zi = signal.sosfilt(sos, x, zi=zi)\n x[0] = 0 # for subsequent iterations we want zero input\n h = np.abs(h)\n thresh_val = max(0.001 * np.max(h), thresh_val)\n idx = np.where(np.abs(h) > thresh_val)[0]\n if len(idx) > 0:\n last_good = idx[-1]\n else: # this iteration had no sufficiently lange values\n idx = (ii - 1) * n_per_chunk + last_good\n break\n else:\n warn('Could not properly estimate ringing for the filter')\n idx = n_per_chunk * n_chunks_max\n return idx\n\n\n_ftype_dict = {\n 'butter': 'Butterworth',\n 'cheby1': 'Chebyshev I',\n 'cheby2': 'Chebyshev II',\n 'ellip': 'Cauer/elliptic',\n 'bessel': 'Bessel/Thomson',\n}\n\n\n@verbose\ndef construct_iir_filter(iir_params, f_pass=None, f_stop=None, sfreq=None,\n btype=None, return_copy=True, verbose=None):\n \"\"\"Use IIR parameters to get filtering coefficients.\n\n This function works like a wrapper for iirdesign and iirfilter in\n scipy.signal to make filter coefficients for IIR filtering. 
It also\n estimates the number of padding samples based on the filter ringing.\n It creates a new iir_params dict (or updates the one passed to the\n function) with the filter coefficients ('b' and 'a') and an estimate\n of the padding necessary ('padlen') so IIR filtering can be performed.\n\n Parameters\n ----------\n iir_params : dict\n Dictionary of parameters to use for IIR filtering.\n\n * If ``iir_params['sos']`` exists, it will be used as\n second-order sections to perform IIR filtering.\n\n .. versionadded:: 0.13\n\n * Otherwise, if ``iir_params['b']`` and ``iir_params['a']``\n exist, these will be used as coefficients to perform IIR\n filtering.\n * Otherwise, if ``iir_params['order']`` and\n ``iir_params['ftype']`` exist, these will be used with\n `scipy.signal.iirfilter` to make a filter.\n You should also supply ``iir_params['rs']`` and\n ``iir_params['rp']`` if using elliptic or Chebychev filters.\n * Otherwise, if ``iir_params['gpass']`` and\n ``iir_params['gstop']`` exist, these will be used with\n `scipy.signal.iirdesign` to design a filter.\n * ``iir_params['padlen']`` defines the number of samples to pad\n (and an estimate will be calculated if it is not given).\n See Notes for more details.\n * ``iir_params['output']`` defines the system output kind when\n designing filters, either \"sos\" or \"ba\". For 0.13 the\n default is 'ba' but will change to 'sos' in 0.14.\n\n f_pass : float or list of float\n Frequency for the pass-band. Low-pass and high-pass filters should\n be a float, band-pass should be a 2-element list of float.\n f_stop : float or list of float\n Stop-band frequency (same size as f_pass). Not used if 'order' is\n specified in iir_params.\n sfreq : float | None\n The sample rate.\n btype : str\n Type of filter. Should be 'lowpass', 'highpass', or 'bandpass'\n (or analogous string representations known to\n :func:`scipy.signal.iirfilter`).\n return_copy : bool\n If False, the 'sos', 'b', 'a', and 'padlen' entries in\n ``iir_params`` will be set inplace (if they weren't already).\n Otherwise, a new ``iir_params`` instance will be created and\n returned with these entries.\n %(verbose)s\n\n Returns\n -------\n iir_params : dict\n Updated iir_params dict, with the entries (set only if they didn't\n exist before) for 'sos' (or 'b', 'a'), and 'padlen' for\n IIR filtering.\n\n See Also\n --------\n mne.filter.filter_data\n mne.io.Raw.filter\n\n Notes\n -----\n This function triages calls to :func:`scipy.signal.iirfilter` and\n :func:`scipy.signal.iirdesign` based on the input arguments (see\n linked functions for more details).\n\n .. versionchanged:: 0.14\n Second-order sections are used in filter design by default (replacing\n ``output='ba'`` by ``output='sos'``) to help ensure filter stability\n and reduce numerical error.\n\n Examples\n --------\n iir_params can have several forms. Consider constructing a low-pass\n filter at 40 Hz with 1000 Hz sampling rate.\n\n In the most basic (2-parameter) form of iir_params, the order of the\n filter 'N' and the type of filtering 'ftype' are specified. To get\n coefficients for a 4th-order Butterworth filter, this would be:\n\n >>> iir_params = dict(order=4, ftype='butter', output='sos') # doctest:+SKIP\n >>> iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low', return_copy=False) # doctest:+SKIP\n >>> print((2 * len(iir_params['sos']), iir_params['padlen'])) # doctest:+SKIP\n (4, 82)\n\n Filters can also be constructed using filter design methods. 
To get a\n 40 Hz Chebyshev type 1 lowpass with specific gain characteristics in the\n pass and stop bands (assuming the desired stop band is at 45 Hz), this\n would be a filter with much longer ringing:\n\n >>> iir_params = dict(ftype='cheby1', gpass=3, gstop=20, output='sos') # doctest:+SKIP\n >>> iir_params = construct_iir_filter(iir_params, 40, 50, 1000, 'low') # doctest:+SKIP\n >>> print((2 * len(iir_params['sos']), iir_params['padlen'])) # doctest:+SKIP\n (6, 439)\n\n Padding and/or filter coefficients can also be manually specified. For\n a 10-sample moving window with no padding during filtering, for example,\n one can just do:\n\n >>> iir_params = dict(b=np.ones((10)), a=[1, 0], padlen=0)\n >>> iir_params = construct_iir_filter(iir_params, return_copy=False)\n >>> print((iir_params['b'], iir_params['a'], iir_params['padlen'])) # doctest:+SKIP\n (array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]), [1, 0], 0)\n\n For more information, see the tutorials\n :ref:`disc-filtering` and :ref:`tut-filter-resample`.\n \"\"\" # noqa: E501\n from scipy.signal import iirfilter, iirdesign, freqz, sosfreqz\n known_filters = ('bessel', 'butter', 'butterworth', 'cauer', 'cheby1',\n 'cheby2', 'chebyshev1', 'chebyshev2', 'chebyshevi',\n 'chebyshevii', 'ellip', 'elliptic')\n if not isinstance(iir_params, dict):\n raise TypeError('iir_params must be a dict, got %s' % type(iir_params))\n # if the filter has been designed, we're good to go\n Wp = None\n if 'sos' in iir_params:\n system = iir_params['sos']\n output = 'sos'\n elif 'a' in iir_params and 'b' in iir_params:\n system = (iir_params['b'], iir_params['a'])\n output = 'ba'\n else:\n output = iir_params.get('output', 'sos')\n _check_option('output', output, ('ba', 'sos'))\n # ensure we have a valid ftype\n if 'ftype' not in iir_params:\n raise RuntimeError('ftype must be an entry in iir_params if ''b'' '\n 'and ''a'' are not specified')\n ftype = iir_params['ftype']\n if ftype not in known_filters:\n raise RuntimeError('ftype must be in filter_dict from '\n 'scipy.signal (e.g., butter, cheby1, etc.) 
not '\n '%s' % ftype)\n\n # use order-based design\n f_pass = np.atleast_1d(f_pass)\n if f_pass.ndim > 1:\n raise ValueError('frequencies must be 1D, got %dD' % f_pass.ndim)\n edge_freqs = ', '.join('%0.2f' % (f,) for f in f_pass)\n Wp = f_pass / (float(sfreq) / 2)\n # IT will de designed\n ftype_nice = _ftype_dict.get(ftype, ftype)\n logger.info('')\n logger.info('IIR filter parameters')\n logger.info('---------------------')\n logger.info('%s %s zero-phase (two-pass forward and reverse) '\n 'non-causal filter:' % (ftype_nice, btype))\n # SciPy designs for -3dB but we do forward-backward, so this is -6dB\n if 'order' in iir_params:\n kwargs = dict(N=iir_params['order'], Wn=Wp, btype=btype,\n ftype=ftype, output=output)\n for key in ('rp', 'rs'):\n if key in iir_params:\n kwargs[key] = iir_params[key]\n system = iirfilter(**kwargs)\n logger.info('- Filter order %d (effective, after forward-backward)'\n % (2 * iir_params['order'] * len(Wp),))\n else:\n # use gpass / gstop design\n Ws = np.asanyarray(f_stop) / (float(sfreq) / 2)\n if 'gpass' not in iir_params or 'gstop' not in iir_params:\n raise ValueError('iir_params must have at least ''gstop'' and'\n ' ''gpass'' (or ''N'') entries')\n system = iirdesign(Wp, Ws, iir_params['gpass'],\n iir_params['gstop'], ftype=ftype, output=output)\n\n if system is None:\n raise RuntimeError('coefficients could not be created from iir_params')\n # do some sanity checks\n _check_coefficients(system)\n\n # get the gains at the cutoff frequencies\n if Wp is not None:\n if output == 'sos':\n cutoffs = sosfreqz(system, worN=Wp * np.pi)[1]\n else:\n cutoffs = freqz(system[0], system[1], worN=Wp * np.pi)[1]\n # 2 * 20 here because we do forward-backward filtering\n cutoffs = 40 * np.log10(np.abs(cutoffs))\n cutoffs = ', '.join(['%0.2f' % (c,) for c in cutoffs])\n logger.info('- Cutoff%s at %s Hz: %s dB'\n % (_pl(f_pass), edge_freqs, cutoffs))\n # now deal with padding\n if 'padlen' not in iir_params:\n padlen = estimate_ringing_samples(system)\n else:\n padlen = iir_params['padlen']\n\n if return_copy:\n iir_params = deepcopy(iir_params)\n\n iir_params.update(dict(padlen=padlen))\n if output == 'sos':\n iir_params.update(sos=system)\n else:\n iir_params.update(b=system[0], a=system[1])\n logger.info('')\n return iir_params\n\n\ndef _check_method(method, iir_params, extra_types=()):\n \"\"\"Parse method arguments.\"\"\"\n allowed_types = ['iir', 'fir', 'fft'] + list(extra_types)\n _validate_type(method, 'str', 'method')\n _check_option('method', method, allowed_types)\n if method == 'fft':\n method = 'fir' # use the better name\n if method == 'iir':\n if iir_params is None:\n iir_params = dict()\n if len(iir_params) == 0 or (len(iir_params) == 1 and\n 'output' in iir_params):\n iir_params = dict(order=4, ftype='butter',\n output=iir_params.get('output', 'sos'))\n elif iir_params is not None:\n raise ValueError('iir_params must be None if method != \"iir\"')\n return iir_params, method\n\n\n@verbose\ndef filter_data(data, sfreq, l_freq, h_freq, picks=None, filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1,\n method='fir', iir_params=None, copy=True, phase='zero',\n fir_window='hamming', fir_design='firwin',\n pad='reflect_limited', verbose=None):\n \"\"\"Filter a subset of channels.\n\n Parameters\n ----------\n data : ndarray, shape (..., n_times)\n The data to filter.\n sfreq : float\n The sample frequency in Hz.\n %(l_freq)s\n %(h_freq)s\n %(picks_nostr)s\n Currently this is only supported for 2D (n_channels, n_times) and\n 
3D (n_epochs, n_channels, n_times) arrays.\n %(filter_length)s\n %(l_trans_bandwidth)s\n %(h_trans_bandwidth)s\n %(n_jobs-fir)s\n %(method-fir)s\n %(iir_params)s\n copy : bool\n If True, a copy of x, filtered, is returned. Otherwise, it operates\n on x in place.\n %(phase)s\n %(fir_window)s\n %(fir_design)s\n %(pad-fir)s\n The default is ``'reflect_limited'``.\n\n .. versionadded:: 0.15\n %(verbose)s\n\n Returns\n -------\n data : ndarray, shape (..., n_times)\n The filtered data.\n\n See Also\n --------\n construct_iir_filter\n create_filter\n mne.io.Raw.filter\n notch_filter\n resample\n\n Notes\n -----\n Applies a zero-phase low-pass, high-pass, band-pass, or band-stop\n filter to the channels selected by ``picks``.\n\n ``l_freq`` and ``h_freq`` are the frequencies below which and above\n which, respectively, to filter out of the data. Thus the uses are:\n\n * ``l_freq < h_freq``: band-pass filter\n * ``l_freq > h_freq``: band-stop filter\n * ``l_freq is not None and h_freq is None``: high-pass filter\n * ``l_freq is None and h_freq is not None``: low-pass filter\n\n .. note:: If n_jobs > 1, more memory is required as\n ``len(picks) * n_times`` additional time points need to\n be temporaily stored in memory.\n\n For more information, see the tutorials\n :ref:`disc-filtering` and :ref:`tut-filter-resample` and\n :func:`mne.filter.create_filter`.\n \"\"\"\n data = _check_filterable(data)\n iir_params, method = _check_method(method, iir_params)\n filt = create_filter(\n data, sfreq, l_freq, h_freq, filter_length, l_trans_bandwidth,\n h_trans_bandwidth, method, iir_params, phase, fir_window, fir_design)\n if method in ('fir', 'fft'):\n data = _overlap_add_filter(data, filt, None, phase, picks, n_jobs,\n copy, pad)\n else:\n data = _filtfilt(data, filt, picks, n_jobs, copy)\n return data\n\n\n@verbose\ndef create_filter(data, sfreq, l_freq, h_freq, filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto',\n method='fir', iir_params=None, phase='zero',\n fir_window='hamming', fir_design='firwin', verbose=None):\n r\"\"\"Create a FIR or IIR filter.\n\n ``l_freq`` and ``h_freq`` are the frequencies below which and above\n which, respectively, to filter out of the data. Thus the uses are:\n\n * ``l_freq < h_freq``: band-pass filter\n * ``l_freq > h_freq``: band-stop filter\n * ``l_freq is not None and h_freq is None``: high-pass filter\n * ``l_freq is None and h_freq is not None``: low-pass filter\n\n Parameters\n ----------\n data : ndarray, shape (..., n_times) | None\n The data that will be filtered. This is used for sanity checking\n only. If None, no sanity checking related to the length of the signal\n relative to the filter order will be performed.\n sfreq : float\n The sample frequency in Hz.\n %(l_freq)s\n %(h_freq)s\n %(filter_length)s\n %(l_trans_bandwidth)s\n %(h_trans_bandwidth)s\n %(method-fir)s\n %(iir_params)s\n %(phase)s\n %(fir_window)s\n %(fir_design)s\n %(verbose)s\n\n Returns\n -------\n filt : array or dict\n Will be an array of FIR coefficients for method='fir', and dict\n with IIR parameters for method='iir'.\n\n See Also\n --------\n filter_data\n\n Notes\n -----\n .. note:: For FIR filters, the *cutoff frequency*, i.e. the -6 dB point,\n is in the middle of the transition band (when using phase='zero'\n and fir_design='firwin'). 
For IIR filters, the cutoff frequency\n is given by ``l_freq`` or ``h_freq`` directly, and\n ``l_trans_bandwidth`` and ``h_trans_bandwidth`` are ignored.\n\n **Band-pass filter**\n\n The frequency response is (approximately) given by::\n\n 1-| ----------\n | /| | \\\n |H| | / | | \\\n | / | | \\\n | / | | \\\n 0-|---------- | | --------------\n | | | | | |\n 0 Fs1 Fp1 Fp2 Fs2 Nyq\n\n Where:\n\n * Fs1 = Fp1 - l_trans_bandwidth in Hz\n * Fs2 = Fp2 + h_trans_bandwidth in Hz\n\n **Band-stop filter**\n\n The frequency response is (approximately) given by::\n\n 1-|--------- ----------\n | \\ /\n |H| | \\ /\n | \\ /\n | \\ /\n 0-| -----------\n | | | | | |\n 0 Fp1 Fs1 Fs2 Fp2 Nyq\n\n Where ``Fs1 = Fp1 + l_trans_bandwidth`` and\n ``Fs2 = Fp2 - h_trans_bandwidth``.\n\n Multiple stop bands can be specified using arrays.\n\n **Low-pass filter**\n\n The frequency response is (approximately) given by::\n\n 1-|------------------------\n | \\\n |H| | \\\n | \\\n | \\\n 0-| ----------------\n | | | |\n 0 Fp Fstop Nyq\n\n Where ``Fstop = Fp + trans_bandwidth``.\n\n **High-pass filter**\n\n The frequency response is (approximately) given by::\n\n 1-| -----------------------\n | /\n |H| | /\n | /\n | /\n 0-|---------\n | | | |\n 0 Fstop Fp Nyq\n\n Where ``Fstop = Fp - trans_bandwidth``.\n\n .. versionadded:: 0.14\n \"\"\"\n sfreq = float(sfreq)\n if sfreq < 0:\n raise ValueError('sfreq must be positive')\n # If no data specified, sanity checking will be skipped\n if data is None:\n logger.info('No data specified. Sanity checks related to the length of'\n ' the signal relative to the filter order will be'\n ' skipped.')\n if h_freq is not None:\n h_freq = np.array(h_freq, float).ravel()\n if (h_freq > (sfreq / 2.)).any():\n raise ValueError('h_freq (%s) must be less than the Nyquist '\n 'frequency %s' % (h_freq, sfreq / 2.))\n if l_freq is not None:\n l_freq = np.array(l_freq, float).ravel()\n if (l_freq == 0).all():\n l_freq = None\n iir_params, method = _check_method(method, iir_params)\n if l_freq is None and h_freq is None:\n data, sfreq, _, _, _, _, filter_length, phase, fir_window, \\\n fir_design = _triage_filter_params(\n data, sfreq, None, None, None, None,\n filter_length, method, phase, fir_window, fir_design)\n if method == 'iir':\n out = dict() if iir_params is None else deepcopy(iir_params)\n out.update(b=np.array([1.]), a=np.array([1.]))\n else:\n freq = [0, sfreq / 2.]\n gain = [1., 1.]\n if l_freq is None and h_freq is not None:\n logger.info('Setting up low-pass filter at %0.2g Hz' % (h_freq,))\n data, sfreq, _, f_p, _, f_s, filter_length, phase, fir_window, \\\n fir_design = _triage_filter_params(\n data, sfreq, None, h_freq, None, h_trans_bandwidth,\n filter_length, method, phase, fir_window, fir_design)\n if method == 'iir':\n out = construct_iir_filter(iir_params, f_p, f_s, sfreq, 'lowpass')\n else: # 'fir'\n freq = [0, f_p, f_s]\n gain = [1, 1, 0]\n if f_s != sfreq / 2.:\n freq += [sfreq / 2.]\n gain += [0]\n elif l_freq is not None and h_freq is None:\n logger.info('Setting up high-pass filter at %0.2g Hz' % (l_freq,))\n data, sfreq, pass_, _, stop, _, filter_length, phase, fir_window, \\\n fir_design = _triage_filter_params(\n data, sfreq, l_freq, None, l_trans_bandwidth, None,\n filter_length, method, phase, fir_window, fir_design)\n if method == 'iir':\n out = construct_iir_filter(iir_params, pass_, stop, sfreq,\n 'highpass')\n else: # 'fir'\n freq = [stop, pass_, sfreq / 2.]\n gain = [0, 1, 1]\n if stop != 0:\n freq = [0] + freq\n gain = [0] + gain\n elif l_freq is not None and 
h_freq is not None:\n if (l_freq < h_freq).any():\n logger.info('Setting up band-pass filter from %0.2g - %0.2g Hz'\n % (l_freq, h_freq))\n data, sfreq, f_p1, f_p2, f_s1, f_s2, filter_length, phase, \\\n fir_window, fir_design = _triage_filter_params(\n data, sfreq, l_freq, h_freq, l_trans_bandwidth,\n h_trans_bandwidth, filter_length, method, phase,\n fir_window, fir_design)\n if method == 'iir':\n out = construct_iir_filter(iir_params, [f_p1, f_p2],\n [f_s1, f_s2], sfreq, 'bandpass')\n else: # 'fir'\n freq = [f_s1, f_p1, f_p2, f_s2]\n gain = [0, 1, 1, 0]\n if f_s2 != sfreq / 2.:\n freq += [sfreq / 2.]\n gain += [0]\n if f_s1 != 0:\n freq = [0] + freq\n gain = [0] + gain\n else:\n # This could possibly be removed after 0.14 release, but might\n # as well leave it in to sanity check notch_filter\n if len(l_freq) != len(h_freq):\n raise ValueError('l_freq and h_freq must be the same length')\n msg = 'Setting up band-stop filter'\n if len(l_freq) == 1:\n msg += ' from %0.2g - %0.2g Hz' % (h_freq, l_freq)\n logger.info(msg)\n # Note: order of outputs is intentionally switched here!\n data, sfreq, f_s1, f_s2, f_p1, f_p2, filter_length, phase, \\\n fir_window, fir_design = _triage_filter_params(\n data, sfreq, h_freq, l_freq, h_trans_bandwidth,\n l_trans_bandwidth, filter_length, method, phase,\n fir_window, fir_design, bands='arr', reverse=True)\n if method == 'iir':\n if len(f_p1) != 1:\n raise ValueError('Multiple stop-bands can only be used '\n 'with FIR filtering')\n out = construct_iir_filter(iir_params, [f_p1[0], f_p2[0]],\n [f_s1[0], f_s2[0]], sfreq,\n 'bandstop')\n else: # 'fir'\n freq = np.r_[f_p1, f_s1, f_s2, f_p2]\n gain = np.r_[np.ones_like(f_p1), np.zeros_like(f_s1),\n np.zeros_like(f_s2), np.ones_like(f_p2)]\n order = np.argsort(freq)\n freq = freq[order]\n gain = gain[order]\n if freq[0] != 0:\n freq = np.r_[[0.], freq]\n gain = np.r_[[1.], gain]\n if freq[-1] != sfreq / 2.:\n freq = np.r_[freq, [sfreq / 2.]]\n gain = np.r_[gain, [1.]]\n if np.any(np.abs(np.diff(gain, 2)) > 1):\n raise ValueError('Stop bands are not sufficiently '\n 'separated.')\n if method == 'fir':\n out = _construct_fir_filter(sfreq, freq, gain, filter_length, phase,\n fir_window, fir_design)\n return out\n\n\n@verbose\ndef notch_filter(x, Fs, freqs, filter_length='auto', notch_widths=None,\n trans_bandwidth=1, method='fir', iir_params=None,\n mt_bandwidth=None, p_value=0.05, picks=None, n_jobs=1,\n copy=True, phase='zero', fir_window='hamming',\n fir_design='firwin', pad='reflect_limited', verbose=None):\n r\"\"\"Notch filter for the signal x.\n\n Applies a zero-phase notch filter to the signal x, operating on the last\n dimension.\n\n Parameters\n ----------\n x : array\n Signal to filter.\n Fs : float\n Sampling rate in Hz.\n freqs : float | array of float | None\n Frequencies to notch filter in Hz, e.g. np.arange(60, 241, 60).\n None can only be used with the mode 'spectrum_fit', where an F\n test is used to find sinusoidal components.\n %(filter_length)s\n notch_widths : float | array of float | None\n Width of the stop band (centred at each freq in freqs) in Hz.\n If None, freqs / 200 is used.\n trans_bandwidth : float\n Width of the transition band in Hz.\n Only used for ``method='fir'``.\n %(method-fir)s\n 'spectrum_fit' will use multi-taper estimation of sinusoidal\n components. 
If freqs=None and method='spectrum_fit', significant\n sinusoidal components are detected using an F test, and noted by\n logging.\n %(iir_params)s\n mt_bandwidth : float | None\n The bandwidth of the multitaper windowing function in Hz.\n Only used in 'spectrum_fit' mode.\n p_value : float\n P-value to use in F-test thresholding to determine significant\n sinusoidal components to remove when method='spectrum_fit' and\n freqs=None. Note that this will be Bonferroni corrected for the\n number of frequencies, so large p-values may be justified.\n %(picks_nostr)s\n Only supported for 2D (n_channels, n_times) and 3D\n (n_epochs, n_channels, n_times) data.\n %(n_jobs-fir)s\n copy : bool\n If True, a copy of x, filtered, is returned. Otherwise, it operates\n on x in place.\n %(phase)s\n %(fir_window)s\n %(fir_design)s\n %(pad-fir)s\n %(verbose)s\n\n Returns\n -------\n xf : array\n The x array filtered.\n\n See Also\n --------\n filter_data\n resample\n\n Notes\n -----\n The frequency response is (approximately) given by::\n\n 1-|---------- -----------\n | \\ /\n |H| | \\ /\n | \\ /\n | \\ /\n 0-| -\n | | | | |\n 0 Fp1 freq Fp2 Nyq\n\n For each freq in freqs, where ``Fp1 = freq - trans_bandwidth / 2`` and\n ``Fs2 = freq + trans_bandwidth / 2``.\n\n References\n ----------\n Multi-taper removal is inspired by code from the Chronux toolbox, see\n www.chronux.org and the book \"Observed Brain Dynamics\" by Partha Mitra\n & Hemant Bokil, Oxford University Press, New York, 2008. Please\n cite this in publications if method 'spectrum_fit' is used.\n \"\"\"\n iir_params, method = _check_method(method, iir_params, ['spectrum_fit'])\n\n if freqs is not None:\n freqs = np.atleast_1d(freqs)\n elif method != 'spectrum_fit':\n raise ValueError('freqs=None can only be used with method '\n 'spectrum_fit')\n\n # Only have to deal with notch_widths for non-autodetect\n if freqs is not None:\n if notch_widths is None:\n notch_widths = freqs / 200.0\n elif np.any(notch_widths < 0):\n raise ValueError('notch_widths must be >= 0')\n else:\n notch_widths = np.atleast_1d(notch_widths)\n if len(notch_widths) == 1:\n notch_widths = notch_widths[0] * np.ones_like(freqs)\n elif len(notch_widths) != len(freqs):\n raise ValueError('notch_widths must be None, scalar, or the '\n 'same length as freqs')\n\n if method in ('fir', 'iir'):\n # Speed this up by computing the fourier coefficients once\n tb_2 = trans_bandwidth / 2.0\n lows = [freq - nw / 2.0 - tb_2\n for freq, nw in zip(freqs, notch_widths)]\n highs = [freq + nw / 2.0 + tb_2\n for freq, nw in zip(freqs, notch_widths)]\n xf = filter_data(x, Fs, highs, lows, picks, filter_length, tb_2, tb_2,\n n_jobs, method, iir_params, copy, phase, fir_window,\n fir_design, pad=pad)\n elif method == 'spectrum_fit':\n xf = _mt_spectrum_proc(x, Fs, freqs, notch_widths, mt_bandwidth,\n p_value, picks, n_jobs, copy)\n\n return xf\n\n\ndef _mt_spectrum_proc(x, sfreq, line_freqs, notch_widths, mt_bandwidth,\n p_value, picks, n_jobs, copy):\n \"\"\"Call _mt_spectrum_remove.\"\"\"\n from scipy import stats\n # set up array for filtering, reshape to 2D, operate on last axis\n n_jobs = check_n_jobs(n_jobs)\n x, orig_shape, picks = _prep_for_filtering(x, copy, picks)\n\n # XXX need to implement the moving window version for raw files\n n_times = x.shape[1]\n\n # max taper size chosen because it has an max error < 1e-3:\n # >>> np.max(np.diff(dpss_windows(953, 4, 100)[0]))\n # 0.00099972447657578449\n # so we use 1000 because it's the first \"nice\" number bigger than 953.\n # but if we have a 
new enough scipy,\n # it's only ~0.175 sec for 8 tapers even with 100000 samples\n dpss_n_times_max = 100000 if check_version('scipy', '1.1') else 1000\n\n # figure out what tapers to use\n window_fun, eigvals, _ = _compute_mt_params(\n n_times, sfreq, mt_bandwidth, False, False,\n interp_from=min(n_times, dpss_n_times_max), verbose=False)\n\n # F-stat of 1-p point\n threshold = stats.f.ppf(1 - p_value / n_times, 2, 2 * len(window_fun) - 2)\n\n if n_jobs == 1:\n freq_list = list()\n for ii, x_ in enumerate(x):\n if ii in picks:\n x[ii], f = _mt_spectrum_remove(x_, sfreq, line_freqs,\n notch_widths, window_fun,\n threshold)\n freq_list.append(f)\n else:\n parallel, p_fun, _ = parallel_func(_mt_spectrum_remove, n_jobs)\n data_new = parallel(p_fun(x_, sfreq, line_freqs, notch_widths,\n window_fun, threshold)\n for xi, x_ in enumerate(x)\n if xi in picks)\n freq_list = [d[1] for d in data_new]\n data_new = np.array([d[0] for d in data_new])\n x[picks, :] = data_new\n\n # report found frequencies\n for rm_freqs in freq_list:\n if line_freqs is None:\n if len(rm_freqs) > 0:\n logger.info('Detected notch frequencies:\\n%s'\n % ', '.join([str(rm_f) for rm_f in rm_freqs]))\n else:\n logger.info('Detected notch frequecies:\\nNone')\n\n x.shape = orig_shape\n return x\n\n\ndef _mt_spectrum_remove(x, sfreq, line_freqs, notch_widths,\n window_fun, threshold):\n \"\"\"Use MT-spectrum to remove line frequencies.\n\n Based on Chronux. If line_freqs is specified, all freqs within notch_width\n of each line_freq is set to zero.\n \"\"\"\n # drop the even tapers\n n_tapers = len(window_fun)\n tapers_odd = np.arange(0, n_tapers, 2)\n tapers_even = np.arange(1, n_tapers, 2)\n tapers_use = window_fun[tapers_odd]\n\n # sum tapers for (used) odd prolates across time (n_tapers, 1)\n H0 = np.sum(tapers_use, axis=1)\n\n # sum of squares across tapers (1, )\n H0_sq = sum_squared(H0)\n\n # make \"time\" vector\n rads = 2 * np.pi * (np.arange(x.size) / float(sfreq))\n\n # compute mt_spectrum (returning n_ch, n_tapers, n_freq)\n x_p, freqs = _mt_spectra(x[np.newaxis, :], window_fun, sfreq)\n\n # sum of the product of x_p and H0 across tapers (1, n_freqs)\n x_p_H0 = np.sum(x_p[:, tapers_odd, :] *\n H0[np.newaxis, :, np.newaxis], axis=1)\n\n # resulting calculated amplitudes for all freqs\n A = x_p_H0 / H0_sq\n\n if line_freqs is None:\n # figure out which freqs to remove using F stat\n\n # estimated coefficient\n x_hat = A * H0[:, np.newaxis]\n\n # numerator for F-statistic\n num = (n_tapers - 1) * (A * A.conj()).real * H0_sq\n # denominator for F-statistic\n den = (np.sum(np.abs(x_p[:, tapers_odd, :] - x_hat) ** 2, 1) +\n np.sum(np.abs(x_p[:, tapers_even, :]) ** 2, 1))\n den[den == 0] = np.inf\n f_stat = num / den\n\n # find frequencies to remove\n indices = np.where(f_stat > threshold)[1]\n rm_freqs = freqs[indices]\n else:\n # specify frequencies\n indices_1 = np.unique([np.argmin(np.abs(freqs - lf))\n for lf in line_freqs])\n notch_widths /= 2.0\n indices_2 = [np.logical_and(freqs > lf - nw, freqs < lf + nw)\n for lf, nw in zip(line_freqs, notch_widths)]\n indices_2 = np.where(np.any(np.array(indices_2), axis=0))[0]\n indices = np.unique(np.r_[indices_1, indices_2])\n rm_freqs = freqs[indices]\n\n fits = list()\n for ind in indices:\n c = 2 * A[0, ind]\n fit = np.abs(c) * np.cos(freqs[ind] * rads + np.angle(c))\n fits.append(fit)\n\n if len(fits) == 0:\n datafit = 0.0\n else:\n # fitted sinusoids are summed, and subtracted from data\n datafit = np.sum(np.atleast_2d(fits), axis=0)\n\n return x - datafit, 
rm_freqs\n\n\ndef _check_filterable(x, kind='filtered'):\n x = np.asanyarray(x)\n if x.dtype != np.float64:\n raise ValueError('Data to be %s must be real floating, got %s'\n % (kind, x.dtype,))\n return x\n\n\n@verbose\ndef resample(x, up=1., down=1., npad=100, axis=-1, window='boxcar', n_jobs=1,\n pad='reflect_limited', verbose=None):\n \"\"\"Resample an array.\n\n Operates along the last dimension of the array.\n\n Parameters\n ----------\n x : ndarray\n Signal to resample.\n up : float\n Factor to upsample by.\n down : float\n Factor to downsample by.\n %(npad)s\n axis : int\n Axis along which to resample (default is the last axis).\n %(window-resample)s\n %(n_jobs-cuda)s\n %(pad-fir)s\n The default is ``'reflect_limited'``.\n\n .. versionadded:: 0.15\n %(verbose)s\n\n Returns\n -------\n y : array\n The x array resampled.\n\n Notes\n -----\n This uses (hopefully) intelligent edge padding and frequency-domain\n windowing improve scipy.signal.resample's resampling method, which\n we have adapted for our use here. Choices of npad and window have\n important consequences, and the default choices should work well\n for most natural signals.\n\n Resampling arguments are broken into \"up\" and \"down\" components for future\n compatibility in case we decide to use an upfirdn implementation. The\n current implementation is functionally equivalent to passing\n up=up/down and down=1.\n \"\"\"\n from scipy.signal import get_window\n # check explicitly for backwards compatibility\n if not isinstance(axis, int):\n err = (\"The axis parameter needs to be an integer (got %s). \"\n \"The axis parameter was missing from this function for a \"\n \"period of time, you might be intending to specify the \"\n \"subsequent window parameter.\" % repr(axis))\n raise TypeError(err)\n\n # make sure our arithmetic will work\n x = _check_filterable(x, 'resampled')\n ratio = float(up) / down\n if axis < 0:\n axis = x.ndim + axis\n orig_last_axis = x.ndim - 1\n if axis != orig_last_axis:\n x = x.swapaxes(axis, orig_last_axis)\n orig_shape = x.shape\n x_len = orig_shape[-1]\n if x_len == 0:\n warn('x has zero length along last axis, returning a copy of x')\n return x.copy()\n bad_msg = 'npad must be \"auto\" or an integer'\n if isinstance(npad, str):\n if npad != 'auto':\n raise ValueError(bad_msg)\n # Figure out reasonable pad that gets us to a power of 2\n min_add = min(x_len // 8, 100) * 2\n npad = 2 ** int(np.ceil(np.log2(x_len + min_add))) - x_len\n npad, extra = divmod(npad, 2)\n npads = np.array([npad, npad + extra], int)\n else:\n if npad != int(npad):\n raise ValueError(bad_msg)\n npads = np.array([npad, npad], int)\n del npad\n\n # prep for resampling now\n x_flat = x.reshape((-1, x_len))\n orig_len = x_len + npads.sum() # length after padding\n new_len = int(round(ratio * orig_len)) # length after resampling\n final_len = int(round(ratio * x_len))\n to_removes = [int(round(ratio * npads[0]))]\n to_removes.append(new_len - final_len - to_removes[0])\n to_removes = np.array(to_removes)\n # This should hold:\n # assert np.abs(to_removes[1] - to_removes[0]) <= int(np.ceil(ratio))\n\n # figure out windowing function\n if window is not None:\n if callable(window):\n W = window(fftfreq(orig_len))\n elif isinstance(window, np.ndarray) and \\\n window.shape == (orig_len,):\n W = window\n else:\n W = ifftshift(get_window(window, orig_len))\n else:\n W = np.ones(orig_len)\n W *= (float(new_len) / float(orig_len))\n\n # figure out if we should use CUDA\n n_jobs, cuda_dict = _setup_cuda_fft_resample(n_jobs, W, 
new_len)\n\n # do the resampling using an adaptation of scipy's FFT-based resample()\n # use of the 'flat' window is recommended for minimal ringing\n if n_jobs == 1:\n y = np.zeros((len(x_flat), new_len - to_removes.sum()), dtype=x.dtype)\n for xi, x_ in enumerate(x_flat):\n y[xi] = _fft_resample(x_, new_len, npads, to_removes,\n cuda_dict, pad)\n else:\n parallel, p_fun, _ = parallel_func(_fft_resample, n_jobs)\n y = parallel(p_fun(x_, new_len, npads, to_removes, cuda_dict, pad)\n for x_ in x_flat)\n y = np.array(y)\n\n # Restore the original array shape (modified for resampling)\n y.shape = orig_shape[:-1] + (y.shape[1],)\n if axis != orig_last_axis:\n y = y.swapaxes(axis, orig_last_axis)\n\n return y\n\n\ndef _resample_stim_channels(stim_data, up, down):\n \"\"\"Resample stim channels, carefully.\n\n Parameters\n ----------\n stim_data : array, shape (n_samples,) or (n_stim_channels, n_samples)\n Stim channels to resample.\n up : float\n Factor to upsample by.\n down : float\n Factor to downsample by.\n\n Returns\n -------\n stim_resampled : array, shape (n_stim_channels, n_samples_resampled)\n The resampled stim channels.\n\n Note\n ----\n The approach taken here is equivalent to the approach in the C-code.\n See the decimate_stimch function in MNE/mne_browse_raw/save.c\n \"\"\"\n stim_data = np.atleast_2d(stim_data)\n n_stim_channels, n_samples = stim_data.shape\n\n ratio = float(up) / down\n resampled_n_samples = int(round(n_samples * ratio))\n\n stim_resampled = np.zeros((n_stim_channels, resampled_n_samples))\n\n # Figure out which points in old data to subsample protect against\n # out-of-bounds, which can happen (having one sample more than\n # expected) due to padding\n sample_picks = np.minimum(\n (np.arange(resampled_n_samples) / ratio).astype(int),\n n_samples - 1\n )\n\n # Create windows starting from sample_picks[i], ending at sample_picks[i+1]\n windows = zip(sample_picks, np.r_[sample_picks[1:], n_samples])\n\n # Use the first non-zero value in each window\n for window_i, window in enumerate(windows):\n for stim_num, stim in enumerate(stim_data):\n nonzero = stim[window[0]:window[1]].nonzero()[0]\n if len(nonzero) > 0:\n val = stim[window[0] + nonzero[0]]\n else:\n val = stim[window[0]]\n stim_resampled[stim_num, window_i] = val\n\n return stim_resampled\n\n\ndef detrend(x, order=1, axis=-1):\n \"\"\"Detrend the array x.\n\n Parameters\n ----------\n x : n-d array\n Signal to detrend.\n order : int\n Fit order. Currently must be '0' or '1'.\n axis : int\n Axis of the array to operate on.\n\n Returns\n -------\n y : array\n The x array detrended.\n\n Examples\n --------\n As in :func:`scipy.signal.detrend`::\n\n >>> randgen = np.random.RandomState(9)\n >>> npoints = int(1e3)\n >>> noise = randgen.randn(npoints)\n >>> x = 3 + 2*np.linspace(0, 1, npoints) + noise\n >>> (detrend(x) - noise).max() < 0.01\n True\n \"\"\"\n from scipy.signal import detrend\n if axis > len(x.shape):\n raise ValueError('x does not have %d axes' % axis)\n if order == 0:\n fit = 'constant'\n elif order == 1:\n fit = 'linear'\n else:\n raise ValueError('order must be 0 or 1')\n\n y = detrend(x, axis=axis, type=fit)\n\n return y\n\n\n# Taken from Ifeachor and Jervis p. 356.\n# Note that here the passband ripple and stopband attenuation are\n# rendundant. The scalar passband ripple δp is expressed in dB as\n# 20 * log10(1+δp), but the scalar stopband ripple δs is expressed in dB as\n# -20 * log10(δs). 
So if we know that our stopband attenuation is 53 dB\n# (Hamming) then δs = 10 ** (53 / -20.), which means that the passband\n# deviation should be 20 * np.log10(1 + 10 ** (53 / -20.)) == 0.0194.\n_fir_window_dict = {\n 'hann': dict(name='Hann', ripple=0.0546, attenuation=44),\n 'hamming': dict(name='Hamming', ripple=0.0194, attenuation=53),\n 'blackman': dict(name='Blackman', ripple=0.0017, attenuation=74),\n}\n_known_fir_windows = tuple(sorted(_fir_window_dict.keys()))\n_known_phases = ('linear', 'zero', 'zero-double', 'minimum')\n_known_fir_designs = ('firwin', 'firwin2')\n_fir_design_dict = {\n 'firwin': 'Windowed time-domain',\n 'firwin2': 'Windowed frequency-domain',\n}\n\n\ndef _triage_filter_params(x, sfreq, l_freq, h_freq,\n l_trans_bandwidth, h_trans_bandwidth,\n filter_length, method, phase, fir_window,\n fir_design, bands='scalar', reverse=False):\n \"\"\"Validate and automate filter parameter selection.\"\"\"\n _validate_type(phase, 'str', 'phase')\n _check_option('phase', phase, _known_phases)\n _validate_type(fir_window, 'str', 'fir_window')\n _check_option('fir_window', fir_window, _known_fir_windows)\n _validate_type(fir_design, 'str', 'fir_design')\n _check_option('fir_design', fir_design, _known_fir_designs)\n\n # Helpers for reporting\n report_phase = 'non-linear phase' if phase == 'minimum' else 'zero-phase'\n causality = 'causal' if phase == 'minimum' else 'non-causal'\n if phase == 'zero-double':\n report_pass = 'two-pass forward and reverse'\n else:\n report_pass = 'one-pass'\n if l_freq is not None:\n if h_freq is not None:\n kind = 'bandstop' if reverse else 'bandpass'\n else:\n kind = 'highpass'\n assert not reverse\n elif h_freq is not None:\n kind = 'lowpass'\n assert not reverse\n else:\n kind = 'allpass'\n\n def float_array(c):\n return np.array(c, float).ravel()\n\n if bands == 'arr':\n cast = float_array\n else:\n cast = float\n sfreq = float(sfreq)\n if l_freq is not None:\n l_freq = cast(l_freq)\n if np.any(l_freq <= 0):\n raise ValueError('highpass frequency %s must be greater than zero'\n % (l_freq,))\n if h_freq is not None:\n h_freq = cast(h_freq)\n if np.any(h_freq >= sfreq / 2.):\n raise ValueError('lowpass frequency %s must be less than Nyquist '\n '(%s)' % (h_freq, sfreq / 2.))\n\n dB_cutoff = False # meaning, don't try to compute or report\n if bands == 'scalar' or (len(h_freq) == 1 and len(l_freq) == 1):\n if phase == 'zero':\n dB_cutoff = '-6 dB'\n elif phase == 'zero-double':\n dB_cutoff = '-12 dB'\n\n if method == 'iir':\n # Ignore these parameters, effectively\n l_stop, h_stop = l_freq, h_freq\n else: # method == 'fir'\n l_stop = h_stop = None\n logger.info('')\n logger.info('FIR filter parameters')\n logger.info('---------------------')\n logger.info('Designing a %s, %s, %s %s filter:'\n % (report_pass, report_phase, causality, kind))\n logger.info('- %s design (%s) method'\n % (_fir_design_dict[fir_design], fir_design))\n this_dict = _fir_window_dict[fir_window]\n if fir_design == 'firwin':\n logger.info('- {name:s} window with {ripple:0.4f} passband ripple '\n 'and {attenuation:d} dB stopband attenuation'\n .format(**this_dict))\n else:\n logger.info('- {name:s} window'.format(**this_dict))\n\n if l_freq is not None: # high-pass component\n if isinstance(l_trans_bandwidth, str):\n if l_trans_bandwidth != 'auto':\n raise ValueError('l_trans_bandwidth must be \"auto\" if '\n 'string, got \"%s\"' % l_trans_bandwidth)\n l_trans_bandwidth = np.minimum(np.maximum(0.25 * l_freq, 2.),\n l_freq)\n msg = ('- Lower transition bandwidth: %0.2f Hz'\n 
% (l_trans_bandwidth))\n if dB_cutoff:\n logger.info('- Lower passband edge: %0.2f' % (l_freq,))\n msg += ' (%s cutoff frequency: %0.2f Hz)' % (\n dB_cutoff, l_freq - l_trans_bandwidth / 2.)\n logger.info(msg)\n l_trans_bandwidth = cast(l_trans_bandwidth)\n if np.any(l_trans_bandwidth <= 0):\n raise ValueError('l_trans_bandwidth must be positive, got %s'\n % (l_trans_bandwidth,))\n l_stop = l_freq - l_trans_bandwidth\n if reverse: # band-stop style\n l_stop += l_trans_bandwidth\n l_freq += l_trans_bandwidth\n if np.any(l_stop < 0):\n raise ValueError('Filter specification invalid: Lower stop '\n 'frequency negative (%0.2f Hz). Increase pass'\n ' frequency or reduce the transition '\n 'bandwidth (l_trans_bandwidth)' % l_stop)\n if h_freq is not None: # low-pass component\n if isinstance(h_trans_bandwidth, str):\n if h_trans_bandwidth != 'auto':\n raise ValueError('h_trans_bandwidth must be \"auto\" if '\n 'string, got \"%s\"' % h_trans_bandwidth)\n h_trans_bandwidth = np.minimum(np.maximum(0.25 * h_freq, 2.),\n sfreq / 2. - h_freq)\n msg = ('- Upper transition bandwidth: %0.2f Hz'\n % (h_trans_bandwidth))\n if dB_cutoff:\n logger.info('- Upper passband edge: %0.2f Hz' % (h_freq,))\n msg += ' (%s cutoff frequency: %0.2f Hz)' % (\n dB_cutoff, h_freq + h_trans_bandwidth / 2.)\n logger.info(msg)\n h_trans_bandwidth = cast(h_trans_bandwidth)\n if np.any(h_trans_bandwidth <= 0):\n raise ValueError('h_trans_bandwidth must be positive, got %s'\n % (h_trans_bandwidth,))\n h_stop = h_freq + h_trans_bandwidth\n if reverse: # band-stop style\n h_stop -= h_trans_bandwidth\n h_freq -= h_trans_bandwidth\n if np.any(h_stop > sfreq / 2):\n raise ValueError('Effective band-stop frequency (%s) is too '\n 'high (maximum based on Nyquist is %s)'\n % (h_stop, sfreq / 2.))\n if isinstance(filter_length, str):\n filter_length = filter_length.lower()\n if filter_length == 'auto':\n h_check = h_trans_bandwidth if h_freq is not None else np.inf\n l_check = l_trans_bandwidth if l_freq is not None else np.inf\n mult_fact = 2. if fir_design == 'firwin2' else 1.\n filter_length = max(int(round(\n _length_factors[fir_window] * sfreq * mult_fact /\n float(min(h_check, l_check)))), 1)\n else:\n err_msg = ('filter_length, if a string, must be a '\n 'human-readable time, e.g. 
\"10s\", or \"auto\", not '\n '\"%s\"' % filter_length)\n if filter_length.lower().endswith('ms'):\n mult_fact = 1e-3\n filter_length = filter_length[:-2]\n elif filter_length[-1].lower() == 's':\n mult_fact = 1\n filter_length = filter_length[:-1]\n else:\n raise ValueError(err_msg)\n # now get the number\n try:\n filter_length = float(filter_length)\n except ValueError:\n raise ValueError(err_msg)\n if phase == 'zero-double': # old mode\n filter_length = 2 ** int(np.ceil(np.log2(\n filter_length * mult_fact * sfreq)))\n else:\n filter_length = max(int(np.ceil(filter_length * mult_fact *\n sfreq)), 1)\n if fir_design == 'firwin':\n filter_length += (filter_length - 1) % 2\n elif not isinstance(filter_length, int):\n raise ValueError('filter_length must be a str, int, or None, got '\n '%s' % (type(filter_length),))\n logger.info('- Filter length: %s samples (%0.3f sec)'\n % (filter_length, filter_length / sfreq))\n logger.info('')\n\n if filter_length != 'auto':\n if phase == 'zero' and method == 'fir':\n filter_length += (filter_length % 2 == 0)\n if filter_length <= 0:\n raise ValueError('filter_length must be positive, got %s'\n % (filter_length,))\n\n # If we have data supplied, do a sanity check\n if x is not None:\n x = _check_filterable(x)\n len_x = x.shape[-1]\n if method != 'fir':\n filter_length = len_x\n if filter_length > len_x and not (l_freq is None and h_freq is None):\n warn('filter_length (%s) is longer than the signal (%s), '\n 'distortion is likely. Reduce filter length or filter a '\n 'longer signal.' % (filter_length, len_x))\n\n logger.debug('Using filter length: %s' % filter_length)\n return (x, sfreq, l_freq, h_freq, l_stop, h_stop, filter_length, phase,\n fir_window, fir_design)\n\n\nclass FilterMixin(object):\n \"\"\"Object for Epoch/Evoked filtering.\"\"\"\n\n @verbose\n def savgol_filter(self, h_freq, verbose=None):\n \"\"\"Filter the data using Savitzky-Golay polynomial method.\n\n Parameters\n ----------\n h_freq : float\n Approximate high cut-off frequency in Hz. Note that this\n is not an exact cutoff, since Savitzky-Golay filtering\n :footcite:`SavitzkyGolay1964` is done using polynomial fits\n instead of FIR/IIR filtering. This parameter is thus used to\n determine the length of the window over which a 5th-order\n polynomial smoothing is used.\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Epochs or Evoked\n The object with the filtering applied.\n\n See Also\n --------\n mne.io.Raw.filter\n\n Notes\n -----\n For Savitzky-Golay low-pass approximation, see:\n\n https://gist.github.com/larsoner/bbac101d50176611136b\n\n .. versionadded:: 0.9.0\n\n References\n ----------\n .. footbibliography::\n\n Examples\n --------\n >>> import mne\n >>> from os import path as op\n >>> evoked_fname = op.join(mne.datasets.sample.data_path(), 'MEG', 'sample', 'sample_audvis-ave.fif') # doctest:+SKIP\n >>> evoked = mne.read_evokeds(evoked_fname, baseline=(None, 0))[0] # doctest:+SKIP\n >>> evoked.savgol_filter(10.) 
# low-pass at around 10 Hz # doctest:+SKIP\n >>> evoked.plot() # doctest:+SKIP\n \"\"\" # noqa: E501\n from scipy.signal import savgol_filter\n _check_preload(self, 'inst.savgol_filter')\n h_freq = float(h_freq)\n if h_freq >= self.info['sfreq'] / 2.:\n raise ValueError('h_freq must be less than half the sample rate')\n\n # savitzky-golay filtering\n window_length = (int(np.round(self.info['sfreq'] /\n h_freq)) // 2) * 2 + 1\n logger.info('Using savgol length %d' % window_length)\n self._data[:] = savgol_filter(self._data, axis=-1, polyorder=5,\n window_length=window_length)\n return self\n\n @verbose\n def filter(self, l_freq, h_freq, picks=None, filter_length='auto',\n l_trans_bandwidth='auto', h_trans_bandwidth='auto', n_jobs=1,\n method='fir', iir_params=None, phase='zero',\n fir_window='hamming', fir_design='firwin',\n skip_by_annotation=('edge', 'bad_acq_skip'), pad='edge',\n verbose=None):\n \"\"\"Filter a subset of channels.\n\n Parameters\n ----------\n %(l_freq)s\n %(h_freq)s\n %(picks_all_data)s\n %(filter_length)s\n %(l_trans_bandwidth)s\n %(h_trans_bandwidth)s\n %(n_jobs-fir)s\n %(method-fir)s\n %(iir_params)s\n %(phase)s\n %(fir_window)s\n %(fir_design)s\n skip_by_annotation : str | list of str\n If a string (or list of str), any annotation segment that begins\n with the given string will not be included in filtering, and\n segments on either side of the given excluded annotated segment\n will be filtered separately (i.e., as independent signals).\n The default (``('edge', 'bad_acq_skip')`` will separately filter\n any segments that were concatenated by :func:`mne.concatenate_raws`\n or :meth:`mne.io.Raw.append`, or separated during acquisition.\n To disable, provide an empty list. Only used if ``inst`` is raw.\n\n .. versionadded:: 0.16.\n %(pad-fir)s\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Epochs, Evoked, or Raw\n The filtered data.\n\n See Also\n --------\n mne.filter.create_filter\n mne.Evoked.savgol_filter\n mne.io.Raw.notch_filter\n mne.io.Raw.resample\n mne.filter.create_filter\n mne.filter.filter_data\n mne.filter.construct_iir_filter\n\n Notes\n -----\n Applies a zero-phase low-pass, high-pass, band-pass, or band-stop\n filter to the channels selected by ``picks``.\n The data are modified inplace.\n\n The object has to have the data loaded e.g. with ``preload=True``\n or ``self.load_data()``.\n\n ``l_freq`` and ``h_freq`` are the frequencies below which and above\n which, respectively, to filter out of the data. Thus the uses are:\n\n * ``l_freq < h_freq``: band-pass filter\n * ``l_freq > h_freq``: band-stop filter\n * ``l_freq is not None and h_freq is None``: high-pass filter\n * ``l_freq is None and h_freq is not None``: low-pass filter\n\n ``self.info['lowpass']`` and ``self.info['highpass']`` are only\n updated with picks=None.\n\n .. note:: If n_jobs > 1, more memory is required as\n ``len(picks) * n_times`` additional time points need to\n be temporaily stored in memory.\n\n For more information, see the tutorials\n :ref:`disc-filtering` and :ref:`tut-filter-resample` and\n :func:`mne.filter.create_filter`.\n\n .. 
versionadded:: 0.15\n \"\"\"\n from .io.base import BaseRaw\n _check_preload(self, 'inst.filter')\n if pad is None and method != 'iir':\n pad = 'edge'\n update_info, picks = _filt_check_picks(self.info, picks,\n l_freq, h_freq)\n if isinstance(self, BaseRaw):\n # Deal with annotations\n onsets, ends = _annotations_starts_stops(\n self, skip_by_annotation, invert=True)\n logger.info('Filtering raw data in %d contiguous segment%s'\n % (len(onsets), _pl(onsets)))\n else:\n onsets, ends = np.array([0]), np.array([self._data.shape[1]])\n max_idx = (ends - onsets).argmax()\n for si, (start, stop) in enumerate(zip(onsets, ends)):\n # Only output filter params once (for info level), and only warn\n # once about the length criterion (longest segment is too short)\n use_verbose = verbose if si == max_idx else 'error'\n filter_data(\n self._data[:, start:stop], self.info['sfreq'], l_freq, h_freq,\n picks, filter_length, l_trans_bandwidth, h_trans_bandwidth,\n n_jobs, method, iir_params, copy=False, phase=phase,\n fir_window=fir_window, fir_design=fir_design, pad=pad,\n verbose=use_verbose)\n # update info if filter is applied to all data channels,\n # and it's not a band-stop filter\n _filt_update_info(self.info, update_info, l_freq, h_freq)\n return self\n\n @verbose\n def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1,\n pad='edge', verbose=None): # lgtm\n \"\"\"Resample data.\n\n .. note:: Data must be loaded.\n\n Parameters\n ----------\n sfreq : float\n New sample rate to use.\n %(npad)s\n %(window-resample)s\n %(n_jobs-cuda)s\n %(pad-fir)s\n The default is ``'edge'``, which pads with the edge values of each\n vector.\n\n .. versionadded:: 0.15\n %(verbose_meth)s\n\n Returns\n -------\n inst : instance of Epochs or Evoked\n The resampled object.\n\n See Also\n --------\n mne.io.Raw.resample\n\n Notes\n -----\n For some data, it may be more accurate to use npad=0 to reduce\n artifacts. This is dataset dependent -- check your data!\n \"\"\"\n from .epochs import BaseEpochs\n from .evoked import Evoked\n # Should be guaranteed by our inheritance, and the fact that\n # mne.io.base.BaseRaw overrides this method\n assert isinstance(self, (BaseEpochs, Evoked))\n\n _check_preload(self, 'inst.resample')\n\n sfreq = float(sfreq)\n o_sfreq = self.info['sfreq']\n self._data = resample(self._data, sfreq, o_sfreq, npad, window=window,\n n_jobs=n_jobs, pad=pad)\n self.info['sfreq'] = float(sfreq)\n lowpass = self.info.get('lowpass')\n lowpass = np.inf if lowpass is None else lowpass\n self.info['lowpass'] = min(lowpass, sfreq / 2.)\n new_times = (np.arange(self._data.shape[-1], dtype=np.float) /\n sfreq + self.times[0])\n # adjust indirectly affected variables\n if isinstance(self, BaseEpochs):\n self._set_times(new_times)\n self._raw_times = self.times\n else: # isinstance(self, Evoked)\n self.times = new_times\n self._update_first_last()\n return self\n\n @verbose\n def apply_hilbert(self, picks=None, envelope=False, n_jobs=1, n_fft='auto',\n verbose=None):\n \"\"\"Compute analytic signal or envelope for a subset of channels.\n\n Parameters\n ----------\n %(picks_all_data_noref)s\n envelope : bool\n Compute the envelope signal of each channel. Default False.\n See Notes.\n %(n_jobs)s\n n_fft : int | None | str\n Points to use in the FFT for Hilbert transformation. The signal\n will be padded with zeros before computing Hilbert, then cut back\n to original length. If None, n == self.n_times. 
If 'auto',\n the next highest fast FFT length will be use.\n %(verbose_meth)s\n\n Returns\n -------\n self : instance of Raw, Epochs, or Evoked\n The raw object with transformed data.\n\n Notes\n -----\n **Parameters**\n\n If ``envelope=False``, the analytic signal for the channels defined in\n ``picks`` is computed and the data of the Raw object is converted to\n a complex representation (the analytic signal is complex valued).\n\n If ``envelope=True``, the absolute value of the analytic signal for the\n channels defined in ``picks`` is computed, resulting in the envelope\n signal.\n\n .. warning: Do not use ``envelope=True`` if you intend to compute\n an inverse solution from the raw data. If you want to\n compute the envelope in source space, use\n ``envelope=False`` and compute the envelope after the\n inverse solution has been obtained.\n\n If envelope=False, more memory is required since the original raw data\n as well as the analytic signal have temporarily to be stored in memory.\n If n_jobs > 1, more memory is required as ``len(picks) * n_times``\n additional time points need to be temporaily stored in memory.\n\n Also note that the ``n_fft`` parameter will allow you to pad the signal\n with zeros before performing the Hilbert transform. This padding\n is cut off, but it may result in a slightly different result\n (particularly around the edges). Use at your own risk.\n\n **Analytic signal**\n\n The analytic signal \"x_a(t)\" of \"x(t)\" is::\n\n x_a = F^{-1}(F(x) 2U) = x + i y\n\n where \"F\" is the Fourier transform, \"U\" the unit step function,\n and \"y\" the Hilbert transform of \"x\". One usage of the analytic\n signal is the computation of the envelope signal, which is given by\n \"e(t) = abs(x_a(t))\". Due to the linearity of Hilbert transform and the\n MNE inverse solution, the enevlope in source space can be obtained\n by computing the analytic signal in sensor space, applying the MNE\n inverse, and computing the envelope in source space.\n \"\"\"\n _check_preload(self, 'inst.apply_hilbert')\n if n_fft is None:\n n_fft = len(self.times)\n elif isinstance(n_fft, str):\n if n_fft != 'auto':\n raise ValueError('n_fft must be an integer, string, or None, '\n 'got %s' % (type(n_fft),))\n n_fft = next_fast_len(len(self.times))\n n_fft = int(n_fft)\n if n_fft < len(self.times):\n raise ValueError(\"n_fft (%d) must be at least the number of time \"\n \"points (%d)\" % (n_fft, len(self.times)))\n dtype = None if envelope else np.complex128\n picks = _picks_to_idx(self.info, picks, exclude=(), with_ref_meg=False)\n args, kwargs = (), dict(n_fft=n_fft, envelope=envelope)\n\n data_in = self._data\n if dtype is not None and dtype != self._data.dtype:\n self._data = self._data.astype(dtype)\n\n if n_jobs == 1:\n # modify data inplace to save memory\n for idx in picks:\n self._data[..., idx, :] = _check_fun(\n _my_hilbert, data_in[..., idx, :], *args, **kwargs)\n else:\n # use parallel function\n parallel, p_fun, _ = parallel_func(_check_fun, n_jobs)\n data_picks_new = parallel(\n p_fun(_my_hilbert, data_in[..., p, :], *args, **kwargs)\n for p in picks)\n for pp, p in enumerate(picks):\n self._data[..., p, :] = data_picks_new[pp]\n return self\n\n\ndef _check_fun(fun, d, *args, **kwargs):\n \"\"\"Check shapes.\"\"\"\n want_shape = d.shape\n d = fun(d, *args, **kwargs)\n if not isinstance(d, np.ndarray):\n raise TypeError('Return value must be an ndarray')\n if d.shape != want_shape:\n raise ValueError('Return data must have shape %s not %s'\n % (want_shape, d.shape))\n return 
d\n\n\ndef _my_hilbert(x, n_fft=None, envelope=False):\n \"\"\"Compute Hilbert transform of signals w/ zero padding.\n\n Parameters\n ----------\n x : array, shape (n_times)\n The signal to convert\n n_fft : int\n Size of the FFT to perform, must be at least ``len(x)``.\n The signal will be cut back to original length.\n envelope : bool\n Whether to compute amplitude of the hilbert transform in order\n to return the signal envelope.\n\n Returns\n -------\n out : array, shape (n_times)\n The hilbert transform of the signal, or the envelope.\n \"\"\"\n from scipy.signal import hilbert\n n_x = x.shape[-1]\n out = hilbert(x, N=n_fft, axis=-1)[..., :n_x]\n if envelope:\n out = np.abs(out)\n return out\n\n\n@verbose\ndef design_mne_c_filter(sfreq, l_freq=None, h_freq=40.,\n l_trans_bandwidth=None, h_trans_bandwidth=5.,\n verbose=None):\n \"\"\"Create a FIR filter like that used by MNE-C.\n\n Parameters\n ----------\n sfreq : float\n The sample frequency.\n l_freq : float | None\n The low filter frequency in Hz, default None.\n Can be None to avoid high-passing.\n h_freq : float\n The high filter frequency in Hz, default 40.\n Can be None to avoid low-passing.\n l_trans_bandwidth : float | None\n Low transition bandwidthin Hz. Can be None (default) to use 3 samples.\n h_trans_bandwidth : float\n High transition bandwidth in Hz.\n %(verbose)s\n\n Returns\n -------\n h : ndarray, shape (8193,)\n The linear-phase (symmetric) FIR filter coefficients.\n\n Notes\n -----\n This function is provided mostly for reference purposes.\n\n MNE-C uses a frequency-domain filter design technique by creating a\n linear-phase filter of length 8193. In the frequency domain, the\n 4197 frequencies are directly constructed, with zeroes in the stop-band\n and ones in the passband, with squared cosine ramps in between.\n \"\"\"\n n_freqs = (4096 + 2 * 2048) // 2 + 1\n freq_resp = np.ones(n_freqs)\n l_freq = 0 if l_freq is None else float(l_freq)\n if l_trans_bandwidth is None:\n l_width = 3\n else:\n l_width = (int(((n_freqs - 1) * l_trans_bandwidth) /\n (0.5 * sfreq)) + 1) // 2\n l_start = int(((n_freqs - 1) * l_freq) / (0.5 * sfreq))\n h_freq = sfreq / 2. if h_freq is None else float(h_freq)\n h_width = (int(((n_freqs - 1) * h_trans_bandwidth) /\n (0.5 * sfreq)) + 1) // 2\n h_start = int(((n_freqs - 1) * h_freq) / (0.5 * sfreq))\n logger.info('filter : %7.3f ... %6.1f Hz bins : %d ... %d of %d '\n 'hpw : %d lpw : %d' % (l_freq, h_freq, l_start, h_start,\n n_freqs, l_width, h_width))\n if l_freq > 0:\n start = l_start - l_width + 1\n stop = start + 2 * l_width - 1\n if start < 0 or stop >= n_freqs:\n raise RuntimeError('l_freq too low or l_trans_bandwidth too large')\n freq_resp[:start] = 0.\n k = np.arange(-l_width + 1, l_width) / float(l_width) + 3.\n freq_resp[start:stop] = np.cos(np.pi / 4. * k) ** 2\n\n if h_freq < sfreq / 2.:\n start = h_start - h_width + 1\n stop = start + 2 * h_width - 1\n if start < 0 or stop >= n_freqs:\n raise RuntimeError('h_freq too high or h_trans_bandwidth too '\n 'large')\n k = np.arange(-h_width + 1, h_width) / float(h_width) + 1.\n freq_resp[start:stop] *= np.cos(np.pi / 4. 
* k) ** 2\n freq_resp[stop:] = 0.0\n # Get the time-domain version of this signal\n h = irfft(freq_resp, n=2 * len(freq_resp) - 1)\n h = np.roll(h, n_freqs - 1) # center the impulse like a linear-phase filt\n return h\n\n\ndef _filt_check_picks(info, picks, h_freq, l_freq):\n from .io.pick import _picks_to_idx\n update_info = False\n # This will pick *all* data channels\n picks = _picks_to_idx(info, picks, 'data_or_ica', exclude=())\n if h_freq is not None or l_freq is not None:\n data_picks = _picks_to_idx(info, None, 'data_or_ica', exclude=(),\n allow_empty=True)\n if len(data_picks) == 0:\n logger.info('No data channels found. The highpass and '\n 'lowpass values in the measurement info will not '\n 'be updated.')\n elif np.in1d(data_picks, picks).all():\n update_info = True\n else:\n logger.info('Filtering a subset of channels. The highpass and '\n 'lowpass values in the measurement info will not '\n 'be updated.')\n return update_info, picks\n\n\ndef _filt_update_info(info, update_info, l_freq, h_freq):\n if update_info:\n if h_freq is not None and (l_freq is None or l_freq < h_freq) and \\\n (info[\"lowpass\"] is None or h_freq < info['lowpass']):\n info['lowpass'] = float(h_freq)\n if l_freq is not None and (h_freq is None or l_freq < h_freq) and \\\n (info[\"highpass\"] is None or l_freq > info['highpass']):\n info['highpass'] = float(l_freq)\n\n\n###############################################################################\n# Class for interpolation between adjacent points\n\nclass _Interp2(object):\n r\"\"\"Interpolate between two points.\n\n Parameters\n ----------\n interp : str\n Can be 'zero', 'linear', 'hann', or 'cos2'.\n\n Notes\n -----\n This will process data using overlapping windows of potentially\n different sizes to achieve a constant output value using different\n 2-point interpolation schemes. For example, for linear interpolation,\n and window sizes of 6 and 17, this would look like::\n\n 1 _ _\n |\\ / '-. .-'\n | \\ / '-. 
.-'\n | x |-.-|\n | / \\ .-' '-.\n |/ \\_.-' '-.\n 0 +----|----|----|----|---\n 0 5 10 15 20 25\n\n \"\"\"\n\n def __init__(self, interp='hann'):\n # set up interpolation\n self._last = dict()\n self._current = dict()\n self._count = dict()\n self._n_samp = None\n self.interp = interp\n\n def __setitem__(self, key, value):\n \"\"\"Update an item.\"\"\"\n if value is None:\n assert key not in self._current\n return\n if key in self._current:\n self._last[key] = self._current[key].copy()\n self._current[key] = value.copy()\n self._count[key] = self._count.get(key, 0) + 1\n\n @property\n def n_samp(self):\n return self._n_samp\n\n @n_samp.setter\n def n_samp(self, n_samp):\n # all up to date\n assert len(set(self._count.values())) == 1\n self._n_samp = n_samp\n self.interp = self.interp\n\n @property\n def interp(self):\n return self._interp\n\n @interp.setter\n def interp(self, interp):\n known_types = ('cos2', 'linear', 'zero', 'hann')\n if interp not in known_types:\n raise ValueError('interp must be one of %s, got \"%s\"'\n % (known_types, interp))\n self._interp = interp\n if self.n_samp is not None:\n if self._interp == 'zero' or np.isinf(self.n_samp): # ZOH\n self._interpolators = None\n else:\n if self._interp == 'linear':\n interp = np.linspace(1, 0, self.n_samp, endpoint=False)\n elif self._interp == 'cos2':\n interp = np.cos(0.5 * np.pi * np.arange(self.n_samp)) ** 2\n else: # interp == 'hann'\n interp = np.hanning(self.n_samp * 2 + 1)[self.n_samp:-1]\n self._interpolators = np.array([interp, 1 - interp])\n\n def interpolate(self, key, data, out, picks=None, interp_sl=None):\n \"\"\"Interpolate.\"\"\"\n picks = slice(None) if picks is None else picks\n interp_sl = slice(None) if interp_sl is None else interp_sl\n # Process data in large chunks to save on memory\n this_data = np.dot(self._last[key], data)\n if self._interpolators is not None:\n this_data *= self._interpolators[0][interp_sl]\n out[picks, ] += this_data\n if self._interpolators is not None:\n this_data = np.dot(self._current[key], data)\n this_data *= self._interpolators[1][interp_sl]\n out[picks, :] += this_data\n"
] | [
[
"numpy.sum",
"numpy.ones",
"scipy.signal.minimum_phase",
"numpy.diff",
"numpy.any",
"numpy.argsort",
"numpy.ones_like",
"scipy.signal.sos2zpk",
"scipy.signal.tf2zpk",
"scipy.signal.get_window",
"scipy.signal.sosfreqz",
"numpy.logical_and",
"numpy.argmin",
"numpy.abs",
"numpy.cos",
"numpy.in1d",
"scipy.signal.hilbert",
"numpy.where",
"numpy.linspace",
"numpy.unique",
"scipy.signal.sosfilt",
"numpy.round",
"scipy.signal.detrend",
"numpy.tile",
"numpy.atleast_2d",
"numpy.ceil",
"numpy.zeros",
"numpy.dot",
"scipy.signal.iirdesign",
"numpy.argmax",
"numpy.asanyarray",
"numpy.arange",
"scipy.signal.freqz",
"numpy.max",
"numpy.prod",
"numpy.maximum",
"numpy.hanning",
"scipy.signal.savgol_filter",
"numpy.zeros_like",
"numpy.roll",
"numpy.log2",
"scipy.signal.firwin",
"scipy.signal.iirfilter",
"numpy.isinf",
"numpy.atleast_1d",
"scipy.signal.lfilter",
"numpy.angle",
"numpy.array",
"numpy.convolve"
]
] |
herrfeder/RaaS | [
"f2da76a0ddce9817117fa2226fe941056bed1620"
] | [
"utils/datasupport.py"
] | [
"import pandas as pd\nfrom utils.exceptions import WrongDataFrameSize, NoScanAvailable\nfrom IPython.core.debugger import Tracer; debughere = Tracer()\nimport json\n\n\ndef pop_all(l):\n r, l[:] = l[:], []\n return r\n\n\ndef extract_scan(df ,ip=\"\",protocol='tcp'):\n no_scan = []\n if ip: \n scan = df[df[\"ip\"] == ip][protocol].iloc[0]\n else:\n if not isinstance(df,pd.core.series.Series):\n raise WrongDataFrameSize\n pass\n else:\n scan = df[protocol]\n scan_df = pd.DataFrame(columns=['conf', 'cpe', 'extrainfo', 'name', 'port', 'product', 'reason', 'state', 'version'])\n if isinstance(scan,str):\n scan = json.loads(scan)\n scan = [dict(scan[x],**{\"port\":x}) for x in list(scan.keys()) ]\n scan_df = scan_df.append(scan[0], ignore_index=True)\n if len(scan) > 1:\n scan_df = scan_df.append(scan[1:], ignore_index=True)\n scan_df.insert(0,\"ip\",ip, allow_duplicates=True)\n return scan_df\n else:\n scan_df.insert(0,\"ip\",ip, allow_duplicates=True)\n return scan_df\n else:\n scan_df = scan_df.append({col: \"noscan\" for col in scan_df.columns}, ignore_index=True)\n scan_df.insert(0,\"ip\",ip, allow_duplicates=True)\n return scan_df\n\n\ndef w_extract_scan(row):\n ### tcp ###\n if \"tcp\" or \"udp\" in row:\n try:\n scan_df = extract_scan(row)\n except NoScanAvailable:\n row['host'] = 'down'\n return row\n\n\n"
] | [
[
"pandas.DataFrame"
]
] |
pinggao18/analytics-zoo | [
"30a50e7f93770cb833e4ab99439d5210e2489e86"
] | [
"pyzoo/zoo/examples/orca/learn/pytorch/mnist/lenet_mnist.py"
] | [
"#\n# Copyright 2018 Analytics Zoo Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\nimport os\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\n\nfrom zoo.orca import init_orca_context, stop_orca_context\nfrom zoo.orca.learn.pytorch import Estimator\nfrom zoo.orca.learn.metrics import Accuracy\nfrom zoo.orca.learn.trigger import EveryEpoch\n\n\nclass LeNet(nn.Module):\n def __init__(self):\n super(LeNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4*4*50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.view(-1, 4*4*50)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--dir', default='/tmp/data', metavar='N',\n help='the folder store mnist data')\n parser.add_argument('--batch-size', type=int, default=256, metavar='N',\n help='input batch size for training per executor(default: 256)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing per executor(default: 1000)')\n parser.add_argument('--epochs', type=int, default=2, metavar='N',\n help='number of epochs to train (default: 2)')\n parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n help='learning rate (default: 0.001)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--cluster_mode', type=str, default=\"local\",\n help='The mode for the Spark cluster. 
local or yarn.')\n args = parser.parse_args()\n\n torch.manual_seed(args.seed)\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.dir, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.dir, train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=False)\n\n if args.cluster_mode == \"local\":\n init_orca_context(cores=1, memory=\"2g\")\n elif args.cluster_mode == \"yarn\":\n init_orca_context(\n cluster_mode=\"yarn-client\", cores=4, num_nodes=2, memory=\"2g\",\n driver_memory=\"10g\", driver_cores=1,\n conf={\"spark.rpc.message.maxSize\": \"1024\",\n \"spark.task.maxFailures\": \"1\",\n \"spark.driver.extraJavaOptions\": \"-Dbigdl.failure.retryTimes=1\"})\n\n model = LeNet()\n model.train()\n criterion = nn.NLLLoss()\n\n adam = torch.optim.Adam(model.parameters(), args.lr)\n est = Estimator.from_torch(model=model, optimizer=adam, loss=criterion)\n est.fit(data=train_loader, epochs=args.epochs, validation_data=test_loader,\n validation_metrics=[Accuracy()], checkpoint_trigger=EveryEpoch())\n result = est.evaluate(data=test_loader, validation_metrics=[Accuracy()])\n for r in result:\n print(str(r))\n stop_orca_context()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.NLLLoss",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.functional.max_pool2d",
"torch.manual_seed",
"torch.nn.Conv2d"
]
] |
Tammy-Lee/analysis-llt | [
"ea1bb62d614bb75dac68c010a0cc524a5be185f2"
] | [
"analysis_llt/ml/cv/neural_network.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\ncreate on 2019-03-20 04:19\n\nauthor @lilia\n\"\"\"\nfrom sklearn.neural_network import MLPClassifier\n\nfrom analysis_llt.ml.cv.base import BaseCV\n\n\nclass MLPClassifierCV(BaseCV):\n def __init__(self, hidden_layer_sizes=(100,), learning_rate_init=0.001, cv=None, random_state=None, verbose=0,\n **model_params):\n super(MLPClassifierCV, self).__init__(cv=cv, random_state=random_state, verbose=verbose, **model_params)\n self.hidden_layer_sizes = hidden_layer_sizes\n self.learning_rate_init = learning_rate_init\n\n def build_model(self):\n mlp = MLPClassifier(hidden_layer_sizes=self.hidden_layer_sizes, random_state=self.random_state,\n learning_rate_init=self.learning_rate_init, **self.model_params)\n return mlp\n"
] | [
[
"sklearn.neural_network.MLPClassifier"
]
] |
yamad07/vjvae | [
"dd8d6607f5ec6c46df1794f903b42aee890d970b"
] | [
"utils/analysis.py"
] | [
"import sys, os\r\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\r\n\r\nimport logging, math, multiprocessing, random\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport sklearn\r\nimport sklearn.manifold.t_sne as tsne\r\n\r\nfrom collections import defaultdict, OrderedDict\r\n\r\n\r\ndef calc_sims(latents):\r\n '''Calculate pairwise cosine similarities'''\r\n dot_mat = np.dot(latents, latents.T)\r\n nrm_mat = np.linalg.norm(latents, axis=1)\r\n mlt_mat = np.outer(nrm_mat, nrm_mat)\r\n sims = dot_mat / mlt_mat\r\n return sims\r\n\r\ndef calc_dists(latents, split=10000):\r\n '''Calculate pairwise Euclidean distances'''\r\n # split high memory load\r\n if latents.shape[0] > split:\r\n dists = np.zeros([latents.shape[0], latents.shape[0]])\r\n for i in range(math.ceil(latents.shape[0]/split)):\r\n start_idx, end_idx = (i * split), ((i + 1) * split)\r\n subset = latents[start_idx:end_idx]\r\n dists[start_idx:end_idx] = sklearn.metrics.pairwise_distances(subset, latents, metric='euclidean', n_jobs=multiprocessing.cpu_count())\r\n # otherwise, calculate in one go\r\n else:\r\n dists = sklearn.metrics.pairwise_distances(latents, latents, metric='euclidean', n_jobs=multiprocessing.cpu_count())\r\n return dists\r\n\r\n\r\ndef get_closest(centroid, latents, rel_idcs):\r\n rel_latents = latents[rel_idcs]\r\n dists = (rel_latents - centroid)**2\r\n dists = np.sum(dists, axis=1)\r\n dists = np.sqrt(dists)\r\n\r\n dists = [(rel_idcs[i], d) for i, d in enumerate(dists)] # re-map to global latent indices\r\n dists = sorted(dists, key=lambda el: el[1])\r\n latent_idcs, dists = zip(*dists)\r\n return np.array(latent_idcs, dtype=int), np.array(dists)\r\n\r\n\r\ndef get_minmax(centroids, eval_trio, true_idx, latents, rel_idcs):\r\n rel_latents = latents[rel_idcs]\r\n dists = np.zeros(rel_idcs.shape[0])\r\n for c in range(centroids.shape[0]):\r\n if c not in eval_trio:\r\n continue\r\n cur_dists = (rel_latents - centroids[c])**2\r\n cur_dists = np.sum(cur_dists, axis=1)\r\n cur_dists = np.sqrt(cur_dists)\r\n if c == true_idx:\r\n dists += cur_dists\r\n else:\r\n dists -= cur_dists/2\r\n\r\n dists = [(rel_idcs[i], d) for i, d in enumerate(dists)] # re-map to global latent indices\r\n dists = sorted(dists, key=lambda el: el[1])\r\n latent_idcs, dists = zip(*dists)\r\n return np.array(latent_idcs, dtype=int), np.array(dists)\r\n\r\n\r\ndef calc_metrics(latents, labels, sims, num_labels, prec_ranks, sim_metric='euclidean'):\r\n # set up result arrays\r\n mean_latents = np.zeros([num_labels, latents.shape[1]])\r\n rel_sim_by_label = np.zeros(num_labels)\r\n oth_sim_by_label = np.zeros(num_labels)\r\n label_precision = defaultdict(lambda: np.zeros(num_labels))\r\n label_idcs = [[] for _ in range(num_labels)]\r\n\r\n # set up multi-class case\r\n if len(labels.shape) > 1:\r\n multi_labels = np.ones([num_labels, latents.shape[0]]) * -1\r\n for idx in range(latents.shape[0]):\r\n true_labels = np.where(labels[idx] == 1)[0]\r\n multi_labels[true_labels, idx] = true_labels\r\n label_counts = np.sum((multi_labels > -1), axis=-1)\r\n label_dist = np.around((label_counts * 100) / np.sum(label_counts), decimals=2)\r\n logging.info(\"Set up multi-class evaluation with class distribution %s.\" % list(zip(label_counts.tolist(), label_dist.tolist())))\r\n\r\n for idx in range(latents.shape[0]):\r\n sys.stdout.write(\"\\rCalculating metrics for %d/%d (%.2f%%)...\" % (idx+1, latents.shape[0], ((idx+1)*100)/latents.shape[0]))\r\n sys.stdout.flush()\r\n\r\n true_labels = [labels[idx]] if len(labels.shape) 
< 2 else np.where(labels[idx] == 1.)[0] # get true label indices\r\n for lbl in true_labels:\r\n cur_labels = labels if len(labels.shape) < 2 else multi_labels[lbl] # set up labels for binary and multi-class\r\n\r\n # sort neighbours by similarity (without self)\r\n if sims is not None:\r\n cur_sims = [(i, s) for i, s in enumerate(sims[idx]) if i != idx]\r\n cur_sims = sorted(cur_sims, key=lambda el: el[1], reverse=(sim_metric == 'cosine'))\r\n # get sorted neighbours and similarities\r\n sim_idcs, sim_vals = zip(*cur_sims)\r\n sim_idcs, sim_vals = np.array(sim_idcs), np.array(sim_vals)\r\n # if not precomputed, calculate current similarities (more memory efficient)\r\n else:\r\n sim_idcs, sim_vals = get_closest(latents[idx], latents, [i for i in range(latents.shape[0]) if i != idx])\r\n\r\n label_idcs[lbl].append(idx)\r\n\r\n # calculate precision/recall at top n\r\n for rank in prec_ranks:\r\n # get top n\r\n top_idcs, top_vals = sim_idcs[:rank], sim_vals[:rank]\r\n # count TP/FP and calculate precision\r\n tp = np.sum(cur_labels[top_idcs] == lbl)\r\n fp = np.sum(cur_labels[top_idcs] != lbl)\r\n precision = tp / (tp + fp)\r\n # store results\r\n label_precision[rank][lbl] += precision\r\n\r\n # compute mean latents\r\n for lbl in range(num_labels):\r\n mean_latents[lbl] = np.mean(latents[label_idcs[lbl]], axis=0)\r\n\r\n # average out metrics\r\n label_counts = np.array([len(lbl_idcs) for lbl_idcs in label_idcs])\r\n for rank in prec_ranks:\r\n label_precision[rank] /= label_counts\r\n\r\n logging.info(\"\\rCalculated metrics for %d latents.%s\" % (latents.shape[0], ' '*16))\r\n\r\n return mean_latents, label_precision, label_counts\r\n\r\n\r\ndef calc_latent_kl(vis_latents, aud_latents, perplexity):\r\n logging.info(\"Calculating joint probability distribution of visual latent space...\")\r\n vis_dists = calc_dists(vis_latents)\r\n vis_distr = tsne._joint_probabilities(distances=vis_dists, desired_perplexity=perplexity, verbose=True)\r\n logging.info(\"Calculating joint probability distribution of auditive latent space...\")\r\n aud_dists = calc_dists(aud_latents)\r\n aud_distr = tsne._joint_probabilities(distances=aud_dists, desired_perplexity=perplexity, verbose=True)\r\n kl_va = 2.0 * np.dot(vis_distr, np.log(vis_distr / aud_distr))\r\n kl_av = 2.0 * np.dot(aud_distr, np.log(aud_distr / vis_distr))\r\n logging.info(\"Calculated KL divergences of audio-visual latent spaces with perplexity %d: %.2f VA / %.2f AV.\" % (perplexity, kl_va, kl_av))\r\n return kl_va, kl_av\r\n\r\n\r\ndef calc_cls_metrics(labels, predictions):\r\n # compute total accuracy\r\n pred_labels = np.argmax(predictions, axis=1)\r\n\r\n # compute accuracy, precision and recall by label\r\n label_accuracy = np.zeros(predictions.shape[1])\r\n label_precision = np.zeros(predictions.shape[1])\r\n label_recall = np.zeros(predictions.shape[1])\r\n for lbl in range(predictions.shape[1]):\r\n lbl_idcs = np.where(labels == (lbl * np.ones_like(labels)))\r\n oth_idcs = np.where(labels != (lbl * np.ones_like(labels)))\r\n tp = np.sum(pred_labels[lbl_idcs] == lbl)\r\n fp = np.sum(pred_labels[oth_idcs] == lbl)\r\n tn = np.sum(pred_labels[oth_idcs] != lbl)\r\n fn = np.sum(pred_labels[lbl_idcs] != lbl)\r\n label_precision[lbl] = tp / (tp + fp)\r\n label_recall[lbl] = tp / (tp + fn)\r\n label_accuracy[lbl] = (tp + tn) / (tp + fp + tn + fn)\r\n\r\n return np.mean(label_accuracy), label_precision, label_recall, label_accuracy\r\n\r\n\r\ndef calc_mltcls_metrics(labels, predictions):\r\n # round predictions to {0, 1}\r\n predictions = 
np.around(predictions)\r\n\r\n # compute accuracy, precision and recall by label\r\n label_accuracy = np.zeros(predictions.shape[1])\r\n label_precision = np.zeros(predictions.shape[1])\r\n label_recall = np.zeros(predictions.shape[1])\r\n for lbl in range(predictions.shape[1]):\r\n lbl_idcs = np.where(labels[:, lbl] == 1)\r\n oth_idcs = np.where(labels[:, lbl] == 0)\r\n tp = np.sum(predictions[lbl_idcs, lbl] == 1.)\r\n fp = np.sum(predictions[oth_idcs, lbl] == 1.)\r\n tn = np.sum(predictions[oth_idcs, lbl] == 0.)\r\n fn = np.sum(predictions[lbl_idcs, lbl] == 0.)\r\n print(\"Label %d: %d TP, %d FP, %d TN, %d FN\" % (lbl, tp, fp, tn, fn))\r\n label_precision[lbl] = tp / (tp + fp)\r\n label_recall[lbl] = tp / (tp + fn)\r\n label_accuracy[lbl] = (tp + tn) / (tp + fp + tn + fn)\r\n\r\n return np.mean(label_accuracy), label_precision, label_recall, label_accuracy\r\n\r\n\r\ndef log_metrics(label_descs, top_n, precision_by_label, label_counts):\r\n logging.info(\"Overall metrics:\")\r\n for label_idx, label in enumerate(label_descs):\r\n logging.info(\" %s: %.2f P@%d\" % (\r\n label, precision_by_label[label_idx], top_n))\r\n logging.info(\"Total (avg): %.2f P@%d\" % (\r\n np.sum(precision_by_label * (label_counts/np.sum(label_counts))), top_n))\r\n\r\n\r\ndef get_sorted_triplets(latents):\r\n # set up non-overlapping trios\r\n trio_keys = [\r\n tuple(sorted([i1, i2, i3]))\r\n for i1 in range(latents.shape[0])\r\n for i2 in range(latents.shape[0])\r\n for i3 in range(latents.shape[0])\r\n if len(set([i1, i2, i3])) > 2\r\n ]\r\n # calculate trio similarities\r\n trio_sims = {}\r\n for trio_key in trio_keys:\r\n trio_sims[trio_key] = np.linalg.norm(latents[[trio_key[0]]] - latents[[trio_key[1]]])\\\r\n + np.linalg.norm(latents[[trio_key[1]]] - latents[[trio_key[2]]])\\\r\n + np.linalg.norm(latents[[trio_key[2]]] - latents[[trio_key[0]]])\r\n\r\n sorted_triplets = sorted(list(trio_sims.items()), key=lambda el: el[1], reverse=True)\r\n trio_keys, trio_dists = zip(*sorted_triplets)\r\n return trio_keys, trio_dists\r\n\r\n\r\ndef get_unique_samples(closest_idcs, closest_dists):\r\n # sort distances globally\r\n idx_dist_map = [\r\n ([row, col], closest_dists[row, col])\r\n for row in range(closest_dists.shape[0])\r\n for col in range(closest_dists.shape[1])\r\n if closest_dists[row, col] > -100]\r\n # if closest_dists[row, col] >= 0]\r\n idx_dist_map = sorted(idx_dist_map, key=lambda el: el[1])\r\n\r\n # go through samples in globally sorted order\r\n sample_idcs = np.ones_like(closest_idcs, dtype=int) * -1\r\n for seek, dist in idx_dist_map:\r\n cur_idx = closest_idcs[seek[0], seek[1]]\r\n # check if idx is already used for mean which it is closer to\r\n if cur_idx in sample_idcs:\r\n continue\r\n # insert at leftmost position for appropriate class\r\n sample_idcs[seek[0], np.where(sample_idcs[seek[0]] == -1)[0][0]] = cur_idx\r\n\r\n return sample_idcs\r\n\r\n\r\ndef gen_eval_task(mean_latents, latents, labels, label_descs, num_examples, num_tasks, sel_mode='closest'):\r\n # get triplet of means with largest distance between them\r\n trio_keys, trio_dists = get_sorted_triplets(mean_latents)\r\n # iterate over triplets (in case one set has insufficient amounts of data)\r\n for eval_trio, eval_trio_dist in zip(trio_keys, trio_dists):\r\n logging.info(\"Calculated mean triplet (%s) with cumulative Euclidean distance %.2f.\" % (', '.join([label_descs[l] for l in eval_trio]), eval_trio_dist))\r\n # get samples which lie closest to respective means\r\n closest_idcs = np.ones([3, latents.shape[0]], 
dtype=int) * -100\r\n closest_dists = np.ones([3, latents.shape[0]]) * -100\r\n is_valid_trio = True\r\n for tidx in range(3):\r\n # get indices of samples with same label as current mean\r\n if len(labels.shape) > 1:\r\n rel_idcs = np.squeeze(np.where(labels[:, eval_trio[tidx]] == 1.))\r\n else:\r\n rel_idcs = np.squeeze(np.where(labels == (eval_trio[tidx] * np.ones_like(labels))))\r\n # check if class has enough samples to generate tasks\r\n if (len(rel_idcs.shape) < 1) or (rel_idcs.shape[0] < (num_examples + num_tasks)):\r\n is_valid_trio = False\r\n break\r\n # get closest latents of same label per mean\r\n if sel_mode == 'closest':\r\n cur_closest_idcs, cur_closest_dists = get_closest(mean_latents[eval_trio[tidx]], latents, rel_idcs)\r\n elif sel_mode == 'minmax':\r\n cur_closest_idcs, cur_closest_dists = get_minmax(mean_latents, eval_trio, eval_trio[tidx], latents, rel_idcs)\r\n else:\r\n assert (sel_mode in ['closest', 'minmax']), \"[Error] Unknown sample selection mode '%s'.\" % sel_mode\r\n closest_idcs[tidx, :cur_closest_idcs.shape[0]] = cur_closest_idcs\r\n closest_dists[tidx, :cur_closest_dists.shape[0]] = cur_closest_dists\r\n # exit loop if sufficient amounts are available\r\n if is_valid_trio:\r\n trio_sample_idcs = get_unique_samples(closest_idcs, closest_dists)\r\n break\r\n # skip to next trio if current one is insufficient\r\n else:\r\n logging.error(\"[Error] Not enough data to generate task based on classes %s.\" % (eval_trio,))\r\n continue\r\n\r\n # truncate trio sample idcs\r\n trio_sample_idcs = trio_sample_idcs[:, :(num_examples + num_tasks + 1)]\r\n\r\n # get examples (randomly choose from available data)\r\n example_idcs = sorted(np.random.choice((num_examples + num_tasks + 1), num_examples, replace=False))\r\n examples = np.squeeze(trio_sample_idcs[:,example_idcs].flatten())\r\n examples = examples.tolist()\r\n trio_sample_idcs[:,example_idcs] = -1\r\n\r\n # get tasks\r\n task_idcs = np.where(trio_sample_idcs >= 0.)\r\n task_trios = np.reshape(trio_sample_idcs[task_idcs], [3, -1])\r\n # convert indices per task into lists\r\n task_trios = [task_trios[:, i].tolist() for i in range(num_tasks)]\r\n\r\n # randomly select truths for tasks\r\n tasks = []\r\n for trio in task_trios:\r\n truth_idx = random.randint(0, 2)\r\n tasks.append(OrderedDict([\r\n ('truth', truth_idx),\r\n ('options', trio)\r\n ]))\r\n return eval_trio, examples, tasks\r\n"
] | [
[
"numpy.sum",
"numpy.ones",
"numpy.linalg.norm",
"numpy.array",
"numpy.zeros",
"sklearn.manifold.t_sne._joint_probabilities",
"numpy.reshape",
"numpy.ones_like",
"numpy.random.choice",
"numpy.argmax",
"numpy.where",
"numpy.log",
"numpy.sqrt",
"numpy.around",
"numpy.dot",
"numpy.outer",
"numpy.mean"
]
] |
a1609jk/seaborn | [
"50b77328d1f9e6739bcd1e69a43c26226ed826da"
] | [
"seaborn/tests/test_distributions.py"
] | [
"import itertools\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import to_rgb, to_rgba\n\nimport pytest\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\nfrom .. import distributions as dist\nfrom ..palettes import (\n color_palette,\n light_palette,\n)\nfrom .._core import (\n categorical_order,\n)\nfrom .._statistics import (\n KDE,\n Histogram,\n _no_scipy,\n)\nfrom ..distributions import (\n _DistributionPlotter,\n displot,\n distplot,\n histplot,\n ecdfplot,\n kdeplot,\n rugplot,\n)\nfrom ..axisgrid import FacetGrid\nfrom .._testing import (\n assert_plots_equal,\n assert_legends_equal,\n assert_colors_equal,\n)\n\n\nclass TestDistPlot(object):\n\n rs = np.random.RandomState(0)\n x = rs.randn(100)\n\n def test_hist_bins(self):\n\n fd_edges = np.histogram_bin_edges(self.x, \"fd\")\n with pytest.warns(FutureWarning):\n ax = distplot(self.x)\n for edge, bar in zip(fd_edges, ax.patches):\n assert pytest.approx(edge) == bar.get_x()\n\n plt.close(ax.figure)\n n = 25\n n_edges = np.histogram_bin_edges(self.x, n)\n with pytest.warns(FutureWarning):\n ax = distplot(self.x, bins=n)\n for edge, bar in zip(n_edges, ax.patches):\n assert pytest.approx(edge) == bar.get_x()\n\n def test_elements(self):\n\n with pytest.warns(FutureWarning):\n\n n = 10\n ax = distplot(self.x, bins=n,\n hist=True, kde=False, rug=False, fit=None)\n assert len(ax.patches) == 10\n assert len(ax.lines) == 0\n assert len(ax.collections) == 0\n\n plt.close(ax.figure)\n ax = distplot(self.x,\n hist=False, kde=True, rug=False, fit=None)\n assert len(ax.patches) == 0\n assert len(ax.lines) == 1\n assert len(ax.collections) == 0\n\n plt.close(ax.figure)\n ax = distplot(self.x,\n hist=False, kde=False, rug=True, fit=None)\n assert len(ax.patches) == 0\n assert len(ax.lines) == 0\n assert len(ax.collections) == 1\n\n class Norm:\n \"\"\"Dummy object that looks like a scipy RV\"\"\"\n def fit(self, x):\n return ()\n\n def pdf(self, x, *params):\n return np.zeros_like(x)\n\n plt.close(ax.figure)\n ax = distplot(\n self.x, hist=False, kde=False, rug=False, fit=Norm())\n assert len(ax.patches) == 0\n assert len(ax.lines) == 1\n assert len(ax.collections) == 0\n\n def test_distplot_with_nans(self):\n\n f, (ax1, ax2) = plt.subplots(2)\n x_null = np.append(self.x, [np.nan])\n\n with pytest.warns(FutureWarning):\n distplot(self.x, ax=ax1)\n distplot(x_null, ax=ax2)\n\n line1 = ax1.lines[0]\n line2 = ax2.lines[0]\n assert np.array_equal(line1.get_xydata(), line2.get_xydata())\n\n for bar1, bar2 in zip(ax1.patches, ax2.patches):\n assert bar1.get_xy() == bar2.get_xy()\n assert bar1.get_height() == bar2.get_height()\n\n\nclass SharedAxesLevelTests:\n\n def test_color(self, long_df, **kwargs):\n\n ax = plt.figure().subplots()\n self.func(data=long_df, x=\"y\", ax=ax, **kwargs)\n assert_colors_equal(self.get_last_color(ax, **kwargs), \"C0\", check_alpha=False)\n\n ax = plt.figure().subplots()\n self.func(data=long_df, x=\"y\", ax=ax, **kwargs)\n self.func(data=long_df, x=\"y\", ax=ax, **kwargs)\n assert_colors_equal(self.get_last_color(ax, **kwargs), \"C1\", check_alpha=False)\n\n ax = plt.figure().subplots()\n self.func(data=long_df, x=\"y\", color=\"C2\", ax=ax, **kwargs)\n assert_colors_equal(self.get_last_color(ax, **kwargs), \"C2\", check_alpha=False)\n\n\nclass TestRugPlot(SharedAxesLevelTests):\n\n func = staticmethod(rugplot)\n\n def get_last_color(self, ax, **kwargs):\n\n return 
ax.collections[-1].get_color()\n\n def assert_rug_equal(self, a, b):\n\n assert_array_equal(a.get_segments(), b.get_segments())\n\n @pytest.mark.parametrize(\"variable\", [\"x\", \"y\"])\n def test_long_data(self, long_df, variable):\n\n vector = long_df[variable]\n vectors = [\n variable, vector, np.asarray(vector), vector.to_list(),\n ]\n\n f, ax = plt.subplots()\n for vector in vectors:\n rugplot(data=long_df, **{variable: vector})\n\n for a, b in itertools.product(ax.collections, ax.collections):\n self.assert_rug_equal(a, b)\n\n def test_bivariate_data(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n rugplot(data=long_df, x=\"x\", y=\"y\", ax=ax1)\n rugplot(data=long_df, x=\"x\", ax=ax2)\n rugplot(data=long_df, y=\"y\", ax=ax2)\n\n self.assert_rug_equal(ax1.collections[0], ax2.collections[0])\n self.assert_rug_equal(ax1.collections[1], ax2.collections[1])\n\n def test_wide_vs_long_data(self, wide_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n rugplot(data=wide_df, ax=ax1)\n for col in wide_df:\n rugplot(data=wide_df, x=col, ax=ax2)\n\n wide_segments = np.sort(\n np.array(ax1.collections[0].get_segments())\n )\n long_segments = np.sort(\n np.concatenate([c.get_segments() for c in ax2.collections])\n )\n\n assert_array_equal(wide_segments, long_segments)\n\n def test_flat_vector(self, long_df):\n\n f, ax = plt.subplots()\n rugplot(data=long_df[\"x\"])\n rugplot(x=long_df[\"x\"])\n self.assert_rug_equal(*ax.collections)\n\n def test_datetime_data(self, long_df):\n\n ax = rugplot(data=long_df[\"t\"])\n vals = np.stack(ax.collections[0].get_segments())[:, 0, 0]\n assert_array_equal(vals, mpl.dates.date2num(long_df[\"t\"]))\n\n def test_empty_data(self):\n\n ax = rugplot(x=[])\n assert not ax.collections\n\n def test_a_deprecation(self, flat_series):\n\n f, ax = plt.subplots()\n\n with pytest.warns(FutureWarning):\n rugplot(a=flat_series)\n rugplot(x=flat_series)\n\n self.assert_rug_equal(*ax.collections)\n\n @pytest.mark.parametrize(\"variable\", [\"x\", \"y\"])\n def test_axis_deprecation(self, flat_series, variable):\n\n f, ax = plt.subplots()\n\n with pytest.warns(FutureWarning):\n rugplot(flat_series, axis=variable)\n rugplot(**{variable: flat_series})\n\n self.assert_rug_equal(*ax.collections)\n\n def test_vertical_deprecation(self, flat_series):\n\n f, ax = plt.subplots()\n\n with pytest.warns(FutureWarning):\n rugplot(flat_series, vertical=True)\n rugplot(y=flat_series)\n\n self.assert_rug_equal(*ax.collections)\n\n def test_rug_data(self, flat_array):\n\n height = .05\n ax = rugplot(x=flat_array, height=height)\n segments = np.stack(ax.collections[0].get_segments())\n\n n = flat_array.size\n assert_array_equal(segments[:, 0, 1], np.zeros(n))\n assert_array_equal(segments[:, 1, 1], np.full(n, height))\n assert_array_equal(segments[:, 1, 0], flat_array)\n\n def test_rug_colors(self, long_df):\n\n ax = rugplot(data=long_df, x=\"x\", hue=\"a\")\n\n order = categorical_order(long_df[\"a\"])\n palette = color_palette()\n\n expected_colors = np.ones((len(long_df), 4))\n for i, val in enumerate(long_df[\"a\"]):\n expected_colors[i, :3] = palette[order.index(val)]\n\n assert_array_equal(ax.collections[0].get_color(), expected_colors)\n\n def test_expand_margins(self, flat_array):\n\n f, ax = plt.subplots()\n x1, y1 = ax.margins()\n rugplot(x=flat_array, expand_margins=False)\n x2, y2 = ax.margins()\n assert x1 == x2\n assert y1 == y2\n\n f, ax = plt.subplots()\n x1, y1 = ax.margins()\n height = .05\n rugplot(x=flat_array, height=height)\n x2, y2 = ax.margins()\n assert x1 == 
x2\n assert y1 + height * 2 == pytest.approx(y2)\n\n def test_matplotlib_kwargs(self, flat_series):\n\n lw = 2\n alpha = .2\n ax = rugplot(y=flat_series, linewidth=lw, alpha=alpha)\n rug = ax.collections[0]\n assert np.all(rug.get_alpha() == alpha)\n assert np.all(rug.get_linewidth() == lw)\n\n def test_axis_labels(self, flat_series):\n\n ax = rugplot(x=flat_series)\n assert ax.get_xlabel() == flat_series.name\n assert not ax.get_ylabel()\n\n\nclass TestKDEPlotUnivariate(SharedAxesLevelTests):\n\n func = staticmethod(kdeplot)\n\n def get_last_color(self, ax, fill=True):\n\n if fill:\n return ax.collections[-1].get_facecolor()\n else:\n return ax.lines[-1].get_color()\n\n @pytest.mark.parametrize(\"fill\", [True, False])\n def test_color(self, long_df, fill):\n\n super().test_color(long_df, fill=fill)\n\n if fill:\n\n ax = plt.figure().subplots()\n self.func(data=long_df, x=\"y\", facecolor=\"C3\", fill=True, ax=ax)\n assert_colors_equal(self.get_last_color(ax), \"C3\", check_alpha=False)\n\n ax = plt.figure().subplots()\n self.func(data=long_df, x=\"y\", fc=\"C4\", fill=True, ax=ax)\n assert_colors_equal(self.get_last_color(ax), \"C4\", check_alpha=False)\n\n @pytest.mark.parametrize(\n \"variable\", [\"x\", \"y\"],\n )\n def test_long_vectors(self, long_df, variable):\n\n vector = long_df[variable]\n vectors = [\n variable, vector, vector.to_numpy(), vector.to_list(),\n ]\n\n f, ax = plt.subplots()\n for vector in vectors:\n kdeplot(data=long_df, **{variable: vector})\n\n xdata = [l.get_xdata() for l in ax.lines]\n for a, b in itertools.product(xdata, xdata):\n assert_array_equal(a, b)\n\n ydata = [l.get_ydata() for l in ax.lines]\n for a, b in itertools.product(ydata, ydata):\n assert_array_equal(a, b)\n\n def test_wide_vs_long_data(self, wide_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n kdeplot(data=wide_df, ax=ax1, common_norm=False, common_grid=False)\n for col in wide_df:\n kdeplot(data=wide_df, x=col, ax=ax2)\n\n for l1, l2 in zip(ax1.lines[::-1], ax2.lines):\n assert_array_equal(l1.get_xydata(), l2.get_xydata())\n\n def test_flat_vector(self, long_df):\n\n f, ax = plt.subplots()\n kdeplot(data=long_df[\"x\"])\n kdeplot(x=long_df[\"x\"])\n assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())\n\n def test_empty_data(self):\n\n ax = kdeplot(x=[])\n assert not ax.lines\n\n def test_singular_data(self):\n\n with pytest.warns(UserWarning):\n ax = kdeplot(x=np.ones(10))\n assert not ax.lines\n\n with pytest.warns(UserWarning):\n ax = kdeplot(x=[5])\n assert not ax.lines\n\n def test_variable_assignment(self, long_df):\n\n f, ax = plt.subplots()\n kdeplot(data=long_df, x=\"x\", fill=True)\n kdeplot(data=long_df, y=\"x\", fill=True)\n\n v0 = ax.collections[0].get_paths()[0].vertices\n v1 = ax.collections[1].get_paths()[0].vertices[:, [1, 0]]\n\n assert_array_equal(v0, v1)\n\n def test_vertical_deprecation(self, long_df):\n\n f, ax = plt.subplots()\n kdeplot(data=long_df, y=\"x\")\n\n with pytest.warns(FutureWarning):\n kdeplot(data=long_df, x=\"x\", vertical=True)\n\n assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())\n\n def test_bw_deprecation(self, long_df):\n\n f, ax = plt.subplots()\n kdeplot(data=long_df, x=\"x\", bw_method=\"silverman\")\n\n with pytest.warns(FutureWarning):\n kdeplot(data=long_df, x=\"x\", bw=\"silverman\")\n\n assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())\n\n def test_kernel_deprecation(self, long_df):\n\n f, ax = plt.subplots()\n kdeplot(data=long_df, x=\"x\")\n\n with pytest.warns(UserWarning):\n 
kdeplot(data=long_df, x=\"x\", kernel=\"epi\")\n\n assert_array_equal(ax.lines[0].get_xydata(), ax.lines[1].get_xydata())\n\n def test_shade_deprecation(self, long_df):\n\n f, ax = plt.subplots()\n kdeplot(data=long_df, x=\"x\", shade=True)\n kdeplot(data=long_df, x=\"x\", fill=True)\n fill1, fill2 = ax.collections\n assert_array_equal(\n fill1.get_paths()[0].vertices, fill2.get_paths()[0].vertices\n )\n\n @pytest.mark.parametrize(\"multiple\", [\"layer\", \"stack\", \"fill\"])\n def test_hue_colors(self, long_df, multiple):\n\n ax = kdeplot(\n data=long_df, x=\"x\", hue=\"a\",\n multiple=multiple,\n fill=True, legend=False\n )\n\n # Note that hue order is reversed in the plot\n lines = ax.lines[::-1]\n fills = ax.collections[::-1]\n\n palette = color_palette()\n\n for line, fill, color in zip(lines, fills, palette):\n assert_colors_equal(line.get_color(), color)\n assert_colors_equal(fill.get_facecolor(), to_rgba(color, .25))\n\n def test_hue_stacking(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n kdeplot(\n data=long_df, x=\"x\", hue=\"a\",\n multiple=\"layer\", common_grid=True,\n legend=False, ax=ax1,\n )\n kdeplot(\n data=long_df, x=\"x\", hue=\"a\",\n multiple=\"stack\", fill=False,\n legend=False, ax=ax2,\n )\n\n layered_densities = np.stack([\n l.get_ydata() for l in ax1.lines\n ])\n stacked_densities = np.stack([\n l.get_ydata() for l in ax2.lines\n ])\n\n assert_array_equal(layered_densities.cumsum(axis=0), stacked_densities)\n\n def test_hue_filling(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n kdeplot(\n data=long_df, x=\"x\", hue=\"a\",\n multiple=\"layer\", common_grid=True,\n legend=False, ax=ax1,\n )\n kdeplot(\n data=long_df, x=\"x\", hue=\"a\",\n multiple=\"fill\", fill=False,\n legend=False, ax=ax2,\n )\n\n layered = np.stack([l.get_ydata() for l in ax1.lines])\n filled = np.stack([l.get_ydata() for l in ax2.lines])\n\n assert_array_almost_equal(\n (layered / layered.sum(axis=0)).cumsum(axis=0),\n filled,\n )\n\n @pytest.mark.parametrize(\"multiple\", [\"stack\", \"fill\"])\n def test_fill_default(self, long_df, multiple):\n\n ax = kdeplot(\n data=long_df, x=\"x\", hue=\"a\", multiple=multiple, fill=None\n )\n\n assert len(ax.collections) > 0\n\n @pytest.mark.parametrize(\"multiple\", [\"layer\", \"stack\", \"fill\"])\n def test_fill_nondefault(self, long_df, multiple):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n kws = dict(data=long_df, x=\"x\", hue=\"a\")\n kdeplot(**kws, multiple=multiple, fill=False, ax=ax1)\n kdeplot(**kws, multiple=multiple, fill=True, ax=ax2)\n\n assert len(ax1.collections) == 0\n assert len(ax2.collections) > 0\n\n def test_color_cycle_interaction(self, flat_series):\n\n color = (.2, 1, .6)\n\n f, ax = plt.subplots()\n kdeplot(flat_series)\n kdeplot(flat_series)\n assert_colors_equal(ax.lines[0].get_color(), \"C0\")\n assert_colors_equal(ax.lines[1].get_color(), \"C1\")\n plt.close(f)\n\n f, ax = plt.subplots()\n kdeplot(flat_series, color=color)\n kdeplot(flat_series)\n assert_colors_equal(ax.lines[0].get_color(), color)\n assert_colors_equal(ax.lines[1].get_color(), \"C0\")\n plt.close(f)\n\n f, ax = plt.subplots()\n kdeplot(flat_series, fill=True)\n kdeplot(flat_series, fill=True)\n assert_colors_equal(ax.collections[0].get_facecolor(), to_rgba(\"C0\", .25))\n assert_colors_equal(ax.collections[1].get_facecolor(), to_rgba(\"C1\", .25))\n plt.close(f)\n\n @pytest.mark.parametrize(\"fill\", [True, False])\n def test_artist_color(self, long_df, fill):\n\n color = (.2, 1, .6)\n alpha = .5\n\n f, ax = 
plt.subplots()\n\n kdeplot(long_df[\"x\"], fill=fill, color=color)\n if fill:\n artist_color = ax.collections[-1].get_facecolor().squeeze()\n else:\n artist_color = ax.lines[-1].get_color()\n default_alpha = .25 if fill else 1\n assert_colors_equal(artist_color, to_rgba(color, default_alpha))\n\n kdeplot(long_df[\"x\"], fill=fill, color=color, alpha=alpha)\n if fill:\n artist_color = ax.collections[-1].get_facecolor().squeeze()\n else:\n artist_color = ax.lines[-1].get_color()\n assert_colors_equal(artist_color, to_rgba(color, alpha))\n\n def test_datetime_scale(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n kdeplot(x=long_df[\"t\"], fill=True, ax=ax1)\n kdeplot(x=long_df[\"t\"], fill=False, ax=ax2)\n assert ax1.get_xlim() == ax2.get_xlim()\n\n def test_multiple_argument_check(self, long_df):\n\n with pytest.raises(ValueError, match=\"`multiple` must be\"):\n kdeplot(data=long_df, x=\"x\", hue=\"a\", multiple=\"bad_input\")\n\n def test_cut(self, rng):\n\n x = rng.normal(0, 3, 1000)\n\n f, ax = plt.subplots()\n kdeplot(x=x, cut=0, legend=False)\n\n xdata_0 = ax.lines[0].get_xdata()\n assert xdata_0.min() == x.min()\n assert xdata_0.max() == x.max()\n\n kdeplot(x=x, cut=2, legend=False)\n\n xdata_2 = ax.lines[1].get_xdata()\n assert xdata_2.min() < xdata_0.min()\n assert xdata_2.max() > xdata_0.max()\n\n assert len(xdata_0) == len(xdata_2)\n\n def test_clip(self, rng):\n\n x = rng.normal(0, 3, 1000)\n\n clip = -1, 1\n ax = kdeplot(x=x, clip=clip)\n\n xdata = ax.lines[0].get_xdata()\n\n assert xdata.min() >= clip[0]\n assert xdata.max() <= clip[1]\n\n def test_line_is_density(self, long_df):\n\n ax = kdeplot(data=long_df, x=\"x\", cut=5)\n x, y = ax.lines[0].get_xydata().T\n assert integrate(y, x) == pytest.approx(1)\n\n @pytest.mark.skipif(_no_scipy, reason=\"Test requires scipy\")\n def test_cumulative(self, long_df):\n\n ax = kdeplot(data=long_df, x=\"x\", cut=5, cumulative=True)\n y = ax.lines[0].get_ydata()\n assert y[0] == pytest.approx(0)\n assert y[-1] == pytest.approx(1)\n\n @pytest.mark.skipif(not _no_scipy, reason=\"Test requires scipy's absence\")\n def test_cumulative_requires_scipy(self, long_df):\n\n with pytest.raises(RuntimeError):\n kdeplot(data=long_df, x=\"x\", cut=5, cumulative=True)\n\n def test_common_norm(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n kdeplot(\n data=long_df, x=\"x\", hue=\"c\", common_norm=True, cut=10, ax=ax1\n )\n kdeplot(\n data=long_df, x=\"x\", hue=\"c\", common_norm=False, cut=10, ax=ax2\n )\n\n total_area = 0\n for line in ax1.lines:\n xdata, ydata = line.get_xydata().T\n total_area += integrate(ydata, xdata)\n assert total_area == pytest.approx(1)\n\n for line in ax2.lines:\n xdata, ydata = line.get_xydata().T\n assert integrate(ydata, xdata) == pytest.approx(1)\n\n def test_common_grid(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n order = \"a\", \"b\", \"c\"\n\n kdeplot(\n data=long_df, x=\"x\", hue=\"a\", hue_order=order,\n common_grid=False, cut=0, ax=ax1,\n )\n kdeplot(\n data=long_df, x=\"x\", hue=\"a\", hue_order=order,\n common_grid=True, cut=0, ax=ax2,\n )\n\n for line, level in zip(ax1.lines[::-1], order):\n xdata = line.get_xdata()\n assert xdata.min() == long_df.loc[long_df[\"a\"] == level, \"x\"].min()\n assert xdata.max() == long_df.loc[long_df[\"a\"] == level, \"x\"].max()\n\n for line in ax2.lines:\n xdata = line.get_xdata().T\n assert xdata.min() == long_df[\"x\"].min()\n assert xdata.max() == long_df[\"x\"].max()\n\n def test_bw_method(self, long_df):\n\n f, ax = plt.subplots()\n 
kdeplot(data=long_df, x=\"x\", bw_method=0.2, legend=False)\n kdeplot(data=long_df, x=\"x\", bw_method=1.0, legend=False)\n kdeplot(data=long_df, x=\"x\", bw_method=3.0, legend=False)\n\n l1, l2, l3 = ax.lines\n\n assert (\n np.abs(np.diff(l1.get_ydata())).mean()\n > np.abs(np.diff(l2.get_ydata())).mean()\n )\n\n assert (\n np.abs(np.diff(l2.get_ydata())).mean()\n > np.abs(np.diff(l3.get_ydata())).mean()\n )\n\n def test_bw_adjust(self, long_df):\n\n f, ax = plt.subplots()\n kdeplot(data=long_df, x=\"x\", bw_adjust=0.2, legend=False)\n kdeplot(data=long_df, x=\"x\", bw_adjust=1.0, legend=False)\n kdeplot(data=long_df, x=\"x\", bw_adjust=3.0, legend=False)\n\n l1, l2, l3 = ax.lines\n\n assert (\n np.abs(np.diff(l1.get_ydata())).mean()\n > np.abs(np.diff(l2.get_ydata())).mean()\n )\n\n assert (\n np.abs(np.diff(l2.get_ydata())).mean()\n > np.abs(np.diff(l3.get_ydata())).mean()\n )\n\n def test_log_scale_implicit(self, rng):\n\n x = rng.lognormal(0, 1, 100)\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n ax1.set_xscale(\"log\")\n\n kdeplot(x=x, ax=ax1)\n kdeplot(x=x, ax=ax1)\n\n xdata_log = ax1.lines[0].get_xdata()\n assert (xdata_log > 0).all()\n assert (np.diff(xdata_log, 2) > 0).all()\n assert np.allclose(np.diff(np.log(xdata_log), 2), 0)\n\n f, ax = plt.subplots()\n ax.set_yscale(\"log\")\n kdeplot(y=x, ax=ax)\n assert_array_equal(ax.lines[0].get_xdata(), ax1.lines[0].get_ydata())\n\n def test_log_scale_explicit(self, rng):\n\n x = rng.lognormal(0, 1, 100)\n\n f, (ax1, ax2, ax3) = plt.subplots(ncols=3)\n\n ax1.set_xscale(\"log\")\n kdeplot(x=x, ax=ax1)\n kdeplot(x=x, log_scale=True, ax=ax2)\n kdeplot(x=x, log_scale=10, ax=ax3)\n\n for ax in f.axes:\n assert ax.get_xscale() == \"log\"\n\n supports = [ax.lines[0].get_xdata() for ax in f.axes]\n for a, b in itertools.product(supports, supports):\n assert_array_equal(a, b)\n\n densities = [ax.lines[0].get_ydata() for ax in f.axes]\n for a, b in itertools.product(densities, densities):\n assert_array_equal(a, b)\n\n f, ax = plt.subplots()\n kdeplot(y=x, log_scale=True, ax=ax)\n assert ax.get_yscale() == \"log\"\n\n def test_log_scale_with_hue(self, rng):\n\n data = rng.lognormal(0, 1, 50), rng.lognormal(0, 2, 100)\n ax = kdeplot(data=data, log_scale=True, common_grid=True)\n assert_array_equal(ax.lines[0].get_xdata(), ax.lines[1].get_xdata())\n\n def test_log_scale_normalization(self, rng):\n\n x = rng.lognormal(0, 1, 100)\n ax = kdeplot(x=x, log_scale=True, cut=10)\n xdata, ydata = ax.lines[0].get_xydata().T\n integral = integrate(ydata, np.log10(xdata))\n assert integral == pytest.approx(1)\n\n def test_weights(self):\n\n x = [1, 2]\n weights = [2, 1]\n\n ax = kdeplot(x=x, weights=weights, bw_method=.1)\n\n xdata, ydata = ax.lines[0].get_xydata().T\n\n y1 = ydata[np.abs(xdata - 1).argmin()]\n y2 = ydata[np.abs(xdata - 2).argmin()]\n\n assert y1 == pytest.approx(2 * y2)\n\n def test_sticky_edges(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n kdeplot(data=long_df, x=\"x\", fill=True, ax=ax1)\n assert ax1.collections[0].sticky_edges.y[:] == [0, np.inf]\n\n kdeplot(\n data=long_df, x=\"x\", hue=\"a\", multiple=\"fill\", fill=True, ax=ax2\n )\n assert ax2.collections[0].sticky_edges.y[:] == [0, 1]\n\n def test_line_kws(self, flat_array):\n\n lw = 3\n color = (.2, .5, .8)\n ax = kdeplot(x=flat_array, linewidth=lw, color=color)\n line, = ax.lines\n assert line.get_linewidth() == lw\n assert_colors_equal(line.get_color(), color)\n\n def test_input_checking(self, long_df):\n\n err = \"The x variable is categorical,\"\n with 
pytest.raises(TypeError, match=err):\n kdeplot(data=long_df, x=\"a\")\n\n def test_axis_labels(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n kdeplot(data=long_df, x=\"x\", ax=ax1)\n assert ax1.get_xlabel() == \"x\"\n assert ax1.get_ylabel() == \"Density\"\n\n kdeplot(data=long_df, y=\"y\", ax=ax2)\n assert ax2.get_xlabel() == \"Density\"\n assert ax2.get_ylabel() == \"y\"\n\n def test_legend(self, long_df):\n\n ax = kdeplot(data=long_df, x=\"x\", hue=\"a\")\n\n assert ax.legend_.get_title().get_text() == \"a\"\n\n legend_labels = ax.legend_.get_texts()\n order = categorical_order(long_df[\"a\"])\n for label, level in zip(legend_labels, order):\n assert label.get_text() == level\n\n legend_artists = ax.legend_.findobj(mpl.lines.Line2D)[::2]\n palette = color_palette()\n for artist, color in zip(legend_artists, palette):\n assert_colors_equal(artist.get_color(), color)\n\n ax.clear()\n\n kdeplot(data=long_df, x=\"x\", hue=\"a\", legend=False)\n\n assert ax.legend_ is None\n\n\nclass TestKDEPlotBivariate:\n\n def test_long_vectors(self, long_df):\n\n ax1 = kdeplot(data=long_df, x=\"x\", y=\"y\")\n\n x = long_df[\"x\"]\n x_values = [x, x.to_numpy(), x.to_list()]\n\n y = long_df[\"y\"]\n y_values = [y, y.to_numpy(), y.to_list()]\n\n for x, y in zip(x_values, y_values):\n f, ax2 = plt.subplots()\n kdeplot(x=x, y=y, ax=ax2)\n\n for c1, c2 in zip(ax1.collections, ax2.collections):\n assert_array_equal(c1.get_offsets(), c2.get_offsets())\n\n def test_singular_data(self):\n\n with pytest.warns(UserWarning):\n ax = dist.kdeplot(x=np.ones(10), y=np.arange(10))\n assert not ax.lines\n\n with pytest.warns(UserWarning):\n ax = dist.kdeplot(x=[5], y=[6])\n assert not ax.lines\n\n def test_fill_artists(self, long_df):\n\n for fill in [True, False]:\n f, ax = plt.subplots()\n kdeplot(data=long_df, x=\"x\", y=\"y\", hue=\"c\", fill=fill)\n for c in ax.collections:\n if fill:\n assert isinstance(c, mpl.collections.PathCollection)\n else:\n assert isinstance(c, mpl.collections.LineCollection)\n\n def test_common_norm(self, rng):\n\n hue = np.repeat([\"a\", \"a\", \"a\", \"b\"], 40)\n x, y = rng.multivariate_normal([0, 0], [(.2, .5), (.5, 2)], len(hue)).T\n x[hue == \"a\"] -= 2\n x[hue == \"b\"] += 2\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n kdeplot(x=x, y=y, hue=hue, common_norm=True, ax=ax1)\n kdeplot(x=x, y=y, hue=hue, common_norm=False, ax=ax2)\n\n n_seg_1 = sum([len(c.get_segments()) > 0 for c in ax1.collections])\n n_seg_2 = sum([len(c.get_segments()) > 0 for c in ax2.collections])\n assert n_seg_2 > n_seg_1\n\n def test_log_scale(self, rng):\n\n x = rng.lognormal(0, 1, 100)\n y = rng.uniform(0, 1, 100)\n\n levels = .2, .5, 1\n\n f, ax = plt.subplots()\n kdeplot(x=x, y=y, log_scale=True, levels=levels, ax=ax)\n assert ax.get_xscale() == \"log\"\n assert ax.get_yscale() == \"log\"\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n kdeplot(x=x, y=y, log_scale=(10, False), levels=levels, ax=ax1)\n assert ax1.get_xscale() == \"log\"\n assert ax1.get_yscale() == \"linear\"\n\n p = _DistributionPlotter()\n kde = KDE()\n density, (xx, yy) = kde(np.log10(x), y)\n levels = p._quantile_to_level(density, levels)\n ax2.contour(10 ** xx, yy, density, levels=levels)\n\n for c1, c2 in zip(ax1.collections, ax2.collections):\n assert_array_equal(c1.get_segments(), c2.get_segments())\n\n def test_bandwiddth(self, rng):\n\n n = 100\n x, y = rng.multivariate_normal([0, 0], [(.2, .5), (.5, 2)], n).T\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n kdeplot(x=x, y=y, ax=ax1)\n kdeplot(x=x, y=y, bw_adjust=2, ax=ax2)\n\n 
for c1, c2 in zip(ax1.collections, ax2.collections):\n seg1, seg2 = c1.get_segments(), c2.get_segments()\n if seg1 + seg2:\n x1 = seg1[0][:, 0]\n x2 = seg2[0][:, 0]\n assert np.abs(x2).max() > np.abs(x1).max()\n\n def test_weights(self, rng):\n\n import warnings\n warnings.simplefilter(\"error\", np.VisibleDeprecationWarning)\n\n n = 100\n x, y = rng.multivariate_normal([1, 3], [(.2, .5), (.5, 2)], n).T\n hue = np.repeat([0, 1], n // 2)\n weights = rng.uniform(0, 1, n)\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n kdeplot(x=x, y=y, hue=hue, ax=ax1)\n kdeplot(x=x, y=y, hue=hue, weights=weights, ax=ax2)\n\n for c1, c2 in zip(ax1.collections, ax2.collections):\n if c1.get_segments() and c2.get_segments():\n seg1 = np.concatenate(c1.get_segments(), axis=0)\n seg2 = np.concatenate(c2.get_segments(), axis=0)\n assert not np.array_equal(seg1, seg2)\n\n def test_hue_ignores_cmap(self, long_df):\n\n with pytest.warns(UserWarning, match=\"cmap parameter ignored\"):\n ax = kdeplot(data=long_df, x=\"x\", y=\"y\", hue=\"c\", cmap=\"viridis\")\n\n assert_colors_equal(ax.collections[0].get_color(), \"C0\")\n\n def test_contour_line_colors(self, long_df):\n\n color = (.2, .9, .8, 1)\n ax = kdeplot(data=long_df, x=\"x\", y=\"y\", color=color)\n\n for c in ax.collections:\n assert_colors_equal(c.get_color(), color)\n\n def test_contour_fill_colors(self, long_df):\n\n n = 6\n color = (.2, .9, .8, 1)\n ax = kdeplot(\n data=long_df, x=\"x\", y=\"y\", fill=True, color=color, levels=n,\n )\n\n cmap = light_palette(color, reverse=True, as_cmap=True)\n lut = cmap(np.linspace(0, 1, 256))\n for c in ax.collections:\n color = c.get_facecolor().squeeze()\n assert color in lut\n\n def test_colorbar(self, long_df):\n\n ax = kdeplot(data=long_df, x=\"x\", y=\"y\", fill=True, cbar=True)\n assert len(ax.figure.axes) == 2\n\n def test_levels_and_thresh(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(ncols=2)\n\n n = 8\n thresh = .1\n plot_kws = dict(data=long_df, x=\"x\", y=\"y\")\n kdeplot(**plot_kws, levels=n, thresh=thresh, ax=ax1)\n kdeplot(**plot_kws, levels=np.linspace(thresh, 1, n), ax=ax2)\n\n for c1, c2 in zip(ax1.collections, ax2.collections):\n assert_array_equal(c1.get_segments(), c2.get_segments())\n\n with pytest.raises(ValueError):\n kdeplot(**plot_kws, levels=[0, 1, 2])\n\n ax1.clear()\n ax2.clear()\n\n kdeplot(**plot_kws, levels=n, thresh=None, ax=ax1)\n kdeplot(**plot_kws, levels=n, thresh=0, ax=ax2)\n\n for c1, c2 in zip(ax1.collections, ax2.collections):\n assert_array_equal(c1.get_segments(), c2.get_segments())\n for c1, c2 in zip(ax1.collections, ax2.collections):\n assert_array_equal(c1.get_facecolors(), c2.get_facecolors())\n\n def test_quantile_to_level(self, rng):\n\n x = rng.uniform(0, 1, 100000)\n isoprop = np.linspace(.1, 1, 6)\n\n levels = _DistributionPlotter()._quantile_to_level(x, isoprop)\n for h, p in zip(levels, isoprop):\n assert (x[x <= h].sum() / x.sum()) == pytest.approx(p, abs=1e-4)\n\n def test_input_checking(self, long_df):\n\n with pytest.raises(TypeError, match=\"The x variable is categorical,\"):\n kdeplot(data=long_df, x=\"a\", y=\"y\")\n\n\nclass TestHistPlotUnivariate(SharedAxesLevelTests):\n\n func = staticmethod(histplot)\n\n def get_last_color(self, ax, element=\"bars\", fill=True):\n\n if element == \"bars\":\n if fill:\n return ax.patches[-1].get_facecolor()\n else:\n return ax.patches[-1].get_edgecolor()\n else:\n if fill:\n artist = ax.collections[-1]\n facecolor = artist.get_facecolor()\n edgecolor = artist.get_edgecolor()\n assert_colors_equal(facecolor, edgecolor, 
check_alpha=False)\n return facecolor\n else:\n return ax.lines[-1].get_color()\n\n @pytest.mark.parametrize(\n \"element,fill\",\n itertools.product([\"bars\", \"step\", \"poly\"], [True, False]),\n )\n def test_color(self, long_df, element, fill):\n\n super().test_color(long_df, element=element, fill=fill)\n\n @pytest.mark.parametrize(\n \"variable\", [\"x\", \"y\"],\n )\n def test_long_vectors(self, long_df, variable):\n\n vector = long_df[variable]\n vectors = [\n variable, vector, vector.to_numpy(), vector.to_list(),\n ]\n\n f, axs = plt.subplots(3)\n for vector, ax in zip(vectors, axs):\n histplot(data=long_df, ax=ax, **{variable: vector})\n\n bars = [ax.patches for ax in axs]\n for a_bars, b_bars in itertools.product(bars, bars):\n for a, b in zip(a_bars, b_bars):\n assert_array_equal(a.get_height(), b.get_height())\n assert_array_equal(a.get_xy(), b.get_xy())\n\n def test_wide_vs_long_data(self, wide_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n histplot(data=wide_df, ax=ax1, common_bins=False)\n\n for col in wide_df.columns[::-1]:\n histplot(data=wide_df, x=col, ax=ax2)\n\n for a, b in zip(ax1.patches, ax2.patches):\n assert a.get_height() == b.get_height()\n assert a.get_xy() == b.get_xy()\n\n def test_flat_vector(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n histplot(data=long_df[\"x\"], ax=ax1)\n histplot(data=long_df, x=\"x\", ax=ax2)\n\n for a, b in zip(ax1.patches, ax2.patches):\n assert a.get_height() == b.get_height()\n assert a.get_xy() == b.get_xy()\n\n def test_empty_data(self):\n\n ax = histplot(x=[])\n assert not ax.patches\n\n def test_variable_assignment(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n histplot(data=long_df, x=\"x\", ax=ax1)\n histplot(data=long_df, y=\"x\", ax=ax2)\n\n for a, b in zip(ax1.patches, ax2.patches):\n assert a.get_height() == b.get_width()\n\n @pytest.mark.parametrize(\"element\", [\"bars\", \"step\", \"poly\"])\n @pytest.mark.parametrize(\"multiple\", [\"layer\", \"dodge\", \"stack\", \"fill\"])\n def test_hue_fill_colors(self, long_df, multiple, element):\n\n ax = histplot(\n data=long_df, x=\"x\", hue=\"a\",\n multiple=multiple, bins=1,\n fill=True, element=element, legend=False,\n )\n\n palette = color_palette()\n\n if multiple == \"layer\":\n if element == \"bars\":\n a = .5\n else:\n a = .25\n else:\n a = .75\n\n for bar, color in zip(ax.patches[::-1], palette):\n assert_colors_equal(bar.get_facecolor(), to_rgba(color, a))\n\n for poly, color in zip(ax.collections[::-1], palette):\n assert_colors_equal(poly.get_facecolor(), to_rgba(color, a))\n\n def test_hue_stack(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n n = 10\n\n kws = dict(data=long_df, x=\"x\", hue=\"a\", bins=n, element=\"bars\")\n\n histplot(**kws, multiple=\"layer\", ax=ax1)\n histplot(**kws, multiple=\"stack\", ax=ax2)\n\n layer_heights = np.reshape([b.get_height() for b in ax1.patches], (-1, n))\n stack_heights = np.reshape([b.get_height() for b in ax2.patches], (-1, n))\n assert_array_equal(layer_heights, stack_heights)\n\n stack_xys = np.reshape([b.get_xy() for b in ax2.patches], (-1, n, 2))\n assert_array_equal(\n stack_xys[..., 1] + stack_heights,\n stack_heights.cumsum(axis=0),\n )\n\n def test_hue_fill(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n n = 10\n\n kws = dict(data=long_df, x=\"x\", hue=\"a\", bins=n, element=\"bars\")\n\n histplot(**kws, multiple=\"layer\", ax=ax1)\n histplot(**kws, multiple=\"fill\", ax=ax2)\n\n layer_heights = np.reshape([b.get_height() for b in ax1.patches], (-1, n))\n stack_heights = 
np.reshape([b.get_height() for b in ax2.patches], (-1, n))\n assert_array_almost_equal(\n layer_heights / layer_heights.sum(axis=0), stack_heights\n )\n\n stack_xys = np.reshape([b.get_xy() for b in ax2.patches], (-1, n, 2))\n assert_array_almost_equal(\n (stack_xys[..., 1] + stack_heights) / stack_heights.sum(axis=0),\n stack_heights.cumsum(axis=0),\n )\n\n def test_hue_dodge(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n bw = 2\n\n kws = dict(data=long_df, x=\"x\", hue=\"c\", binwidth=bw, element=\"bars\")\n\n histplot(**kws, multiple=\"layer\", ax=ax1)\n histplot(**kws, multiple=\"dodge\", ax=ax2)\n\n layer_heights = [b.get_height() for b in ax1.patches]\n dodge_heights = [b.get_height() for b in ax2.patches]\n assert_array_equal(layer_heights, dodge_heights)\n\n layer_xs = np.reshape([b.get_x() for b in ax1.patches], (2, -1))\n dodge_xs = np.reshape([b.get_x() for b in ax2.patches], (2, -1))\n assert_array_almost_equal(layer_xs[1], dodge_xs[1])\n assert_array_almost_equal(layer_xs[0], dodge_xs[0] - bw / 2)\n\n def test_hue_as_numpy_dodged(self, long_df):\n # https://github.com/mwaskom/seaborn/issues/2452\n\n ax = histplot(\n long_df,\n x=\"y\", hue=long_df[\"a\"].to_numpy(),\n multiple=\"dodge\", bins=1,\n )\n # Note hue order reversal\n assert ax.patches[1].get_x() < ax.patches[0].get_x()\n\n def test_multiple_input_check(self, flat_series):\n\n with pytest.raises(ValueError, match=\"`multiple` must be\"):\n histplot(flat_series, multiple=\"invalid\")\n\n def test_element_input_check(self, flat_series):\n\n with pytest.raises(ValueError, match=\"`element` must be\"):\n histplot(flat_series, element=\"invalid\")\n\n def test_count_stat(self, flat_series):\n\n ax = histplot(flat_series, stat=\"count\")\n bar_heights = [b.get_height() for b in ax.patches]\n assert sum(bar_heights) == len(flat_series)\n\n def test_density_stat(self, flat_series):\n\n ax = histplot(flat_series, stat=\"density\")\n bar_heights = [b.get_height() for b in ax.patches]\n bar_widths = [b.get_width() for b in ax.patches]\n assert np.multiply(bar_heights, bar_widths).sum() == pytest.approx(1)\n\n def test_density_stat_common_norm(self, long_df):\n\n ax = histplot(\n data=long_df, x=\"x\", hue=\"a\",\n stat=\"density\", common_norm=True, element=\"bars\",\n )\n bar_heights = [b.get_height() for b in ax.patches]\n bar_widths = [b.get_width() for b in ax.patches]\n assert np.multiply(bar_heights, bar_widths).sum() == pytest.approx(1)\n\n def test_density_stat_unique_norm(self, long_df):\n\n n = 10\n ax = histplot(\n data=long_df, x=\"x\", hue=\"a\",\n stat=\"density\", bins=n, common_norm=False, element=\"bars\",\n )\n\n bar_groups = ax.patches[:n], ax.patches[-n:]\n\n for bars in bar_groups:\n bar_heights = [b.get_height() for b in bars]\n bar_widths = [b.get_width() for b in bars]\n bar_areas = np.multiply(bar_heights, bar_widths)\n assert bar_areas.sum() == pytest.approx(1)\n\n def test_probability_stat(self, flat_series):\n\n ax = histplot(flat_series, stat=\"probability\")\n bar_heights = [b.get_height() for b in ax.patches]\n assert sum(bar_heights) == pytest.approx(1)\n\n def test_probability_stat_common_norm(self, long_df):\n\n ax = histplot(\n data=long_df, x=\"x\", hue=\"a\",\n stat=\"probability\", common_norm=True, element=\"bars\",\n )\n bar_heights = [b.get_height() for b in ax.patches]\n assert sum(bar_heights) == pytest.approx(1)\n\n def test_probability_stat_unique_norm(self, long_df):\n\n n = 10\n ax = histplot(\n data=long_df, x=\"x\", hue=\"a\",\n stat=\"probability\", bins=n, 
common_norm=False, element=\"bars\",\n )\n\n bar_groups = ax.patches[:n], ax.patches[-n:]\n\n for bars in bar_groups:\n bar_heights = [b.get_height() for b in bars]\n assert sum(bar_heights) == pytest.approx(1)\n\n def test_percent_stat(self, flat_series):\n\n ax = histplot(flat_series, stat=\"percent\")\n bar_heights = [b.get_height() for b in ax.patches]\n assert sum(bar_heights) == 100\n\n def test_common_bins(self, long_df):\n\n n = 10\n ax = histplot(\n long_df, x=\"x\", hue=\"a\", common_bins=True, bins=n, element=\"bars\",\n )\n\n bar_groups = ax.patches[:n], ax.patches[-n:]\n assert_array_equal(\n [b.get_xy() for b in bar_groups[0]],\n [b.get_xy() for b in bar_groups[1]]\n )\n\n def test_unique_bins(self, wide_df):\n\n ax = histplot(wide_df, common_bins=False, bins=10, element=\"bars\")\n\n bar_groups = np.split(np.array(ax.patches), len(wide_df.columns))\n\n for i, col in enumerate(wide_df.columns[::-1]):\n bars = bar_groups[i]\n start = bars[0].get_x()\n stop = bars[-1].get_x() + bars[-1].get_width()\n assert start == wide_df[col].min()\n assert stop == wide_df[col].max()\n\n def test_weights_with_missing(self, missing_df):\n\n ax = histplot(missing_df, x=\"x\", weights=\"s\", bins=5)\n\n bar_heights = [bar.get_height() for bar in ax.patches]\n total_weight = missing_df[[\"x\", \"s\"]].dropna()[\"s\"].sum()\n assert sum(bar_heights) == pytest.approx(total_weight)\n\n def test_discrete(self, long_df):\n\n ax = histplot(long_df, x=\"s\", discrete=True)\n\n data_min = long_df[\"s\"].min()\n data_max = long_df[\"s\"].max()\n assert len(ax.patches) == (data_max - data_min + 1)\n\n for i, bar in enumerate(ax.patches):\n assert bar.get_width() == 1\n assert bar.get_x() == (data_min + i - .5)\n\n def test_discrete_categorical_default(self, long_df):\n\n ax = histplot(long_df, x=\"a\")\n for i, bar in enumerate(ax.patches):\n assert bar.get_width() == 1\n\n def test_categorical_yaxis_inversion(self, long_df):\n\n ax = histplot(long_df, y=\"a\")\n ymax, ymin = ax.get_ylim()\n assert ymax > ymin\n\n def test_discrete_requires_bars(self, long_df):\n\n with pytest.raises(ValueError, match=\"`element` must be 'bars'\"):\n histplot(long_df, x=\"s\", discrete=True, element=\"poly\")\n\n @pytest.mark.skipif(\n LooseVersion(np.__version__) < \"1.17\",\n reason=\"Histogram over datetime64 requires numpy >= 1.17\",\n )\n def test_datetime_scale(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n histplot(x=long_df[\"t\"], fill=True, ax=ax1)\n histplot(x=long_df[\"t\"], fill=False, ax=ax2)\n assert ax1.get_xlim() == ax2.get_xlim()\n\n @pytest.mark.parametrize(\"stat\", [\"count\", \"density\", \"probability\"])\n def test_kde(self, flat_series, stat):\n\n ax = histplot(\n flat_series, kde=True, stat=stat, kde_kws={\"cut\": 10}\n )\n\n bar_widths = [b.get_width() for b in ax.patches]\n bar_heights = [b.get_height() for b in ax.patches]\n hist_area = np.multiply(bar_widths, bar_heights).sum()\n\n density, = ax.lines\n kde_area = integrate(density.get_ydata(), density.get_xdata())\n\n assert kde_area == pytest.approx(hist_area)\n\n @pytest.mark.parametrize(\"multiple\", [\"layer\", \"dodge\"])\n @pytest.mark.parametrize(\"stat\", [\"count\", \"density\", \"probability\"])\n def test_kde_with_hue(self, long_df, stat, multiple):\n\n n = 10\n ax = histplot(\n long_df, x=\"x\", hue=\"c\", multiple=multiple,\n kde=True, stat=stat, element=\"bars\",\n kde_kws={\"cut\": 10}, bins=n,\n )\n\n bar_groups = ax.patches[:n], ax.patches[-n:]\n\n for i, bars in enumerate(bar_groups):\n bar_widths = [b.get_width() 
for b in bars]\n bar_heights = [b.get_height() for b in bars]\n hist_area = np.multiply(bar_widths, bar_heights).sum()\n\n x, y = ax.lines[i].get_xydata().T\n kde_area = integrate(y, x)\n\n if multiple == \"layer\":\n assert kde_area == pytest.approx(hist_area)\n elif multiple == \"dodge\":\n assert kde_area == pytest.approx(hist_area * 2)\n\n def test_kde_default_cut(self, flat_series):\n\n ax = histplot(flat_series, kde=True)\n support = ax.lines[0].get_xdata()\n assert support.min() == flat_series.min()\n assert support.max() == flat_series.max()\n\n def test_kde_hue(self, long_df):\n\n n = 10\n ax = histplot(data=long_df, x=\"x\", hue=\"a\", kde=True, bins=n)\n\n for bar, line in zip(ax.patches[::n], ax.lines):\n assert_colors_equal(\n bar.get_facecolor(), line.get_color(), check_alpha=False\n )\n\n def test_kde_yaxis(self, flat_series):\n\n f, ax = plt.subplots()\n histplot(x=flat_series, kde=True)\n histplot(y=flat_series, kde=True)\n\n x, y = ax.lines\n assert_array_equal(x.get_xdata(), y.get_ydata())\n assert_array_equal(x.get_ydata(), y.get_xdata())\n\n def test_kde_line_kws(self, flat_series):\n\n lw = 5\n ax = histplot(flat_series, kde=True, line_kws=dict(lw=lw))\n assert ax.lines[0].get_linewidth() == lw\n\n def test_kde_singular_data(self):\n\n with pytest.warns(UserWarning):\n ax = histplot(x=np.ones(10), kde=True)\n assert not ax.lines\n\n with pytest.warns(UserWarning):\n ax = histplot(x=[5], kde=True)\n assert not ax.lines\n\n def test_element_default(self, long_df):\n\n f, (ax1, ax2) = plt.subplots(2)\n histplot(long_df, x=\"x\", ax=ax1)\n histplot(long_df, x=\"x\", ax=ax2, element=\"bars\")\n assert len(ax1.patches) == len(ax2.patches)\n\n f, (ax1, ax2) = plt.subplots(2)\n histplot(long_df, x=\"x\", hue=\"a\", ax=ax1)\n histplot(long_df, x=\"x\", hue=\"a\", ax=ax2, element=\"bars\")\n assert len(ax1.patches) == len(ax2.patches)\n\n def test_bars_no_fill(self, flat_series):\n\n alpha = .5\n ax = histplot(flat_series, element=\"bars\", fill=False, alpha=alpha)\n for bar in ax.patches:\n assert bar.get_facecolor() == (0, 0, 0, 0)\n assert bar.get_edgecolor()[-1] == alpha\n\n def test_step_fill(self, flat_series):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n n = 10\n histplot(flat_series, element=\"bars\", fill=True, bins=n, ax=ax1)\n histplot(flat_series, element=\"step\", fill=True, bins=n, ax=ax2)\n\n bar_heights = [b.get_height() for b in ax1.patches]\n bar_widths = [b.get_width() for b in ax1.patches]\n bar_edges = [b.get_x() for b in ax1.patches]\n\n fill = ax2.collections[0]\n x, y = fill.get_paths()[0].vertices[::-1].T\n\n assert_array_equal(x[1:2 * n:2], bar_edges)\n assert_array_equal(y[1:2 * n:2], bar_heights)\n\n assert x[n * 2] == bar_edges[-1] + bar_widths[-1]\n assert y[n * 2] == bar_heights[-1]\n\n def test_poly_fill(self, flat_series):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n n = 10\n histplot(flat_series, element=\"bars\", fill=True, bins=n, ax=ax1)\n histplot(flat_series, element=\"poly\", fill=True, bins=n, ax=ax2)\n\n bar_heights = np.array([b.get_height() for b in ax1.patches])\n bar_widths = np.array([b.get_width() for b in ax1.patches])\n bar_edges = np.array([b.get_x() for b in ax1.patches])\n\n fill = ax2.collections[0]\n x, y = fill.get_paths()[0].vertices[::-1].T\n\n assert_array_equal(x[1:n + 1], bar_edges + bar_widths / 2)\n assert_array_equal(y[1:n + 1], bar_heights)\n\n def test_poly_no_fill(self, flat_series):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n n = 10\n histplot(flat_series, element=\"bars\", fill=False, bins=n, ax=ax1)\n histplot(flat_series, 
element=\"poly\", fill=False, bins=n, ax=ax2)\n\n bar_heights = np.array([b.get_height() for b in ax1.patches])\n bar_widths = np.array([b.get_width() for b in ax1.patches])\n bar_edges = np.array([b.get_x() for b in ax1.patches])\n\n x, y = ax2.lines[0].get_xydata().T\n\n assert_array_equal(x, bar_edges + bar_widths / 2)\n assert_array_equal(y, bar_heights)\n\n def test_step_no_fill(self, flat_series):\n\n f, (ax1, ax2) = plt.subplots(2)\n\n histplot(flat_series, element=\"bars\", fill=False, ax=ax1)\n histplot(flat_series, element=\"step\", fill=False, ax=ax2)\n\n bar_heights = [b.get_height() for b in ax1.patches]\n bar_widths = [b.get_width() for b in ax1.patches]\n bar_edges = [b.get_x() for b in ax1.patches]\n\n x, y = ax2.lines[0].get_xydata().T\n\n assert_array_equal(x[:-1], bar_edges)\n assert_array_equal(y[:-1], bar_heights)\n assert x[-1] == bar_edges[-1] + bar_widths[-1]\n assert y[-1] == y[-2]\n\n def test_step_fill_xy(self, flat_series):\n\n f, ax = plt.subplots()\n\n histplot(x=flat_series, element=\"step\", fill=True)\n histplot(y=flat_series, element=\"step\", fill=True)\n\n xverts = ax.collections[0].get_paths()[0].vertices\n yverts = ax.collections[1].get_paths()[0].vertices\n\n assert_array_equal(xverts, yverts[:, ::-1])\n\n def test_step_no_fill_xy(self, flat_series):\n\n f, ax = plt.subplots()\n\n histplot(x=flat_series, element=\"step\", fill=False)\n histplot(y=flat_series, element=\"step\", fill=False)\n\n xline, yline = ax.lines\n\n assert_array_equal(xline.get_xdata(), yline.get_ydata())\n assert_array_equal(xline.get_ydata(), yline.get_xdata())\n\n def test_weighted_histogram(self):\n\n ax = histplot(x=[0, 1, 2], weights=[1, 2, 3], discrete=True)\n\n bar_heights = [b.get_height() for b in ax.patches]\n assert bar_heights == [1, 2, 3]\n\n def test_weights_with_auto_bins(self, long_df):\n\n with pytest.warns(UserWarning):\n ax = histplot(long_df, x=\"x\", weights=\"f\")\n assert len(ax.patches) == 10\n\n def test_shrink(self, long_df):\n\n bw = 2\n shrink = .5\n ax = histplot(long_df, x=\"x\", binwidth=bw, shrink=shrink)\n assert ax.patches[0].get_width() == bw * shrink\n\n def test_log_scale_explicit(self, rng):\n\n x = rng.lognormal(0, 2, 1000)\n ax = histplot(x, log_scale=True, binwidth=1)\n\n bar_widths = [b.get_width() for b in ax.patches]\n steps = np.divide(bar_widths[1:], bar_widths[:-1])\n assert np.allclose(steps, 10)\n\n def test_log_scale_implicit(self, rng):\n\n x = rng.lognormal(0, 2, 1000)\n\n f, ax = plt.subplots()\n ax.set_xscale(\"log\")\n histplot(x, binwidth=1, ax=ax)\n\n bar_widths = [b.get_width() for b in ax.patches]\n steps = np.divide(bar_widths[1:], bar_widths[:-1])\n assert np.allclose(steps, 10)\n\n @pytest.mark.parametrize(\n \"fill\", [True, False],\n )\n def test_auto_linewidth(self, flat_series, fill):\n\n get_lw = lambda ax: ax.patches[0].get_linewidth() # noqa: E731\n\n kws = dict(element=\"bars\", fill=fill)\n\n f, (ax1, ax2) = plt.subplots(2)\n histplot(flat_series, **kws, bins=10, ax=ax1)\n histplot(flat_series, **kws, bins=100, ax=ax2)\n assert get_lw(ax1) > get_lw(ax2)\n\n f, ax1 = plt.subplots(figsize=(10, 5))\n f, ax2 = plt.subplots(figsize=(2, 5))\n histplot(flat_series, **kws, bins=30, ax=ax1)\n histplot(flat_series, **kws, bins=30, ax=ax2)\n assert get_lw(ax1) > get_lw(ax2)\n\n def test_bar_kwargs(self, flat_series):\n\n lw = 2\n ec = (1, .2, .9, .5)\n ax = histplot(flat_series, binwidth=1, ec=ec, lw=lw)\n for bar in ax.patches:\n assert_colors_equal(bar.get_edgecolor(), ec)\n assert bar.get_linewidth() == lw\n\n def 
test_step_fill_kwargs(self, flat_series):\n\n lw = 2\n ec = (1, .2, .9, .5)\n ax = histplot(flat_series, element=\"step\", ec=ec, lw=lw)\n poly = ax.collections[0]\n assert_colors_equal(poly.get_edgecolor(), ec)\n assert poly.get_linewidth() == lw\n\n def test_step_line_kwargs(self, flat_series):\n\n lw = 2\n ls = \"--\"\n ax = histplot(flat_series, element=\"step\", fill=False, lw=lw, ls=ls)\n line = ax.lines[0]\n assert line.get_linewidth() == lw\n assert line.get_linestyle() == ls\n\n\nclass TestHistPlotBivariate:\n\n def test_mesh(self, long_df):\n\n hist = Histogram()\n counts, (x_edges, y_edges) = hist(long_df[\"x\"], long_df[\"y\"])\n\n ax = histplot(long_df, x=\"x\", y=\"y\")\n mesh = ax.collections[0]\n mesh_data = mesh.get_array()\n\n assert_array_equal(mesh_data.data, counts.T.flat)\n assert_array_equal(mesh_data.mask, counts.T.flat == 0)\n\n edges = itertools.product(y_edges[:-1], x_edges[:-1])\n for i, (y, x) in enumerate(edges):\n path = mesh.get_paths()[i]\n assert path.vertices[0, 0] == x\n assert path.vertices[0, 1] == y\n\n def test_mesh_with_hue(self, long_df):\n\n ax = histplot(long_df, x=\"x\", y=\"y\", hue=\"c\")\n\n hist = Histogram()\n hist.define_bin_edges(long_df[\"x\"], long_df[\"y\"])\n\n for i, sub_df in long_df.groupby(\"c\"):\n\n mesh = ax.collections[i]\n mesh_data = mesh.get_array()\n\n counts, (x_edges, y_edges) = hist(sub_df[\"x\"], sub_df[\"y\"])\n\n assert_array_equal(mesh_data.data, counts.T.flat)\n assert_array_equal(mesh_data.mask, counts.T.flat == 0)\n\n edges = itertools.product(y_edges[:-1], x_edges[:-1])\n for i, (y, x) in enumerate(edges):\n path = mesh.get_paths()[i]\n assert path.vertices[0, 0] == x\n assert path.vertices[0, 1] == y\n\n def test_mesh_with_hue_unique_bins(self, long_df):\n\n ax = histplot(long_df, x=\"x\", y=\"y\", hue=\"c\", common_bins=False)\n\n for i, sub_df in long_df.groupby(\"c\"):\n\n hist = Histogram()\n\n mesh = ax.collections[i]\n mesh_data = mesh.get_array()\n\n counts, (x_edges, y_edges) = hist(sub_df[\"x\"], sub_df[\"y\"])\n\n assert_array_equal(mesh_data.data, counts.T.flat)\n assert_array_equal(mesh_data.mask, counts.T.flat == 0)\n\n edges = itertools.product(y_edges[:-1], x_edges[:-1])\n for i, (y, x) in enumerate(edges):\n path = mesh.get_paths()[i]\n assert path.vertices[0, 0] == x\n assert path.vertices[0, 1] == y\n\n def test_mesh_log_scale(self, rng):\n\n x, y = rng.lognormal(0, 1, (2, 1000))\n hist = Histogram()\n counts, (x_edges, y_edges) = hist(np.log10(x), np.log10(y))\n\n ax = histplot(x=x, y=y, log_scale=True)\n mesh = ax.collections[0]\n mesh_data = mesh.get_array()\n\n assert_array_equal(mesh_data.data, counts.T.flat)\n\n edges = itertools.product(y_edges[:-1], x_edges[:-1])\n for i, (y_i, x_i) in enumerate(edges):\n path = mesh.get_paths()[i]\n assert path.vertices[0, 0] == 10 ** x_i\n assert path.vertices[0, 1] == 10 ** y_i\n\n def test_mesh_thresh(self, long_df):\n\n hist = Histogram()\n counts, (x_edges, y_edges) = hist(long_df[\"x\"], long_df[\"y\"])\n\n thresh = 5\n ax = histplot(long_df, x=\"x\", y=\"y\", thresh=thresh)\n mesh = ax.collections[0]\n mesh_data = mesh.get_array()\n\n assert_array_equal(mesh_data.data, counts.T.flat)\n assert_array_equal(mesh_data.mask, (counts <= thresh).T.flat)\n\n def test_mesh_sticky_edges(self, long_df):\n\n ax = histplot(long_df, x=\"x\", y=\"y\", thresh=None)\n mesh = ax.collections[0]\n assert mesh.sticky_edges.x == [long_df[\"x\"].min(), long_df[\"x\"].max()]\n assert mesh.sticky_edges.y == [long_df[\"y\"].min(), long_df[\"y\"].max()]\n\n ax.clear()\n 
ax = histplot(long_df, x=\"x\", y=\"y\")\n mesh = ax.collections[0]\n assert not mesh.sticky_edges.x\n assert not mesh.sticky_edges.y\n\n def test_mesh_common_norm(self, long_df):\n\n stat = \"density\"\n ax = histplot(\n long_df, x=\"x\", y=\"y\", hue=\"c\", common_norm=True, stat=stat,\n )\n\n hist = Histogram(stat=\"density\")\n hist.define_bin_edges(long_df[\"x\"], long_df[\"y\"])\n\n for i, sub_df in long_df.groupby(\"c\"):\n\n mesh = ax.collections[i]\n mesh_data = mesh.get_array()\n\n density, (x_edges, y_edges) = hist(sub_df[\"x\"], sub_df[\"y\"])\n\n scale = len(sub_df) / len(long_df)\n assert_array_equal(mesh_data.data, (density * scale).T.flat)\n\n def test_mesh_unique_norm(self, long_df):\n\n stat = \"density\"\n ax = histplot(\n long_df, x=\"x\", y=\"y\", hue=\"c\", common_norm=False, stat=stat,\n )\n\n hist = Histogram()\n hist.define_bin_edges(long_df[\"x\"], long_df[\"y\"])\n\n for i, sub_df in long_df.groupby(\"c\"):\n\n sub_hist = Histogram(bins=hist.bin_edges, stat=stat)\n\n mesh = ax.collections[i]\n mesh_data = mesh.get_array()\n\n density, (x_edges, y_edges) = sub_hist(sub_df[\"x\"], sub_df[\"y\"])\n assert_array_equal(mesh_data.data, density.T.flat)\n\n @pytest.mark.parametrize(\"stat\", [\"probability\", \"percent\"])\n def test_mesh_normalization(self, long_df, stat):\n\n ax = histplot(\n long_df, x=\"x\", y=\"y\", stat=stat,\n )\n\n mesh_data = ax.collections[0].get_array()\n expected_sum = {\"probability\": 1, \"percent\": 100}[stat]\n assert mesh_data.data.sum() == expected_sum\n\n def test_mesh_colors(self, long_df):\n\n color = \"r\"\n f, ax = plt.subplots()\n histplot(\n long_df, x=\"x\", y=\"y\", color=color,\n )\n mesh = ax.collections[0]\n assert_array_equal(\n mesh.get_cmap().colors,\n _DistributionPlotter()._cmap_from_color(color).colors,\n )\n\n f, ax = plt.subplots()\n histplot(\n long_df, x=\"x\", y=\"y\", hue=\"c\",\n )\n colors = color_palette()\n for i, mesh in enumerate(ax.collections):\n assert_array_equal(\n mesh.get_cmap().colors,\n _DistributionPlotter()._cmap_from_color(colors[i]).colors,\n )\n\n def test_color_limits(self, long_df):\n\n f, (ax1, ax2, ax3) = plt.subplots(3)\n kws = dict(data=long_df, x=\"x\", y=\"y\")\n hist = Histogram()\n counts, _ = hist(long_df[\"x\"], long_df[\"y\"])\n\n histplot(**kws, ax=ax1)\n assert ax1.collections[0].get_clim() == (0, counts.max())\n\n vmax = 10\n histplot(**kws, vmax=vmax, ax=ax2)\n counts, _ = hist(long_df[\"x\"], long_df[\"y\"])\n assert ax2.collections[0].get_clim() == (0, vmax)\n\n pmax = .8\n pthresh = .1\n f = _DistributionPlotter()._quantile_to_level\n\n histplot(**kws, pmax=pmax, pthresh=pthresh, ax=ax3)\n counts, _ = hist(long_df[\"x\"], long_df[\"y\"])\n mesh = ax3.collections[0]\n assert mesh.get_clim() == (0, f(counts, pmax))\n assert_array_equal(\n mesh.get_array().mask,\n (counts <= f(counts, pthresh)).T.flat,\n )\n\n def test_hue_color_limits(self, long_df):\n\n _, (ax1, ax2, ax3, ax4) = plt.subplots(4)\n kws = dict(data=long_df, x=\"x\", y=\"y\", hue=\"c\", bins=4)\n\n hist = Histogram(bins=kws[\"bins\"])\n hist.define_bin_edges(long_df[\"x\"], long_df[\"y\"])\n full_counts, _ = hist(long_df[\"x\"], long_df[\"y\"])\n\n sub_counts = []\n for _, sub_df in long_df.groupby(kws[\"hue\"]):\n c, _ = hist(sub_df[\"x\"], sub_df[\"y\"])\n sub_counts.append(c)\n\n pmax = .8\n pthresh = .05\n f = _DistributionPlotter()._quantile_to_level\n\n histplot(**kws, common_norm=True, ax=ax1)\n for i, mesh in enumerate(ax1.collections):\n assert mesh.get_clim() == (0, full_counts.max())\n\n 
histplot(**kws, common_norm=False, ax=ax2)\n for i, mesh in enumerate(ax2.collections):\n assert mesh.get_clim() == (0, sub_counts[i].max())\n\n histplot(**kws, common_norm=True, pmax=pmax, pthresh=pthresh, ax=ax3)\n for i, mesh in enumerate(ax3.collections):\n assert mesh.get_clim() == (0, f(full_counts, pmax))\n assert_array_equal(\n mesh.get_array().mask,\n (sub_counts[i] <= f(full_counts, pthresh)).T.flat,\n )\n\n histplot(**kws, common_norm=False, pmax=pmax, pthresh=pthresh, ax=ax4)\n for i, mesh in enumerate(ax4.collections):\n assert mesh.get_clim() == (0, f(sub_counts[i], pmax))\n assert_array_equal(\n mesh.get_array().mask,\n (sub_counts[i] <= f(sub_counts[i], pthresh)).T.flat,\n )\n\n def test_colorbar(self, long_df):\n\n f, ax = plt.subplots()\n histplot(long_df, x=\"x\", y=\"y\", cbar=True, ax=ax)\n assert len(ax.figure.axes) == 2\n\n f, (ax, cax) = plt.subplots(2)\n histplot(long_df, x=\"x\", y=\"y\", cbar=True, cbar_ax=cax, ax=ax)\n assert len(ax.figure.axes) == 2\n\n\nclass TestECDFPlotUnivariate(SharedAxesLevelTests):\n\n func = staticmethod(ecdfplot)\n\n def get_last_color(self, ax):\n\n return to_rgb(ax.lines[-1].get_color())\n\n @pytest.mark.parametrize(\"variable\", [\"x\", \"y\"])\n def test_long_vectors(self, long_df, variable):\n\n vector = long_df[variable]\n vectors = [\n variable, vector, vector.to_numpy(), vector.to_list(),\n ]\n\n f, ax = plt.subplots()\n for vector in vectors:\n ecdfplot(data=long_df, ax=ax, **{variable: vector})\n\n xdata = [l.get_xdata() for l in ax.lines]\n for a, b in itertools.product(xdata, xdata):\n assert_array_equal(a, b)\n\n ydata = [l.get_ydata() for l in ax.lines]\n for a, b in itertools.product(ydata, ydata):\n assert_array_equal(a, b)\n\n def test_hue(self, long_df):\n\n ax = ecdfplot(long_df, x=\"x\", hue=\"a\")\n\n for line, color in zip(ax.lines[::-1], color_palette()):\n assert_colors_equal(line.get_color(), color)\n\n def test_line_kwargs(self, long_df):\n\n color = \"r\"\n ls = \"--\"\n lw = 3\n ax = ecdfplot(long_df, x=\"x\", color=color, ls=ls, lw=lw)\n\n for line in ax.lines:\n assert_colors_equal(line.get_color(), color)\n assert line.get_linestyle() == ls\n assert line.get_linewidth() == lw\n\n @pytest.mark.parametrize(\"data_var\", [\"x\", \"y\"])\n def test_drawstyle(self, flat_series, data_var):\n\n ax = ecdfplot(**{data_var: flat_series})\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n assert ax.lines[0].get_drawstyle() == drawstyles[data_var]\n\n @pytest.mark.parametrize(\n \"data_var,stat_var\", [[\"x\", \"y\"], [\"y\", \"x\"]],\n )\n def test_proportion_limits(self, flat_series, data_var, stat_var):\n\n ax = ecdfplot(**{data_var: flat_series})\n data = getattr(ax.lines[0], f\"get_{stat_var}data\")()\n assert data[0] == 0\n assert data[-1] == 1\n sticky_edges = getattr(ax.lines[0].sticky_edges, stat_var)\n assert sticky_edges[:] == [0, 1]\n\n @pytest.mark.parametrize(\n \"data_var,stat_var\", [[\"x\", \"y\"], [\"y\", \"x\"]],\n )\n def test_proportion_limits_complementary(self, flat_series, data_var, stat_var):\n\n ax = ecdfplot(**{data_var: flat_series}, complementary=True)\n data = getattr(ax.lines[0], f\"get_{stat_var}data\")()\n assert data[0] == 1\n assert data[-1] == 0\n sticky_edges = getattr(ax.lines[0].sticky_edges, stat_var)\n assert sticky_edges[:] == [0, 1]\n\n @pytest.mark.parametrize(\n \"data_var,stat_var\", [[\"x\", \"y\"], [\"y\", \"x\"]],\n )\n def test_proportion_count(self, flat_series, data_var, stat_var):\n\n n = len(flat_series)\n ax = ecdfplot(**{data_var: flat_series}, 
stat=\"count\")\n data = getattr(ax.lines[0], f\"get_{stat_var}data\")()\n assert data[0] == 0\n assert data[-1] == n\n sticky_edges = getattr(ax.lines[0].sticky_edges, stat_var)\n assert sticky_edges[:] == [0, n]\n\n def test_weights(self):\n\n ax = ecdfplot(x=[1, 2, 3], weights=[1, 1, 2])\n y = ax.lines[0].get_ydata()\n assert_array_equal(y, [0, .25, .5, 1])\n\n def test_bivariate_error(self, long_df):\n\n with pytest.raises(NotImplementedError, match=\"Bivariate ECDF plots\"):\n ecdfplot(data=long_df, x=\"x\", y=\"y\")\n\n\nclass TestDisPlot:\n\n # TODO probably good to move these utility attributes/methods somewhere else\n @pytest.mark.parametrize(\n \"kwargs\", [\n dict(),\n dict(x=\"x\"),\n dict(x=\"t\"),\n dict(x=\"a\"),\n dict(x=\"z\", log_scale=True),\n dict(x=\"x\", binwidth=4),\n dict(x=\"x\", weights=\"f\", bins=5),\n dict(x=\"x\", color=\"green\", linewidth=2, binwidth=4),\n dict(x=\"x\", hue=\"a\", fill=False),\n dict(x=\"y\", hue=\"a\", fill=False),\n dict(x=\"x\", hue=\"a\", multiple=\"stack\"),\n dict(x=\"x\", hue=\"a\", element=\"step\"),\n dict(x=\"x\", hue=\"a\", palette=\"muted\"),\n dict(x=\"x\", hue=\"a\", kde=True),\n dict(x=\"x\", hue=\"a\", stat=\"density\", common_norm=False),\n dict(x=\"x\", y=\"y\"),\n ],\n )\n def test_versus_single_histplot(self, long_df, kwargs):\n\n ax = histplot(long_df, **kwargs)\n g = displot(long_df, **kwargs)\n assert_plots_equal(ax, g.ax)\n\n if ax.legend_ is not None:\n assert_legends_equal(ax.legend_, g._legend)\n\n if kwargs:\n long_df[\"_\"] = \"_\"\n g2 = displot(long_df, col=\"_\", **kwargs)\n assert_plots_equal(ax, g2.ax)\n\n @pytest.mark.parametrize(\n \"kwargs\", [\n dict(),\n dict(x=\"x\"),\n dict(x=\"t\"),\n dict(x=\"z\", log_scale=True),\n dict(x=\"x\", bw_adjust=.5),\n dict(x=\"x\", weights=\"f\"),\n dict(x=\"x\", color=\"green\", linewidth=2),\n dict(x=\"x\", hue=\"a\", multiple=\"stack\"),\n dict(x=\"x\", hue=\"a\", fill=True),\n dict(x=\"y\", hue=\"a\", fill=False),\n dict(x=\"x\", hue=\"a\", palette=\"muted\"),\n dict(x=\"x\", y=\"y\"),\n ],\n )\n def test_versus_single_kdeplot(self, long_df, kwargs):\n\n ax = kdeplot(data=long_df, **kwargs)\n g = displot(long_df, kind=\"kde\", **kwargs)\n assert_plots_equal(ax, g.ax)\n\n if ax.legend_ is not None:\n assert_legends_equal(ax.legend_, g._legend)\n\n if kwargs:\n long_df[\"_\"] = \"_\"\n g2 = displot(long_df, kind=\"kde\", col=\"_\", **kwargs)\n assert_plots_equal(ax, g2.ax)\n\n @pytest.mark.parametrize(\n \"kwargs\", [\n dict(),\n dict(x=\"x\"),\n dict(x=\"t\"),\n dict(x=\"z\", log_scale=True),\n dict(x=\"x\", weights=\"f\"),\n dict(y=\"x\"),\n dict(x=\"x\", color=\"green\", linewidth=2),\n dict(x=\"x\", hue=\"a\", complementary=True),\n dict(x=\"x\", hue=\"a\", stat=\"count\"),\n dict(x=\"x\", hue=\"a\", palette=\"muted\"),\n ],\n )\n def test_versus_single_ecdfplot(self, long_df, kwargs):\n\n ax = ecdfplot(data=long_df, **kwargs)\n g = displot(long_df, kind=\"ecdf\", **kwargs)\n assert_plots_equal(ax, g.ax)\n\n if ax.legend_ is not None:\n assert_legends_equal(ax.legend_, g._legend)\n\n if kwargs:\n long_df[\"_\"] = \"_\"\n g2 = displot(long_df, kind=\"ecdf\", col=\"_\", **kwargs)\n assert_plots_equal(ax, g2.ax)\n\n @pytest.mark.parametrize(\n \"kwargs\", [\n dict(x=\"x\"),\n dict(x=\"x\", y=\"y\"),\n dict(x=\"x\", hue=\"a\"),\n ]\n )\n def test_with_rug(self, long_df, kwargs):\n\n ax = plt.figure().subplots()\n histplot(data=long_df, **kwargs, ax=ax)\n rugplot(data=long_df, **kwargs, ax=ax)\n\n g = displot(long_df, rug=True, **kwargs)\n\n assert_plots_equal(ax, g.ax, 
labels=False)\n\n long_df[\"_\"] = \"_\"\n g2 = displot(long_df, col=\"_\", rug=True, **kwargs)\n\n assert_plots_equal(ax, g2.ax, labels=False)\n\n @pytest.mark.parametrize(\n \"facet_var\", [\"col\", \"row\"],\n )\n def test_facets(self, long_df, facet_var):\n\n kwargs = {facet_var: \"a\"}\n ax = kdeplot(data=long_df, x=\"x\", hue=\"a\")\n g = displot(long_df, x=\"x\", kind=\"kde\", **kwargs)\n\n legend_texts = ax.legend_.get_texts()\n\n for i, line in enumerate(ax.lines[::-1]):\n facet_ax = g.axes.flat[i]\n facet_line = facet_ax.lines[0]\n assert_array_equal(line.get_xydata(), facet_line.get_xydata())\n\n text = legend_texts[i].get_text()\n assert text in facet_ax.get_title()\n\n @pytest.mark.parametrize(\"multiple\", [\"dodge\", \"stack\", \"fill\"])\n def test_facet_multiple(self, long_df, multiple):\n\n bins = np.linspace(0, 20, 5)\n ax = histplot(\n data=long_df[long_df[\"c\"] == 0],\n x=\"x\", hue=\"a\", hue_order=[\"a\", \"b\", \"c\"],\n multiple=multiple, bins=bins,\n )\n\n g = displot(\n data=long_df, x=\"x\", hue=\"a\", col=\"c\", hue_order=[\"a\", \"b\", \"c\"],\n multiple=multiple, bins=bins,\n )\n\n assert_plots_equal(ax, g.axes_dict[0])\n\n def test_ax_warning(self, long_df):\n\n ax = plt.figure().subplots()\n with pytest.warns(UserWarning, match=\"`displot` is a figure-level\"):\n displot(long_df, x=\"x\", ax=ax)\n\n @pytest.mark.parametrize(\"key\", [\"col\", \"row\"])\n def test_array_faceting(self, long_df, key):\n\n a = long_df[\"a\"].to_numpy()\n vals = categorical_order(a)\n g = displot(long_df, x=\"x\", **{key: a})\n assert len(g.axes.flat) == len(vals)\n for ax, val in zip(g.axes.flat, vals):\n assert val in ax.get_title()\n\n def test_legend(self, long_df):\n\n g = displot(long_df, x=\"x\", hue=\"a\")\n assert g._legend is not None\n\n def test_empty(self):\n\n g = displot(x=[], y=[])\n assert isinstance(g, FacetGrid)\n\n def test_bivariate_ecdf_error(self, long_df):\n\n with pytest.raises(NotImplementedError):\n displot(long_df, x=\"x\", y=\"y\", kind=\"ecdf\")\n\n def test_bivariate_kde_norm(self, rng):\n\n x, y = rng.normal(0, 1, (2, 100))\n z = [0] * 80 + [1] * 20\n\n g = displot(x=x, y=y, col=z, kind=\"kde\", levels=10)\n l1 = sum(bool(c.get_segments()) for c in g.axes.flat[0].collections)\n l2 = sum(bool(c.get_segments()) for c in g.axes.flat[1].collections)\n assert l1 > l2\n\n g = displot(x=x, y=y, col=z, kind=\"kde\", levels=10, common_norm=False)\n l1 = sum(bool(c.get_segments()) for c in g.axes.flat[0].collections)\n l2 = sum(bool(c.get_segments()) for c in g.axes.flat[1].collections)\n assert l1 == l2\n\n def test_bivariate_hist_norm(self, rng):\n\n x, y = rng.normal(0, 1, (2, 100))\n z = [0] * 80 + [1] * 20\n\n g = displot(x=x, y=y, col=z, kind=\"hist\")\n clim1 = g.axes.flat[0].collections[0].get_clim()\n clim2 = g.axes.flat[1].collections[0].get_clim()\n assert clim1 == clim2\n\n g = displot(x=x, y=y, col=z, kind=\"hist\", common_norm=False)\n clim1 = g.axes.flat[0].collections[0].get_clim()\n clim2 = g.axes.flat[1].collections[0].get_clim()\n assert clim1[1] > clim2[1]\n\n\ndef integrate(y, x):\n \"\"\"\"Simple numerical integration for testing KDE code.\"\"\"\n y = np.asarray(y)\n x = np.asarray(x)\n dx = np.diff(x)\n return (dx * y[:-1] + dx * y[1:]).sum() / 2\n"
] | [
[
"numpy.ones",
"numpy.multiply",
"numpy.diff",
"numpy.histogram_bin_edges",
"numpy.asarray",
"numpy.random.RandomState",
"numpy.log",
"numpy.allclose",
"numpy.append",
"matplotlib.colors.to_rgba",
"matplotlib.pyplot.figure",
"numpy.abs",
"numpy.testing.assert_array_equal",
"numpy.log10",
"numpy.linspace",
"matplotlib.dates.date2num",
"numpy.zeros",
"numpy.repeat",
"matplotlib.pyplot.subplots",
"numpy.arange",
"numpy.testing.assert_array_almost_equal",
"matplotlib.pyplot.close",
"numpy.zeros_like",
"numpy.divide",
"numpy.array_equal",
"numpy.array",
"numpy.full"
]
] |
shermanlo77/cptimeseries | [
"2a847ac15f7ea4925896c2a7baec78e8717e63f4"
] | [
"script/prior/downscale/check_valid_gp_prior.py"
] | [
"import numpy as np\nfrom scipy import linalg\n\nimport compound_poisson\nimport dataset\n\ndef main():\n downscale = compound_poisson.Downscale(dataset.AnaDual10Training())\n\n precision_array = np.linspace(250, 300, 50)\n for precision in precision_array:\n cov_chol = downscale.square_error.copy()\n cov_chol *= -precision / 2\n cov_chol = np.exp(cov_chol)\n print(\"Precision: \" + str(precision))\n try:\n cov_chol = linalg.cholesky(cov_chol, True)\n print(\"Success\")\n except:\n print(\"Fail\")\n\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"scipy.linalg.cholesky",
"numpy.linspace",
"numpy.exp"
]
] |
thu-ml/realsafe | [
"474d549aa402b4cdd5e3629d23d035c31b60a360"
] | [
"pytorch_ares/third_party/fast_adversarial/ImageNet/main_fast.py"
] | [
"# This module is adapted from https://github.com/mahyarnajibi/FreeAdversarialTraining/blob/master/main_free.py\n# Which in turn was adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py\nimport init_paths\nimport argparse\nimport os\nimport time\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch.autograd import Variable\nimport math\nimport numpy as np\nfrom utils import *\nfrom validation import validate, validate_pgd\nimport torchvision.models as models\n\nfrom apex import amp\nimport copy\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\n parser.add_argument('data', metavar='DIR',\n help='path to dataset')\n parser.add_argument('--output_prefix', default='fast_adv', type=str,\n help='prefix used to define output path')\n parser.add_argument('-c', '--config', default='configs.yml', type=str, metavar='Path',\n help='path to the config file (default: configs.yml)')\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n parser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\n parser.add_argument('--restarts', default=1, type=int)\n return parser.parse_args()\n\n\n# Parase config file and initiate logging\nconfigs = parse_config_file(parse_args())\nlogger = initiate_logger(configs.output_name, configs.evaluate)\nprint = logger.info\ncudnn.benchmark = True\n\ndef main():\n # Scale and initialize the parameters\n best_prec1 = 0\n configs.TRAIN.epochs = int(math.ceil(configs.TRAIN.epochs / configs.ADV.n_repeats))\n configs.ADV.fgsm_step /= configs.DATA.max_color_value\n configs.ADV.clip_eps /= configs.DATA.max_color_value\n \n # Create output folder\n if not os.path.isdir(os.path.join('trained_models', configs.output_name)):\n os.makedirs(os.path.join('trained_models', configs.output_name))\n \n # Log the config details\n logger.info(pad_str(' ARGUMENTS '))\n for k, v in configs.items(): print('{}: {}'.format(k, v))\n logger.info(pad_str(''))\n\n \n # Create the model\n if configs.pretrained:\n print(\"=> using pre-trained model '{}'\".format(configs.TRAIN.arch))\n model = models.__dict__[configs.TRAIN.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(configs.TRAIN.arch))\n model = models.__dict__[configs.TRAIN.arch]()\n # Wrap the model into DataParallel\n model.cuda()\n\n # reverse mapping\n param_to_moduleName = {}\n for m in model.modules():\n for p in m.parameters(recurse=False):\n param_to_moduleName[p] = str(type(m).__name__)\n\n # Criterion:\n criterion = nn.CrossEntropyLoss().cuda()\n \n group_decay = [p for p in model.parameters() if 'BatchNorm' not in param_to_moduleName[p]]\n group_no_decay = [p for p in model.parameters() if 'BatchNorm' in param_to_moduleName[p]]\n groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=0)]\n optimizer = torch.optim.SGD(groups, configs.TRAIN.lr,\n momentum=configs.TRAIN.momentum,\n weight_decay=configs.TRAIN.weight_decay)\n\n if configs.TRAIN.half and not configs.evaluate:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\")\n model = torch.nn.DataParallel(model)\n\n # Resume if a valid checkpoint path is provided\n if 
configs.resume:\n if os.path.isfile(configs.resume):\n print(\"=> loading checkpoint '{}'\".format(configs.resume))\n checkpoint = torch.load(configs.resume)\n configs.TRAIN.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(configs.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(configs.resume))\n \n # Initiate data loaders\n traindir = os.path.join(configs.data, 'train')\n valdir = os.path.join(configs.data, 'val')\n \n resize_transform = []\n\n if configs.DATA.img_size > 0: \n resize_transform = [ transforms.Resize(configs.DATA.img_size) ] \n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose(resize_transform + [\n transforms.RandomResizedCrop(configs.DATA.crop_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n ]))\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=configs.DATA.batch_size, shuffle=True,\n num_workers=configs.DATA.workers, pin_memory=True, sampler=None)\n \n normalize = transforms.Normalize(mean=configs.TRAIN.mean,\n std=configs.TRAIN.std)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, transforms.Compose( resize_transform + [\n transforms.CenterCrop(configs.DATA.crop_size),\n transforms.ToTensor(),\n ])),\n batch_size=configs.DATA.batch_size, shuffle=False,\n num_workers=configs.DATA.workers, pin_memory=True)\n\n # If in evaluate mode: perform validation on PGD attacks as well as clean samples\n if configs.evaluate:\n logger.info(pad_str(' Performing PGD Attacks '))\n for pgd_param in configs.ADV.pgd_attack:\n validate_pgd(val_loader, model, criterion, pgd_param[0], pgd_param[1], configs, logger)\n validate(val_loader, model, criterion, configs, logger)\n return\n \n lr_schedule = lambda t: np.interp([t], configs.TRAIN.lr_epochs, configs.TRAIN.lr_values)[0]\n \n for epoch in range(configs.TRAIN.start_epoch, configs.TRAIN.epochs):\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, lr_schedule, configs.TRAIN.half)\n\n # evaluate on validation set\n prec1 = validate(val_loader, model, criterion, configs, logger)\n\n # remember best prec@1 and save checkpoint\n is_best = prec1 > best_prec1\n best_prec1 = max(prec1, best_prec1)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': configs.TRAIN.arch,\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, os.path.join('trained_models', f'{configs.output_name}'),\n epoch + 1)\n \n # Automatically perform PGD Attacks at the end of training\n # logger.info(pad_str(' Performing PGD Attacks '))\n # for pgd_param in configs.ADV.pgd_attack:\n # validate_pgd(val_loader, val_model, criterion, pgd_param[0], pgd_param[1], configs, logger)\n\n \n# Fast Adversarial Training Module \nglobal global_noise_data\nglobal_noise_data = torch.zeros([configs.DATA.batch_size, 3, configs.DATA.crop_size, configs.DATA.crop_size]).cuda()\ndef train(train_loader, model, criterion, optimizer, epoch, lr_schedule, half=False): \n global global_noise_data\n\n mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])\n mean = mean.expand(3,configs.DATA.crop_size, configs.DATA.crop_size).cuda()\n std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])\n std = std.expand(3, configs.DATA.crop_size, 
configs.DATA.crop_size).cuda()\n\n # Initialize the meters\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n # switch to train mode\n model.train()\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n input = input.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n data_time.update(time.time() - end)\n\n if configs.TRAIN.random_init: \n global_noise_data.uniform_(-configs.ADV.clip_eps, configs.ADV.clip_eps)\n for j in range(configs.ADV.n_repeats):\n # update learning rate\n lr = lr_schedule(epoch + (i*configs.ADV.n_repeats + j + 1)/len(train_loader))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # Ascend on the global noise\n noise_batch = Variable(global_noise_data[0:input.size(0)], requires_grad=True)#.cuda()\n in1 = input + noise_batch\n in1.clamp_(0, 1.0)\n in1.sub_(mean).div_(std)\n output = model(in1)\n loss = criterion(output, target)\n if half: \n with amp.scale_loss(loss, optimizer) as scaled_loss: \n scaled_loss.backward()\n else:\n loss.backward()\n \n # Update the noise for the next iteration\n pert = fgsm(noise_batch.grad, configs.ADV.fgsm_step)\n global_noise_data[0:input.size(0)] += pert.data\n global_noise_data.clamp_(-configs.ADV.clip_eps, configs.ADV.clip_eps)\n\n # Descend on global noise\n noise_batch = Variable(global_noise_data[0:input.size(0)], requires_grad=False)#.cuda()\n in1 = input + noise_batch\n in1.clamp_(0, 1.0)\n in1.sub_(mean).div_(std)\n output = model(in1)\n loss = criterion(output, target)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n if half: \n with amp.scale_loss(loss, optimizer) as scaled_loss: \n scaled_loss.backward()\n else: \n loss.backward()\n\n optimizer.step()\n\n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % configs.TRAIN.print_freq == 0:\n print('Train Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {cls_loss.val:.4f} ({cls_loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\t'\n 'LR {lr:.3f}'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, top1=top1,\n top5=top5,cls_loss=losses, lr=lr))\n sys.stdout.flush()\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.optim.SGD",
"torch.load",
"numpy.interp",
"torch.nn.CrossEntropyLoss",
"torch.zeros",
"torch.nn.DataParallel",
"numpy.array"
]
] |
KunalRRathod/Matplotlib_Basics | [
"ad895c97135b9cc4dedd41f1cce00072efb24c8e"
] | [
"Colors.py"
] | [
"import matplotlib.pyplot as plt; import importlib; importlib.reload(plt)\r\nplt.clf()\r\nimport numpy as np\r\nimport pandas as pd\r\n# Any Dataset\r\ndf = pd.read_csv('.csv', index_col = 0)\r\ngdp_cap = list(df.gdp_cap)\r\nlife_exp = list(df.life_exp)\r\npop = list(df['population']/1e6)\r\ncont = list(df.cont)\r\nlut = {\r\n 'Asia':'red',\r\n 'Europe':'green',\r\n 'Africa':'blue',\r\n 'Americas':'yellow',\r\n 'Oceania':'black'\r\n}\r\ncol = [lut[x] for x in cont]\r\n\r\n# Specify c and alpha inside plt.scatter()\r\nplt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)\r\n\r\n# Previous customizations\r\nplt.xscale('log')\r\nplt.xlabel('GDP per Capita [in USD]')\r\nplt.ylabel('Life Expectancy [in years]')\r\nplt.title('World Development in 2007')\r\nplt.xticks([1000,10000,100000], ['1k','10k','100k'])\r\n\r\n# Show the plot\r\nplt.show()\r\n"
] | [
[
"matplotlib.pyplot.xticks",
"pandas.read_csv",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.xlabel"
]
] |
JCoetzee123/spira | [
"dae08feba1578ecc8745b45109f4fb7bef374546"
] | [
"spira/yevon/geometry/edges/edges.3.py"
] | [
"import gdspy\nimport numpy as np\n\nfrom copy import deepcopy\nfrom spira.core.transforms import *\nfrom spira.yevon import constants\nfrom spira.yevon.gdsii.elem_list import ElementList\nfrom spira.yevon.gdsii.group import Group\nfrom spira.yevon.gdsii.polygon import __ShapeElement__\nfrom spira.yevon.geometry import shapes\nfrom spira.yevon.geometry.shapes import ShapeParameter\nfrom spira.yevon.geometry.coord import Coord\nfrom spira.yevon.gdsii.base import __LayerElement__\nfrom spira.core.parameters.descriptor import Parameter\nfrom spira.yevon.process.process_layer import ProcessParameter\nfrom spira.core.parameters.variables import *\nfrom spira.core.transforms import Stretch\nfrom spira.yevon.process.physical_layer import PLayer\nfrom spira.yevon.process import get_rule_deck\n\n\n__all__ = [\n 'Edge',\n 'EdgeAdapter',\n 'generate_edges',\n]\n\n\nRDD = get_rule_deck()\n\n\nclass Edge(__ShapeElement__):\n \"\"\" Edge elements are object that represents the edge\n of a polygonal shape.\n\n Example\n -------\n >>> edge Edge()\n \"\"\"\n\n width = NumberParameter(default=1, doc='The width of the edge.')\n extend = NumberParameter(default=1, doc='The distance the edge extends from the shape.')\n pid = StringParameter(default='no_pid', doc='A unique polygon ID to which the edge connects.')\n edge_type = IntegerParameter(default=constants.EDGE_TYPE_NORMAL)\n\n def __init__(self, shape, layer, transformation=None, **kwargs):\n super().__init__(shape=shape, layer=layer, transformation=transformation, **kwargs)\n\n def __repr__(self):\n if self is None:\n return 'Edge is None!'\n layer = RDD.GDSII.IMPORT_LAYER_MAP[self.layer]\n class_string = \"[SPiRA: Edge \\'{}\\'] (center {}, width {}, extend {}, process {}, purpose {})\"\n return class_string.format(self.edge_type, self.center, self.width,\n self.extend, self.layer.process.symbol, self.layer.purpose.symbol)\n\n def __str__(self):\n return self.__repr__()\n\n\ndef EdgeAdapter(original_edge, edge_type, **kwargs):\n \"\"\" Adapter class to modify the edge shape. \"\"\"\n\n shape = original_edge.shape\n extend = original_edge.extend\n width = original_edge.width\n\n if edge_type == constants.EDGE_TYPE_INSIDE:\n shp = shape.move((0,extend/2.0))\n elif edge_type == constants.EDGE_TYPE_OUTSIDE:\n shp = shape.move((0,-extend/2.0))\n elif edge_type == constants.EDGE_TYPE_SQUARE:\n sf = 1 + 2*extend/width\n shp = Stretch(stretch_factor=(sf,1), stretch_center=shape.center_of_mass)(shape)\n elif edge_type == constants.EDGE_TYPE_SIDE_EXTEND:\n if 'side_extend' not in kwargs:\n raise ValueError('No `side_extend` parameter given.')\n side_extend = kwargs['side_extend']\n sf = 1 + 2*side_extend/width\n shp = Stretch(stretch_factor=(sf,1), stretch_center=shape.center_of_mass)(shape)\n elif edge_type == constants.EDGE_TYPE_EUCLIDEAN:\n pass\n elif edge_type == constants.EDGE_TYPE_NORMAL:\n pass\n\n original_edge = original_edge.copy(shape=shp, edge_type=edge_type)\n\n return original_edge\n\n\ndef EdgeSymmetric(width=1, extend=1, process=None, transformation=None):\n \"\"\" \"\"\"\n layer = PLayer(process=process, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED)\n shape = shapes.BoxShape(width=width, height=2*extend)\n return Edge(shape=shape, layer=layer, width=width, extend=2*extend, transformation=transformation)\n\n\nclass EdgeGenerator(Group, __LayerElement__):\n \"\"\" Generates edge objects for each shape segment. 
\"\"\"\n\n shape = ShapeParameter()\n\n def create_elements(self, elems):\n\n xpts = list(self.shape.x_coords)\n ypts = list(self.shape.y_coords)\n\n n = len(xpts)\n xpts.append(xpts[0])\n ypts.append(ypts[0])\n\n clockwise = 0\n for i in range(0, n):\n clockwise += ((xpts[i+1] - xpts[i]) * (ypts[i+1] + ypts[i]))\n\n if self.layer.name == 'BBOX': bbox = True\n else: bbox = False\n\n for i in range(0, n):\n\n name = '{}_e{}'.format(self.layer.name, i)\n x = np.sign(clockwise) * (xpts[i+1] - xpts[i])\n y = np.sign(clockwise) * (ypts[i] - ypts[i+1])\n orientation = (np.arctan2(x, y) * constants.RAD2DEG) + 90\n midpoint = [(xpts[i+1] + xpts[i])/2, (ypts[i+1] + ypts[i])/2]\n width = np.abs(np.sqrt((xpts[i+1] - xpts[i])**2 + (ypts[i+1]-ypts[i])**2))\n\n layer = RDD.GDSII.IMPORT_LAYER_MAP[self.layer]\n extend = RDD[layer.process.symbol].MIN_SIZE\n\n T = Rotation(orientation) + Translation(midpoint)\n layer = PLayer(process=layer.process, purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED)\n shape = shapes.BoxShape(width=width, height=extend)\n # elems += EdgeSymmetric(width=width, extend=extend, process=layer.process, transformation=T)\n elems += Edge(shape=shape, layer=layer, width=width, extend=extend, transformation=T)\n\n return elems\n\n\ndef generate_edges(shape, layer):\n \"\"\" Method call for edge generator. \"\"\"\n edge_gen = EdgeGenerator(shape=shape, layer=layer)\n return edge_gen.elements\n\n\n# def shape_edge_ports(shape, layer, local_pid='None', center=(0,0), loc_name=''):\n\n# edges = PortList()\n\n# xpts = list(shape.x_coords)\n# ypts = list(shape.y_coords)\n\n# n = len(xpts)\n\n# xpts.append(xpts[0])\n# ypts.append(ypts[0])\n\n# clockwise = 0\n# for i in range(0, n):\n# clockwise += ((xpts[i+1] - xpts[i]) * (ypts[i+1] + ypts[i]))\n\n# if layer.name == 'BBOX': bbox = True\n# else: bbox = False\n\n# layer = RDD.GDSII.IMPORT_LAYER_MAP[layer]\n\n# for i in range(0, n):\n# # name = 'E{}_{}'.format(i, layer.process.symbol)\n# # name = 'E{}_{}_{}'.format(i, layer.process.symbol, shape.bbox_info.center)\n# name = '{}E{}_{}'.format(loc_name, i, layer.process.symbol)\n# x = np.sign(clockwise) * (xpts[i+1] - xpts[i])\n# y = np.sign(clockwise) * (ypts[i] - ypts[i+1])\n# orientation = (np.arctan2(x, y) * constants.RAD2DEG)\n# midpoint = [(xpts[i+1] + xpts[i])/2, (ypts[i+1] + ypts[i])/2]\n# width = np.abs(np.sqrt((xpts[i+1] - xpts[i])**2 + (ypts[i+1]-ypts[i])**2))\n# P = Port(\n# name=name,\n# process=layer.process,\n# purpose=RDD.PURPOSE.PORT.OUTSIDE_EDGE_DISABLED,\n# midpoint=midpoint,\n# orientation=orientation,\n# width=width,\n# length=0.2,\n# local_pid=local_pid\n# )\n# edges += P\n# return edges\n\n"
] | [
[
"numpy.sqrt",
"numpy.arctan2",
"numpy.sign"
]
] |
NUS-LID/SANE | [
"fc16d4fd19f60960b83812adb8240f8fbedd4ef0"
] | [
"common/exploration_scheduler.py"
] | [
"import numpy as np\n\nclass ExplorationExploitationScheduler(object):\n def __init__(self, DQN, n_actions, eps_initial=1, eps_final=0.1, eps_final_frame=0.01, \n eps_evaluation=0.0, eps_annealing_frames=1000000, \n replay_memory_start_size=50000, max_frames=25000000, cutoff_frame=50000):\n \"\"\"\n Args:\n DQN: A DQN object\n n_actions: Integer, number of possible actions\n eps_initial: Float, Exploration probability for the first \n replay_memory_start_size frames\n eps_final: Float, Exploration probability after \n replay_memory_start_size + eps_annealing_frames frames\n eps_final_frame: Float, Exploration probability after max_frames frames\n eps_evaluation: Float, Exploration probability during evaluation\n eps_annealing_frames: Int, Number of frames over which the \n exploration probabilty is annealed from eps_initial to eps_final\n replay_memory_start_size: Integer, Number of frames during \n which the agent only explores\n max_frames: Integer, Total number of frames shown to the agent\n cutoff_frame=50000: Integer, frame to cutoff noisy exploration\n \"\"\"\n self.n_actions = n_actions\n self.eps_initial = eps_initial\n self.eps_final = eps_final\n self.eps_final_frame = eps_final_frame\n self.eps_evaluation = eps_evaluation\n self.eps_annealing_frames = eps_annealing_frames\n self.replay_memory_start_size = replay_memory_start_size\n self.max_frames = max_frames\n self.cutoff_frame = cutoff_frame\n self.slope = -(self.eps_initial - self.eps_final)/self.eps_annealing_frames\n self.intercept = self.eps_initial - self.slope*self.replay_memory_start_size\n self.slope_2 = -(self.eps_final - self.eps_final_frame)/(self.max_frames - self.eps_annealing_frames - self.replay_memory_start_size)\n self.intercept_2 = self.eps_final_frame - self.slope_2*self.max_frames\n \n self.DQN = DQN\n\n def get_action(self, session, frame_number, state, evaluation=False, no_noise=False, other_args = {}):\n \"\"\"\n Args:\n session: A tensorflow session object\n frame_number: Integer, number of the current frame\n state: A (84, 84, 4) sequence of frames of an Atari game in grayscale\n evaluation: A boolean saying whether the agent is being evaluated\n Returns:\n An integer between 0 and n_actions - 1 determining the action the agent perfoms next\n \"\"\"\n if evaluation:\n self.eps = self.eps_evaluation\n elif frame_number < self.replay_memory_start_size:\n self.eps = self.eps_initial\n elif frame_number >= self.replay_memory_start_size and frame_number < self.replay_memory_start_size + self.eps_annealing_frames:\n self.eps = self.slope*frame_number + self.intercept\n elif frame_number >= self.replay_memory_start_size + self.eps_annealing_frames:\n self.eps = self.slope_2*frame_number + self.intercept_2\n \n if np.random.rand(1) < self.eps:\n return np.random.randint(0, self.n_actions), 0, 0\n if frame_number > self.cutoff_frame :\n cond_variable=False\n else :\n cond_variable = True\n\n if no_noise :\n cond_variable = False\n feed_dict = other_args\n feed_dict[self.DQN.input] = [state]\n feed_dict[self.DQN.cond_variable] = [cond_variable]\n\n [action, sigma, q_values] = session.run([self.DQN.best_action,self.DQN.common_variance, self.DQN.q_values] ,feed_dict=feed_dict)\n return action[0], np.abs(sigma[0][0]), q_values\n"
] | [
[
"numpy.random.randint",
"numpy.abs",
"numpy.random.rand"
]
] |
ericmjl/influenza-reassortment | [
"21b11507be0f72376237c4ad31129d0148a69456"
] | [
"source_pair_manual_sh.py"
] | [
"handle = '20141103\\ All\\ IRD'\n\ndef get_header(n_nodes):\n\theader = '\\\n#!/bin/sh \\n\\\n#$ -S /bin/sh \\n\\\n#$ -cwd \\n\\\n#$ -V\\n\\\n#$ -m e\\n\\\n#$ -M [email protected] \\n\\\n#$ -pe whole_nodes {0}\\n\\\n#$ -l mem_free=2G\\n\\\n#############################################\\n\\n'.format(n_nodes)\n\n\treturn header\n\nimport os\nimport pickle as pkl \nimport numpy as np\n\ndef check_dirs(dirname):\n\tif dirname not in os.listdir(os.getcwd()):\n\t\tos.mkdir(dirname)\n\telse:\n\t\tpass\n\n\ncheck_dirs('shell_scripts')\ncheck_dirs('reassortant_edges')\n\nwith open('20141103 All IRD Isolates for Source Pair Search.pkllist', 'r') as f:\n\tisolates = pkl.load(f)\n\n\nnum_per_batch = 20 # number of isolates to process at a time.\ntotal_isolates = len(isolates)\n\n# Check to see which isolates have been completed.\nos.chdir('reassortant_edges')\ncompleted = [int(f.split('.')[0].split(' '.format(handle))[5]) for f in os.listdir(os.getcwd()) if f.split('.')[1] == 'pkl']\nprint(completed)\nos.chdir('..')\nnot_completed = []\n\nfor start in np.arange(0, total_isolates):\n\tif start not in completed:\n\t\tnot_completed.append(start)\n\t\twith open('shell_scripts/source_pair/source_pair{0}.sh'.format(start), 'w') as f:\n\t\t\tf.write(get_header(1))\n\n\t\t\tf.write('cd ..\\n')\n\t\t\tf.write('cd ..\\n')\n\n\t\t\tf.write('python source_pair.py {0} {1} {2}'.format(handle, start, start + 1))\n\nwith open('shell_scripts/source_pair_manual.sh', 'w') as f:\n\tf.write(get_header(1))\n\tf.write('cd source_pair\\n')\t\n\tfor start in not_completed:\n\t\tf.write('qsub source_pair{0}.sh\\n'.format(start))"
] | [
[
"numpy.arange"
]
] |
zhangsiyu1103/ESNAC | [
"8f9d304976ceb5fed8f4fbb37e7be209796dd573"
] | [
"models/shufflenet_m.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .extension import *\n\nclass BottleneckM(nn.Module):\n\n def __init__(self, in_planes, out_planes, stride, groups):\n super(BottleneckM, self).__init__()\n self.stride = stride\n mid_planes = out_planes // 4\n g = 1 if in_planes == 24 else groups\n\n self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g,\n bias=False)\n self.bn1 = nn.BatchNorm2d(mid_planes)\n self.relu1 = nn.ReLU()\n self.shuffle = Shuffle(groups=g)\n self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3,\n stride=stride, padding=1, groups=mid_planes,\n bias=False)\n self.bn2 = nn.BatchNorm2d(mid_planes)\n self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1,\n groups=groups, bias=False)\n self.bn3 = nn.BatchNorm2d(out_planes)\n self.relu3 = nn.ReLU()\n\n if stride == 2:\n self.conv4 = nn.Conv2d(in_planes, in_planes, kernel_size=1,\n groups=2, bias=False)\n self.avgpool = nn.AvgPool2d(3, stride=2, padding=1)\n self.concat = Concat(dim=1)\n\n def forward(self, x):\n out = self.relu1(self.bn1(self.conv1(x)))\n out = self.shuffle(out)\n out = self.bn2(self.conv2(out))\n out = self.bn3(self.conv3(out))\n\n if self.stride == 2:\n res = self.avgpool(self.conv4(x))\n out = self.relu3(self.concat([out, res]))\n else:\n res = x\n out = self.relu3(out + res)\n return out\n\nclass ShuffleNetM(nn.Module):\n\n def __init__(self, cfg, num_classes=100):\n super(ShuffleNetM, self).__init__()\n out_planes = cfg['out_planes']\n num_blocks = cfg['num_blocks']\n groups = cfg['groups']\n\n self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(24)\n self.relu1 = nn.ReLU()\n self.in_planes = 24\n self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)\n self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)\n self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)\n self.avgpool = nn.AvgPool2d(4)\n self.flatten = Flatten()\n self.fc = nn.Linear(out_planes[2], num_classes)\n\n def _make_layer(self, out_planes, num_blocks, groups):\n layers = []\n for i in range(num_blocks):\n stride = 2 if i == 0 else 1\n cat_planes = self.in_planes if i == 0 else 0\n layers.append(BottleneckM(self.in_planes, out_planes - cat_planes,\n stride=stride, groups=groups))\n self.in_planes = out_planes\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.relu1(self.bn1(self.conv1(x)))\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.fc(self.flatten(self.avgpool(x)))\n return x\n\ndef shufflenet(**kwargs):\n cfg = {\n 'out_planes': [200, 400, 800],\n 'num_blocks': [4, 8, 4],\n 'groups': 2\n }\n return ShuffleNetM(cfg, **kwargs)"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.ReLU"
]
] |
titusquah/hal9000 | [
"620c1c5ce76db481e6da5e8cfba8d728afe0cb39"
] | [
"Jack's Code/runable_model_yize.py"
] | [
"#-*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\n#import ipdb\n\n#batchnormalize:看\ndef batchnormalize(X, eps=1e-8, g=None, b=None):\n if X.get_shape().ndims == 4:\n mean = tf.reduce_mean(X, [0,1,2])\n std = tf.reduce_mean( tf.square(X-mean), [0,1,2] )\n X = (X-mean) / tf.sqrt(std+eps)\n\n if g is not None and b is not None:\n g = tf.reshape(g, [1,1,1,-1])\n b = tf.reshape(b, [1,1,1,-1])\n X = X*g + b\n\n elif X.get_shape().ndims == 2:\n mean = tf.reduce_mean(X, 0)\n std = tf.reduce_mean(tf.square(X-mean), 0)\n X = (X-mean) / tf.sqrt(std+eps)\n\n if g is not None and b is not None:\n g = tf.reshape(g, [1,-1])\n b = tf.reshape(b, [1,-1])\n X = X*g + b\n\n else:\n raise NotImplementedError\n\n return X\n\ndef lrelu(X, leak=0.2):\n f1 = 0.5 * (1 + leak)\n f2 = 0.5 * (1 - leak)\n return f1 * X + f2 * tf.abs(X)\n\ndef bce(o, t):\n o = tf.clip_by_value(o, 1e-7, 1. - 1e-7)\n return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=o, logits=t))\n\nclass DCGAN():\n def __init__(\n self,\n batch_size=100,\n image_shape=[24,24,1],\n dim_z=100,\n dim_y=5,\n dim_W1=1024,\n dim_W2=128,\n dim_W3=64,\n dim_channel=1,\n lam=0.05\n ):\n\n self.lam=lam\n self.batch_size = batch_size\n self.image_shape = image_shape\n self.dim_z = dim_z\n self.dim_y = dim_y\n\n self.dim_W1 = dim_W1\n self.dim_W2 = dim_W2\n self.dim_W3 = dim_W3\n self.dim_channel = dim_channel\n\n self.gen_W1 = tf.Variable(tf.random_normal([dim_z+dim_y, dim_W1], stddev=0.02), name='gen_W1')\n self.gen_W2 = tf.Variable(tf.random_normal([dim_W1+dim_y, dim_W2*6*6], stddev=0.02), name='gen_W2')\n self.gen_W3 = tf.Variable(tf.random_normal([5,5,dim_W3,dim_W2+dim_y], stddev=0.02), name='gen_W3')\n self.gen_W4 = tf.Variable(tf.random_normal([5,5,dim_channel,dim_W3+dim_y], stddev=0.02), name='gen_W4')\n\n self.discrim_W1 = tf.Variable(tf.random_normal([5,5,dim_channel+dim_y,dim_W3], stddev=0.02), name='discrim_W1')\n self.discrim_W2 = tf.Variable(tf.random_normal([5,5,dim_W3+dim_y,dim_W2], stddev=0.02), name='discrim_W2')\n self.discrim_W3 = tf.Variable(tf.random_normal([dim_W2*6*6+dim_y,dim_W1], stddev=0.02), name='discrim_W3')\n self.discrim_W4 = tf.Variable(tf.random_normal([dim_W1+dim_y,1], stddev=0.02), name='discrim_W4')\n\n\n\n def build_model(self):\n Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])\n Y = tf.placeholder(tf.float32, [self.batch_size, self.dim_y])\n\n image_real = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)\n pred_high = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)\n pred_low = tf.placeholder(tf.float32, [self.batch_size]+self.image_shape)\n h4 = self.generate(Z, Y)\n #image_gen comes from sigmoid output of generator\n image_gen = tf.nn.sigmoid(h4)\n\n raw_real2 = self.discriminate(image_real, Y)\n #p_real = tf.nn.sigmoid(raw_real)\n p_real = tf.reduce_mean(raw_real2)\n\n raw_gen2 = self.discriminate(image_gen, Y)\n #p_gen = tf.nn.sigmoid(raw_gen)\n p_gen = tf.reduce_mean(raw_gen2)\n\n\n discrim_cost = tf.reduce_mean(raw_real2) - tf.reduce_mean(raw_gen2)\n gen_cost = -tf.reduce_mean(raw_gen2)\n\n mask = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape, name='mask')\n '''contextual_loss_latter = tf.reduce_sum(tf.contrib.layers.flatten(\n -tf.log(tf.abs(image_real-image_gen))), 1)'''\n #contextual_loss_latter = tf.reduce_sum(tf.log(tf.contrib.layers.flatten(tf.abs(image_gen - pred_high))), 1)\n\n #log loss\n '''contextual_loss_latter = tf.reduce_sum(tf.contrib.layers.flatten(\n -tf.log(tf.maximum(\n (mask + 
tf.multiply(tf.ones_like(mask) - mask, pred_high)) - tf.multiply(\n tf.ones_like(mask) - mask, image_gen), 0.0001*tf.ones_like(mask)))\n -tf.log(tf.maximum(\n (mask + tf.multiply(tf.ones_like(mask) - mask, image_gen)) - tf.multiply(\n tf.ones_like(mask) - mask, pred_low), 0.0001*tf.ones_like(mask)))), 1)'''\n contextual_loss_latter = tf.contrib.layers.flatten(\n -tf.log(\n (mask + tf.multiply(tf.ones_like(mask) - mask, pred_high)) - tf.multiply(\n tf.ones_like(mask) - mask, image_gen))\n - tf.log(\n (mask + tf.multiply(tf.ones_like(mask) - mask, image_gen)) - tf.multiply(\n tf.ones_like(mask) - mask, pred_low)))\n contextual_loss_latter = tf.where(tf.is_nan(contextual_loss_latter), tf.ones_like(contextual_loss_latter) * 1000000.0, contextual_loss_latter)\n contextual_loss_latter2 = tf.reduce_sum(contextual_loss_latter, 1)\n #square loss\n '''contextual_loss_latter = tf.reduce_sum(tf.contrib.layers.flatten(\n tf.square(tf.multiply(tf.ones_like(mask) - mask, image_gen) - tf.multiply(tf.ones_like(mask) - mask, pred_high)))\n +tf.contrib.layers.flatten(\n tf.square(\n tf.multiply(tf.ones_like(mask) - mask, image_gen) - tf.multiply(tf.ones_like(mask) - mask, pred_high)))\n , 1)'''\n contextual_loss_former = tf.reduce_sum(tf.contrib.layers.flatten(\n tf.square(tf.multiply(mask, image_gen) - tf.multiply(mask, image_real))), 1)\n contextual_loss_prepare = tf.reduce_sum(tf.contrib.layers.flatten(\n tf.square(tf.multiply(tf.ones_like(mask) - mask, image_gen) - tf.multiply(tf.ones_like(mask)-mask, image_real))), 1)\n perceptual_loss = gen_cost\n complete_loss = contextual_loss_former + self.lam * perceptual_loss + 0.05*contextual_loss_latter2\n grad_complete_loss = tf.gradients(complete_loss, Z)\n grad_uniform_loss = tf.gradients(contextual_loss_prepare, Z)\n\n return Z, Y, image_real, discrim_cost, gen_cost, p_real, p_gen, grad_complete_loss, \\\n pred_high, pred_low, mask, contextual_loss_latter, contextual_loss_former, grad_uniform_loss\n\n\n def discriminate(self, image, Y):\n yb = tf.reshape(Y, tf.stack([self.batch_size, 1, 1, self.dim_y]))\n X = tf.concat([image, yb * tf.ones([self.batch_size, 24, 24, self.dim_y])],3)\n\n h1 = lrelu( tf.nn.conv2d( X, self.discrim_W1, strides=[1,2,2,1], padding='SAME' ))\n h1 = tf.concat([h1, yb * tf.ones([self.batch_size, 12, 12, self.dim_y])],3)\n\n h2 = lrelu(batchnormalize( tf.nn.conv2d( h1, self.discrim_W2, strides=[1,2,2,1], padding='SAME')) )\n h2 = tf.reshape(h2, [self.batch_size, -1])\n h2 = tf.concat([h2, Y], 1)\n discri=tf.matmul(h2, self.discrim_W3 )\n h3 = lrelu(batchnormalize(discri))\n return h3\n\n\n def generate(self, Z, Y):\n\n yb = tf.reshape(Y, [self.batch_size, 1, 1, self.dim_y])\n Z = tf.concat([Z,Y],1)\n h1 = tf.nn.relu(batchnormalize(tf.matmul(Z, self.gen_W1)))\n h1 = tf.concat([h1, Y],1)\n h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))\n h2 = tf.reshape(h2, [self.batch_size,6,6,self.dim_W2])\n h2 = tf.concat([h2, yb*tf.ones([self.batch_size, 6,6, self.dim_y])],3)\n\n output_shape_l3 = [self.batch_size,12,12,self.dim_W3]\n h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n h3 = tf.nn.relu( batchnormalize(h3) )\n h3 = tf.concat([h3, yb*tf.ones([self.batch_size, 12, 12, self.dim_y])], 3)\n\n output_shape_l4 = [self.batch_size,24,24,self.dim_channel]\n h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n return h4\n\n\n def samples_generator(self, batch_size):\n Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])\n Y = 
tf.placeholder(tf.float32, [batch_size, self.dim_y])\n\n yb = tf.reshape(Y, [batch_size, 1, 1, self.dim_y])\n Z_ = tf.concat([Z,Y], 1)\n h1 = tf.nn.relu(batchnormalize(tf.matmul(Z_, self.gen_W1)))\n h1 = tf.concat([h1, Y], 1)\n h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))\n h2 = tf.reshape(h2, [batch_size,6, 6,self.dim_W2])\n h2 = tf.concat([h2, yb*tf.ones([batch_size, 6,6, self.dim_y])], 3)\n\n output_shape_l3 = [batch_size,12, 12,self.dim_W3]\n h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3, strides=[1,2,2,1])\n h3 = tf.nn.relu( batchnormalize(h3) )\n h3 = tf.concat([h3, yb*tf.ones([batch_size, 12,12,self.dim_y])], 3)\n\n output_shape_l4 = [batch_size,24, 24,self.dim_channel]\n h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4, strides=[1,2,2,1])\n x = tf.nn.sigmoid(h4)\n return Z,Y,x\n"
] | [
[
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.matmul",
"tensorflow.abs",
"tensorflow.is_nan",
"tensorflow.concat",
"tensorflow.random_normal",
"tensorflow.reduce_sum",
"tensorflow.multiply",
"tensorflow.clip_by_value",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.stack",
"tensorflow.nn.conv2d_transpose",
"tensorflow.ones_like",
"tensorflow.gradients",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.sqrt",
"tensorflow.reduce_mean",
"tensorflow.nn.conv2d",
"tensorflow.square"
]
] |
txu2014/zipline | [
"e96b9ed7455be12ce77cd28cc65782ce94e11492"
] | [
"zipline/data/bcolz_daily_bars.py"
] | [
"# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nimport warnings\n\nfrom bcolz import carray, ctable\nimport logbook\nimport numpy as np\nfrom numpy import (\n array,\n full,\n iinfo,\n nan,\n)\nfrom pandas import (\n DatetimeIndex,\n NaT,\n read_csv,\n to_datetime,\n Timestamp,\n)\nfrom six import iteritems, viewkeys\nfrom toolz import compose\nfrom trading_calendars import get_calendar\n\nfrom zipline.data.session_bars import SessionBarReader\nfrom zipline.data.bar_reader import (\n NoDataAfterDate,\n NoDataBeforeDate,\n NoDataOnDate,\n)\nfrom zipline.utils.functional import apply\nfrom zipline.utils.input_validation import expect_element\nfrom zipline.utils.numpy_utils import iNaT, float64_dtype, uint32_dtype\nfrom zipline.utils.memoize import lazyval\nfrom zipline.utils.cli import maybe_show_progress\nfrom ._equities import _compute_row_slices, _read_bcolz_data\n\n\nlogger = logbook.Logger('UsEquityPricing')\n\nOHLC = frozenset(['open', 'high', 'low', 'close'])\nUS_EQUITY_PRICING_BCOLZ_COLUMNS = (\n 'open', 'high', 'low', 'close', 'volume', 'day', 'id'\n)\n\nUINT32_MAX = iinfo(np.uint32).max\n\n\ndef check_uint32_safe(value, colname):\n if value >= UINT32_MAX:\n raise ValueError(\n \"Value %s from column '%s' is too large\" % (value, colname)\n )\n\n\n@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})\ndef winsorise_uint32(df, invalid_data_behavior, column, *columns):\n \"\"\"Drops any record where a value would not fit into a uint32.\n\n Parameters\n ----------\n df : pd.DataFrame\n The dataframe to winsorise.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}\n What to do when data is outside the bounds of a uint32.\n *columns : iterable[str]\n The names of the columns to check.\n\n Returns\n -------\n truncated : pd.DataFrame\n ``df`` with values that do not fit into a uint32 zeroed out.\n \"\"\"\n columns = list((column,) + columns)\n mask = df[columns] > UINT32_MAX\n\n if invalid_data_behavior != 'ignore':\n mask |= df[columns].isnull()\n else:\n # we are not going to generate a warning or error for this so just use\n # nan_to_num\n df[columns] = np.nan_to_num(df[columns])\n\n mv = mask.values\n if mv.any():\n if invalid_data_behavior == 'raise':\n raise ValueError(\n '%d values out of bounds for uint32: %r' % (\n mv.sum(), df[mask.any(axis=1)],\n ),\n )\n if invalid_data_behavior == 'warn':\n warnings.warn(\n 'Ignoring %d values because they are out of bounds for'\n ' uint32: %r' % (\n mv.sum(), df[mask.any(axis=1)],\n ),\n stacklevel=3, # one extra frame for `expect_element`\n )\n\n df[mask] = np.nan\n return df\n\n\nclass BcolzDailyBarWriter(object):\n \"\"\"\n Class capable of writing daily OHLCV data to disk in a format that can\n be read efficiently by BcolzDailyOHLCVReader.\n\n Parameters\n ----------\n filename : str\n The location at which we should write our output.\n calendar : zipline.utils.calendar.trading_calendar\n Calendar to use to compute asset calendar offsets.\n start_session: 
pd.Timestamp\n Midnight UTC session label.\n end_session: pd.Timestamp\n Midnight UTC session label.\n\n See Also\n --------\n zipline.data.bcolz_daily_bars.BcolzDailyBarReader\n \"\"\"\n _csv_dtypes = {\n 'open': float64_dtype,\n 'high': float64_dtype,\n 'low': float64_dtype,\n 'close': float64_dtype,\n 'volume': float64_dtype,\n }\n\n def __init__(self, filename, calendar, start_session, end_session):\n self._filename = filename\n\n if start_session != end_session:\n if not calendar.is_session(start_session):\n raise ValueError(\n \"Start session %s is invalid!\" % start_session\n )\n if not calendar.is_session(end_session):\n raise ValueError(\n \"End session %s is invalid!\" % end_session\n )\n\n self._start_session = start_session\n self._end_session = end_session\n\n self._calendar = calendar\n\n @property\n def progress_bar_message(self):\n return \"Merging daily equity files:\"\n\n def progress_bar_item_show_func(self, value):\n return value if value is None else str(value[0])\n\n def write(self,\n data,\n assets=None,\n show_progress=False,\n invalid_data_behavior='warn'):\n \"\"\"\n Parameters\n ----------\n data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]\n The data chunks to write. Each chunk should be a tuple of sid\n and the data for that asset.\n assets : set[int], optional\n The assets that should be in ``data``. If this is provided\n we will check ``data`` against the assets and provide better\n progress information.\n show_progress : bool, optional\n Whether or not to show a progress bar while writing.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional\n What to do when data is encountered that is outside the range of\n a uint32.\n\n Returns\n -------\n table : bcolz.ctable\n The newly-written table.\n \"\"\"\n ctx = maybe_show_progress(\n (\n (sid, self.to_ctable(df, invalid_data_behavior))\n for sid, df in data\n ),\n show_progress=show_progress,\n item_show_func=self.progress_bar_item_show_func,\n label=self.progress_bar_message,\n length=len(assets) if assets is not None else None,\n )\n with ctx as it:\n return self._write_internal(it, assets)\n\n def write_csvs(self,\n asset_map,\n show_progress=False,\n invalid_data_behavior='warn'):\n \"\"\"Read CSVs as DataFrames from our asset map.\n\n Parameters\n ----------\n asset_map : dict[int -> str]\n A mapping from asset id to file path with the CSV data for that\n asset\n show_progress : bool\n Whether or not to show a progress bar while writing.\n invalid_data_behavior : {'warn', 'raise', 'ignore'}\n What to do when data is encountered that is outside the range of\n a uint32.\n \"\"\"\n read = partial(\n read_csv,\n parse_dates=['day'],\n index_col='day',\n dtype=self._csv_dtypes,\n )\n return self.write(\n ((asset, read(path)) for asset, path in iteritems(asset_map)),\n assets=viewkeys(asset_map),\n show_progress=show_progress,\n invalid_data_behavior=invalid_data_behavior,\n )\n\n def _write_internal(self, iterator, assets):\n \"\"\"\n Internal implementation of write.\n\n `iterator` should be an iterator yielding pairs of (asset, ctable).\n \"\"\"\n total_rows = 0\n first_row = {}\n last_row = {}\n calendar_offset = {}\n\n # Maps column name -> output carray.\n columns = {\n k: carray(array([], dtype=uint32_dtype))\n for k in US_EQUITY_PRICING_BCOLZ_COLUMNS\n }\n\n earliest_date = None\n sessions = self._calendar.sessions_in_range(\n self._start_session, self._end_session\n )\n\n if assets is not None:\n @apply\n def iterator(iterator=iterator, assets=set(assets)):\n for asset_id, table in 
iterator:\n if asset_id not in assets:\n raise ValueError('unknown asset id %r' % asset_id)\n yield asset_id, table\n\n for asset_id, table in iterator:\n nrows = len(table)\n for column_name in columns:\n if column_name == 'id':\n # We know what the content of this column is, so don't\n # bother reading it.\n columns['id'].append(\n full((nrows,), asset_id, dtype='uint32'),\n )\n continue\n\n columns[column_name].append(table[column_name])\n\n if earliest_date is None:\n earliest_date = table[\"day\"][0]\n else:\n earliest_date = min(earliest_date, table[\"day\"][0])\n\n # Bcolz doesn't support ints as keys in `attrs`, so convert\n # assets to strings for use as attr keys.\n asset_key = str(asset_id)\n\n # Calculate the index into the array of the first and last row\n # for this asset. This allows us to efficiently load single\n # assets when querying the data back out of the table.\n first_row[asset_key] = total_rows\n last_row[asset_key] = total_rows + nrows - 1\n total_rows += nrows\n\n table_day_to_session = compose(\n self._calendar.minute_to_session_label,\n partial(Timestamp, unit='s', tz='UTC'),\n )\n asset_first_day = table_day_to_session(table['day'][0])\n asset_last_day = table_day_to_session(table['day'][-1])\n\n asset_sessions = sessions[\n sessions.slice_indexer(asset_first_day, asset_last_day)\n ]\n # assert len(table) == len(asset_sessions), (\n # 'Got {} rows for daily bars table with first day={}, last '\n # 'day={}, expected {} rows.\\n'\n # 'Missing sessions: {}\\n'\n # 'Extra sessions: {}'.format(\n # len(table),\n # asset_first_day.date(),\n # asset_last_day.date(),\n # len(asset_sessions),\n # asset_sessions.difference(\n # to_datetime(\n # np.array(table['day']),\n # unit='s',\n # utc=True,\n # )\n # ).tolist(),\n # to_datetime(\n # np.array(table['day']),\n # unit='s',\n # utc=True,\n # ).difference(asset_sessions).tolist(),\n # )\n # )\n\n # Calculate the number of trading days between the first date\n # in the stored data and the first date of **this** asset. 
This\n # offset used for output alignment by the reader.\n calendar_offset[asset_key] = sessions.get_loc(asset_first_day)\n\n # This writes the table to disk.\n full_table = ctable(\n columns=[\n columns[colname]\n for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS\n ],\n names=US_EQUITY_PRICING_BCOLZ_COLUMNS,\n rootdir=self._filename,\n mode='w',\n )\n\n full_table.attrs['first_trading_day'] = (\n earliest_date if earliest_date is not None else iNaT\n )\n\n full_table.attrs['first_row'] = first_row\n full_table.attrs['last_row'] = last_row\n full_table.attrs['calendar_offset'] = calendar_offset\n full_table.attrs['calendar_name'] = self._calendar.name\n full_table.attrs['start_session_ns'] = self._start_session.value\n full_table.attrs['end_session_ns'] = self._end_session.value\n full_table.flush()\n return full_table\n\n @expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})\n def to_ctable(self, raw_data, invalid_data_behavior):\n if isinstance(raw_data, ctable):\n # we already have a ctable so do nothing\n return raw_data\n\n winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)\n processed = (raw_data[list(OHLC)] * 1000).round().astype('uint32')\n dates = raw_data.index.values.astype('datetime64[s]')\n check_uint32_safe(dates.max().view(np.int64), 'day')\n processed['day'] = dates.astype('uint32')\n processed['volume'] = raw_data.volume.astype('uint32')\n return ctable.fromdataframe(processed)\n\n\nclass BcolzDailyBarReader(SessionBarReader):\n \"\"\"\n Reader for raw pricing data written by BcolzDailyOHLCVWriter.\n\n Parameters\n ----------\n table : bcolz.ctable\n The ctable contaning the pricing data, with attrs corresponding to the\n Attributes list below.\n read_all_threshold : int\n The number of equities at which; below, the data is read by reading a\n slice from the carray per asset. above, the data is read by pulling\n all of the data for all assets into memory and then indexing into that\n array for each day and asset pair. 
Used to tune performance of reads\n when using a small or large number of equities.\n\n Attributes\n ----------\n The table with which this loader interacts contains the following\n attributes:\n\n first_row : dict\n Map from asset_id -> index of first row in the dataset with that id.\n last_row : dict\n Map from asset_id -> index of last row in the dataset with that id.\n calendar_offset : dict\n Map from asset_id -> calendar index of first row.\n start_session_ns: int\n Epoch ns of the first session used in this dataset.\n end_session_ns: int\n Epoch ns of the last session used in this dataset.\n calendar_name: str\n String identifier of trading calendar used (ie, \"NYSE\").\n\n We use first_row and last_row together to quickly find ranges of rows to\n load when reading an asset's data into memory.\n\n We use calendar_offset and calendar to orient loaded blocks within a\n range of queried dates.\n\n Notes\n ------\n A Bcolz CTable is comprised of Columns and Attributes.\n The table with which this loader interacts contains the following columns:\n\n ['open', 'high', 'low', 'close', 'volume', 'day', 'id'].\n\n The data in these columns is interpreted as follows:\n\n - Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *\n as-traded dollar value.\n - Volume is interpreted as as-traded volume.\n - Day is interpreted as seconds since midnight UTC, Jan 1, 1970.\n - Id is the asset id of the row.\n\n The data in each column is grouped by asset and then sorted by day within\n each asset block.\n\n The table is built to represent a long time range of data, e.g. ten years\n of equity data, so the lengths of each asset block is not equal to each\n other. The blocks are clipped to the known start and end date of each asset\n to cut down on the number of empty values that would need to be included to\n make a regular/cubic dataset.\n\n When read across the open, high, low, close, and volume with the same\n index should represent the same asset and day.\n\n See Also\n --------\n zipline.data.bcolz_daily_bars.BcolzDailyBarWriter\n \"\"\"\n def __init__(self, table, read_all_threshold=3000):\n self._maybe_table_rootdir = table\n # Cache of fully read np.array for the carrays in the daily bar table.\n # raw_array does not use the same cache, but it could.\n # Need to test keeping the entire array in memory for the course of a\n # process first.\n self._spot_cols = {}\n self.PRICE_ADJUSTMENT_FACTOR = 0.001\n self._read_all_threshold = read_all_threshold\n\n @lazyval\n def _table(self):\n maybe_table_rootdir = self._maybe_table_rootdir\n if isinstance(maybe_table_rootdir, ctable):\n return maybe_table_rootdir\n return ctable(rootdir=maybe_table_rootdir, mode='r')\n\n @lazyval\n def sessions(self):\n if 'calendar' in self._table.attrs.attrs:\n # backwards compatibility with old formats, will remove\n return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')\n else:\n cal = get_calendar(self._table.attrs['calendar_name'])\n start_session_ns = self._table.attrs['start_session_ns']\n start_session = Timestamp(start_session_ns, tz='UTC')\n\n end_session_ns = self._table.attrs['end_session_ns']\n end_session = Timestamp(end_session_ns, tz='UTC')\n\n sessions = cal.sessions_in_range(start_session, end_session)\n\n return sessions\n\n @lazyval\n def _first_rows(self):\n return {\n int(asset_id): start_index\n for asset_id, start_index in iteritems(\n self._table.attrs['first_row'],\n )\n }\n\n @lazyval\n def _last_rows(self):\n return {\n int(asset_id): end_index\n for asset_id, end_index 
in iteritems(\n self._table.attrs['last_row'],\n )\n }\n\n @lazyval\n def _calendar_offsets(self):\n return {\n int(id_): offset\n for id_, offset in iteritems(\n self._table.attrs['calendar_offset'],\n )\n }\n\n @lazyval\n def first_trading_day(self):\n try:\n return Timestamp(\n self._table.attrs['first_trading_day'],\n unit='s',\n tz='UTC'\n )\n except KeyError:\n return None\n\n @lazyval\n def trading_calendar(self):\n if 'calendar_name' in self._table.attrs.attrs:\n return get_calendar(self._table.attrs['calendar_name'])\n else:\n return None\n\n @property\n def last_available_dt(self):\n return self.sessions[-1]\n\n def _compute_slices(self, start_idx, end_idx, assets):\n \"\"\"\n Compute the raw row indices to load for each asset on a query for the\n given dates after applying a shift.\n\n Parameters\n ----------\n start_idx : int\n Index of first date for which we want data.\n end_idx : int\n Index of last date for which we want data.\n assets : pandas.Int64Index\n Assets for which we want to compute row indices\n\n Returns\n -------\n A 3-tuple of (first_rows, last_rows, offsets):\n first_rows : np.array[intp]\n Array with length == len(assets) containing the index of the first\n row to load for each asset in `assets`.\n last_rows : np.array[intp]\n Array with length == len(assets) containing the index of the last\n row to load for each asset in `assets`.\n offset : np.array[intp]\n Array with length == (len(asset) containing the index in a buffer\n of length `dates` corresponding to the first row of each asset.\n\n The value of offset[i] will be 0 if asset[i] existed at the start\n of a query. Otherwise, offset[i] will be equal to the number of\n entries in `dates` for which the asset did not yet exist.\n \"\"\"\n # The core implementation of the logic here is implemented in Cython\n # for efficiency.\n return _compute_row_slices(\n self._first_rows,\n self._last_rows,\n self._calendar_offsets,\n start_idx,\n end_idx,\n assets,\n )\n\n def load_raw_arrays(self, columns, start_date, end_date, assets):\n start_idx = self._load_raw_arrays_date_to_index(start_date)\n end_idx = self._load_raw_arrays_date_to_index(end_date)\n\n first_rows, last_rows, offsets = self._compute_slices(\n start_idx,\n end_idx,\n assets,\n )\n read_all = len(assets) > self._read_all_threshold\n return _read_bcolz_data(\n self._table,\n (end_idx - start_idx + 1, len(assets)),\n list(columns),\n first_rows,\n last_rows,\n offsets,\n read_all,\n )\n\n def _load_raw_arrays_date_to_index(self, date):\n try:\n return self.sessions.get_loc(date)\n except KeyError:\n raise NoDataOnDate(date)\n\n def _spot_col(self, colname):\n \"\"\"\n Get the colname from daily_bar_table and read all of it into memory,\n caching the result.\n\n Parameters\n ----------\n colname : string\n A name of a OHLCV carray in the daily_bar_table\n\n Returns\n -------\n array (uint32)\n Full read array of the carray in the daily_bar_table with the\n given colname.\n \"\"\"\n try:\n col = self._spot_cols[colname]\n except KeyError:\n col = self._spot_cols[colname] = self._table[colname]\n return col\n\n def get_last_traded_dt(self, asset, day):\n volumes = self._spot_col('volume')\n\n search_day = day\n\n while True:\n try:\n ix = self.sid_day_index(asset, search_day)\n except NoDataBeforeDate:\n return NaT\n except NoDataAfterDate:\n prev_day_ix = self.sessions.get_loc(search_day) - 1\n if prev_day_ix > -1:\n search_day = self.sessions[prev_day_ix]\n continue\n except NoDataOnDate:\n return NaT\n if volumes[ix] != 0:\n return search_day\n 
prev_day_ix = self.sessions.get_loc(search_day) - 1\n if prev_day_ix > -1:\n search_day = self.sessions[prev_day_ix]\n else:\n return NaT\n\n def sid_day_index(self, sid, day):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n day : datetime64-like\n Midnight of the day for which data is requested.\n\n Returns\n -------\n int\n Index into the data tape for the given sid and day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n \"\"\"\n try:\n day_loc = self.sessions.get_loc(day)\n except Exception:\n raise NoDataOnDate(\"day={0} is outside of calendar={1}\".format(\n day, self.sessions))\n offset = day_loc - self._calendar_offsets[sid]\n if offset < 0:\n raise NoDataBeforeDate(\n \"No data on or before day={0} for sid={1}\".format(\n day, sid))\n ix = self._first_rows[sid] + offset\n if ix > self._last_rows[sid]:\n raise NoDataAfterDate(\n \"No data on or after day={0} for sid={1}\".format(\n day, sid))\n return ix\n\n def get_value(self, sid, dt, field):\n \"\"\"\n Parameters\n ----------\n sid : int\n The asset identifier.\n day : datetime64-like\n Midnight of the day for which data is requested.\n colname : string\n The price field. e.g. ('open', 'high', 'low', 'close', 'volume')\n\n Returns\n -------\n float\n The spot price for colname of the given sid on the given day.\n Raises a NoDataOnDate exception if the given day and sid is before\n or after the date range of the equity.\n Returns -1 if the day is within the date range, but the price is\n 0.\n \"\"\"\n ix = self.sid_day_index(sid, dt)\n price = self._spot_col(field)[ix]\n if field != 'volume':\n if price == 0:\n return nan\n else:\n return price * 0.001\n else:\n return price\n"
] | [
[
"pandas.DatetimeIndex",
"numpy.iinfo",
"numpy.full",
"numpy.array",
"numpy.nan_to_num",
"pandas.Timestamp"
]
] |
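The `BcolzDailyBarReader` record above describes, in its docstring, how `first_row`, `last_row`, and `calendar_offset` locate each asset's block on the data tape; the actual slice computation is delegated to a Cython helper (`_compute_row_slices`). Below is a minimal pure-Python sketch of that bookkeeping under the documented semantics — the metadata dictionaries are hypothetical toy values, not real zipline output, and assets with no rows inside the query window are ignored for brevity.

```python
import numpy as np

# Hypothetical per-asset metadata, mirroring the table attributes described above:
# index of each asset's first/last row on the tape, and the calendar index of its first row.
first_rows = {1: 0, 2: 500}
last_rows = {1: 499, 2: 1200}
calendar_offsets = {1: 0, 2: 250}

def compute_slices(start_idx, end_idx, assets):
    """Pure-Python sketch of the row-slice computation (done in Cython in zipline)."""
    n = len(assets)
    first = np.zeros(n, dtype=np.intp)
    last = np.zeros(n, dtype=np.intp)
    offset = np.zeros(n, dtype=np.intp)
    for i, sid in enumerate(assets):
        cal_off = calendar_offsets[sid]
        # Clip the query window to the sessions the asset actually has data for.
        asset_start = max(start_idx, cal_off)
        asset_end = min(end_idx, cal_off + (last_rows[sid] - first_rows[sid]))
        first[i] = first_rows[sid] + (asset_start - cal_off)
        last[i] = first_rows[sid] + (asset_end - cal_off)
        # Offset is 0 if the asset already existed at the start of the query.
        offset[i] = max(cal_off - start_idx, 0)
    return first, last, offset

print(compute_slices(240, 260, [1, 2]))
```

Prices read from the resulting rows are then scaled by `PRICE_ADJUSTMENT_FACTOR = 0.001`, since the OHLC columns store 1000 × the as-traded dollar value.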
byukan/nlpia | [
"73c03f651e54e945f9a7eebe4714095dc3e5609a"
] | [
"src/nlpia/plots.py"
] | [
"# from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n# import plotly.graph_objs as go\n\nimport os\n\nimport matplotlib\n\n# matplotlib.use('TkAgg') # noqa\nimport seaborn # noqa\nfrom matplotlib import pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D # noqa\nimport pandas as pd\n\ntry:\n import plotly.chartstudio as plotly\nexcept ImportError:\n import plotly.plotly as plotly\nfrom plotly.offline.offline import _plot_html\nfrom pugnlp.util import clean_columns\n# from plotly import graph_objs # Scatter, scatter.Marker, Layout, layout.YAxis, layout.XAxis\nfrom plotly.graph_objs import Scatter, Layout\nfrom plotly.graph_objs.scatter import Marker\nfrom plotly.graph_objs.layout import XAxis, YAxis\n# import cufflinks as cf # noqa\n\nfrom nlpia.constants import DATA_PATH\n\nnp = pd.np\n\nPLOTLY_HTML = \"\"\"\n<html>\n <head>\n <meta charset=\"utf-8\" />\n <!-- <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\"> -->\n <script type=\"text/javascript\">\n {plotlyjs}\n </script>\n </head>\n <body>\n {plotlyhtml}\n </body>\n</html>\n\"\"\"\n\nDEFAULT_PLOTLY_CONFIG = {\n 'staticPlot': False, # no interactivity, for export or image generation\n 'workspace': False, # we're in the workspace, so need toolbar etc\n 'editable': False, # we can edit titles, move annotations, etc\n 'autosizable': False, # plot will respect layout.autosize=true and infer its container size\n 'fillFrame': False, # if we DO autosize, do we fill the container or the screen?\n 'scrollZoom': False, # mousewheel or two-finger scroll zooms the plot\n 'doubleClick': 'reset+autosize', # double click interaction (false, 'reset', 'autosize' or 'reset+autosize')\n 'showTips': True, # new users see some hints about interactivity\n 'showLink': True, # link to open this plot in plotly\n 'sendData': True, # if we show a link, does it contain data or just link to a plotly file?\n 'linkText': 'Edit chart', # text appearing in the sendData link\n 'displayModeBar': 'true', # display the modebar (true, false, or 'hover')\n 'displaylogo': False, # add the plotly logo on the end of the modebar\n 'plot3dPixelRatio': 2, # increase the pixel ratio for 3D plot images\n 'setBackground': 'opaque' # fn to add the background color to a different container or 'opaque'\n # to ensure there's white behind it\n}\n\n\ndef plotly_timeseries(df):\n \"\"\" NotImplemented: cufflinks has some strict plotly limits so couldn't be reliably installed \"\"\"\n fig = df.iplot([{\n 'x': df.index,\n 'y': df[col],\n 'name': col\n } for col in df.columns], filename='cufflinks/simple-line')\n return fig\n\n\ndef scatter_3d(df, labels=None, depthshade=True):\n df = getattr(df, 'embedding_', df)\n labels = df[labels] if (isinstance(labels, (int, str, bytes)) and\n labels in getattr(df, 'columns', set())) else labels\n labels = np.array(np.zeros(shape=(len(df),)) if labels is None else labels)\n try:\n labels = labels.astype(int) # TODO: use LabelEncoder\n except (TypeError, AttributeError):\n pass\n if str(labels.dtype).startswith('int'):\n labels = np.array(list('grbkcym'))[labels % 7]\n\n try:\n df = df.values\n except AttributeError:\n pass\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(df[:, 0], df[:, 1], df[:, 2], zdir='z', s=20, c=labels, depthshade=depthshade)\n return fig\n\n\ndef get_array(df, x, default=None):\n if x is None:\n if default is None:\n x = df[df.columns[0]]\n else:\n x = df[default] if default in df else default\n elif isinstance(x, (pd.Series, np.ndarray, list, 
tuple)):\n x = np.nd.array(x)\n else:\n x = df[x] if x in df.columns else df[df.columns[x]]\n return np.array(x)\n\n\ndef offline_plotly_scatter3d(df, x=0, y=1, z=-1):\n \"\"\" Plot an offline scatter plot colored according to the categories in the 'name' column.\n\n >> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv')\n >> offline_plotly(df)\n \"\"\"\n data = []\n # clusters = []\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)']\n\n # df.columns = clean_columns(df.columns)\n\n x = get_array(df, x, default=0)\n y = get_array(df, y, default=1)\n z = get_array(df, z, default=-1)\n for i in range(len(df['name'].unique())):\n name = df['Name'].unique()[i]\n color = colors[i]\n x = x[np.array(df['name'] == name)]\n y = y[np.array(df['name'] == name)]\n z = z[np.array(df['name'] == name)]\n\n trace = dict(\n name=name,\n x=x, y=y, z=z,\n type=\"scatter3d\",\n mode='markers',\n marker=dict(size=3, color=color, line=dict(width=0)))\n data.append(trace)\n\n layout = dict(\n width=800,\n height=550,\n autosize=False,\n title='Iris dataset',\n scene=dict(\n xaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n yaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n zaxis=dict(\n gridcolor='rgb(255, 255, 255)',\n zerolinecolor='rgb(255, 255, 255)',\n showbackground=True,\n backgroundcolor='rgb(230, 230,230)'\n ),\n aspectratio=dict(x=1, y=1, z=0.7),\n aspectmode='manual'\n ),\n )\n\n fig = dict(data=data, layout=layout)\n\n # IPython notebook\n # plotly.iplot(fig, filename='pandas-3d-iris', validate=False)\n\n url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False)\n return url\n\n\ndef annotate(row, ax, x='x', y='y', text='name', xytext=(7, -5), textcoords='offset points', **kwargs):\n \"\"\"Add a text label to the plot of a DataFrame indicated by the provided axis (ax).\n\n Reference:\n https://stackoverflow.com/a/40979683/623735\n \"\"\"\n # idx = row.name\n text = row[text] if text in row else str(text)\n x = row[x] if x in row else float(x)\n y = row[y] if y in row else float(y)\n ax.annotate(text, (row[x], row[y]), xytext=xytext, textcoords=textcoords, **kwargs)\n return row[text]\n\n\ndef offline_plotly_data(data, filename=None, config=None, validate=True,\n default_width='100%', default_height=525, global_requirejs=False):\n r\"\"\" Write a plotly scatter plot to HTML file that doesn't require server\n\n >>> from nlpia.loaders import get_data\n >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv')\n >>> df.columns = [eval(c) if c[0] in '\"\\'' else str(c) for c in df.columns]\n >>> data = {'data': [\n ... Scatter(x=df[continent+', x'],\n ... y=df[continent+', y'],\n ... text=df[continent+', text'],\n ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,),\n ... mode='markers',\n ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']\n ... ],\n ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log'))\n ... 
}\n >>> html = offline_plotly_data(data, filename=None)\n \"\"\"\n config_default = dict(DEFAULT_PLOTLY_CONFIG)\n if config is not None:\n config_default.update(config)\n with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f:\n js = f.read()\n html, divid, width, height = _plot_html(\n data,\n config=config_default,\n validate=validate,\n default_width=default_width, default_height=default_height,\n global_requirejs=global_requirejs)\n html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html)\n if filename and isinstance(filename, str):\n with open(filename, 'wt') as f:\n f.write(html)\n return html\n\n\ndef normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(),\n category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']):\n \"\"\"Reformat a dataframe in etpinard's format for use in plot functions and sklearn models\"\"\"\n possible_categories = ['Africa', 'Americas', 'Asia', 'Europe',\n 'Oceania'] if possible_categories is None else possible_categories\n df.columns = clean_columns(df.columns)\n df = pd.read_csv(df) if isinstance(df, str) else df\n columns = clean_columns(list(columns))\n df2 = pd.DataFrame(columns=columns)\n df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories])\n columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns])\n for col, category_cols in columns:\n df2[col] = np.concatenate([df[label].values for label in category_cols])\n return df2\n\n\ndef offline_plotly_scatter_bubble(df, x='x', y='y', size_col='size', text_col='text',\n category_col='category', possible_categories=None,\n filename=None,\n config={'displaylogo': False},\n xscale=None, yscale='log',\n layout={'hovermode': 'closest', 'showlegend': False, 'autosize': True},\n marker={'sizemode': 'area'},\n min_size=10,\n ):\n r\"\"\"Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns\n\n config keys:\n fillFrame setBackground displaylogo sendData showLink linkText staticPlot scrollZoom plot3dPixelRatio displayModeBar\n showTips workspace doubleClick autosizable editable\n\n layout keys:\n angularaxis annotations autosize bargap bargroupgap barmode barnorm boxgap boxgroupgap boxmode calendar\n direction dragmode font geo height hiddenlabels hiddenlabelssrc hidesources hovermode images legend\n mapbox margin orientation paper_bgcolor plot_bgcolor radialaxis scene separators shapes showlegend sliders smith\n ternary title titlefont updatemenus width xaxis yaxis\n\n marker keys:\n autocolorscale blend border cauto cmax cmin color colorbar colors colorscale colorsrc colorssrc line maxdisplayed\n opacity opacitysrc outliercolor reversescale showscale size sizemax sizemin sizemode sizeref sizesrc symbol symbolsrc\n\n marker['sizeref'] gives the denominator of the circle scaling factor.\n Typically it should be about a tenth of the minimum 'size' column value\n\n >>> from nlpia.data.loaders import get_data\n >>> df = get_data('cities_us_wordvectors_pca2_meta').iloc[:100]\n >>> html = offline_plotly_scatter_bubble(\n ... df.sort_values('population', ascending=False)[:350].copy().sort_values('population'),\n ... x='x', y='y',\n ... size_col='population', text_col='name', category_col='timezone',\n ... xscale=None, yscale=None, # 'log' or None\n ... 
layout={}, marker={'sizeref': 3000})\n \"\"\"\n config_default = dict(DEFAULT_PLOTLY_CONFIG)\n marker_default = {\n 'size': size_col or min_size,\n 'sizemode': 'area',\n 'sizeref': int(df[size_col].min() * .8) if size_col else min_size}\n marker_default.update(marker)\n size_col = marker_default.pop('size')\n layout_default = {\n 'xaxis': XAxis(title=x, type=xscale),\n 'yaxis': YAxis(title=y, type=yscale),\n }\n layout_default.update(**layout)\n if config is not None:\n config_default.update(config)\n df.columns = clean_columns(df.columns)\n if possible_categories is None and category_col is not None:\n if category_col in df.columns:\n category_labels = df[category_col]\n else:\n category_labels = np.array(category_col)\n possible_categories = list(set(category_labels))\n possible_categories = [None] if possible_categories is None else possible_categories\n if category_col and category_col in df:\n masks = [np.array(df[category_col] == label) for label in possible_categories]\n else:\n masks = [np.array([True] * len(df))] * len(possible_categories)\n data = {'data': [\n Scatter(x=df[x][mask].values,\n y=df[y][mask].values,\n text=df[text_col][mask].values,\n marker=Marker(size=df[size_col][mask] if size_col in df.columns else size_col,\n **marker_default),\n mode='markers',\n name=str(category_name)) for (category_name, mask) in zip(possible_categories, masks)\n ],\n 'layout': Layout(**layout_default)\n }\n return offline_plotly_data(data, filename=filename, config=config_default)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.figure",
"pandas.DataFrame"
]
] |
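`scatter_3d` in the `nlpia/plots.py` record above maps integer labels onto a short color cycle and draws an `Axes3D` scatter. Here is a self-contained sketch of the same pattern with toy data (note, as an aside, that `get_array` in that file calls `np.nd.array`, which does not exist in NumPy — `np.array` is presumably intended):

```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: registers the 3d projection

# Toy embedding: 150 points in 3-D belonging to three clusters.
rng = np.random.default_rng(0)
points = rng.normal(size=(150, 3)) + np.repeat(np.eye(3) * 4, 50, axis=0)
labels = np.repeat(np.arange(3), 50)

# Map integer labels onto a small color cycle, as scatter_3d above does.
colors = np.array(list('grb'))[labels % 3]

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:, 0], points[:, 1], points[:, 2], s=20, c=colors, depthshade=True)
plt.show()
```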
CINPLA/edNEGmodel_analysis | [
"be8854c563376a14ee7d15e51d98d0d82be96a35"
] | [
"figures/plot_figure6.py"
] | [
"import warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom set_style import set_style\n\nset_style('default', w=1, h=3.3)\n\nfig = plt.figure()\ngs = gridspec.GridSpec(8,6)\nax00 = plt.subplot(gs[0:2,0:2])\nax0 = plt.subplot(gs[0:2,2:4])\nax1 = plt.subplot(gs[2:4,0:2])\nax2 = plt.subplot(gs[2:4,2:4], sharey=ax1)\nax3 = plt.subplot(gs[2:4,4:], sharey=ax1)\nax4 = plt.subplot(gs[4:6,0:2])\nax5 = plt.subplot(gs[4:6,2:4], sharey=ax4)\nax6 = plt.subplot(gs[4:6,4:], sharey=ax4)\nax7 = plt.subplot(gs[6:,0:2])\nax8 = plt.subplot(gs[6:,2:4], sharey=ax7)\nax9 = plt.subplot(gs[6:,4:], sharey=ax7)\n\n### Panel A ###\ndata = np.load('../data/figure6_K_soma_ss_1.npz')\nt = data['t']\nphi_e_sum = data['phi_e_n']+data['phi_e_g']+data['phi_e_diff'][:-1]\nax00.plot(t[:-1], phi_e_sum*1000, 'k')[0]\nax00.set_xlim(0,60)\nax00.set_title('Physiological')\n\n### Panel B ###\ndata = np.load('../data/figure6_K_soma_db_1.npz')\nt = data['t']\nphi_e_sum = data['phi_e_n']+data['phi_e_g']+data['phi_e_diff'][:-1]\nax0.plot(t[:-1], phi_e_sum*1000, 'k')[0]\nax0.set_xlim(0,600)\nax0.set_title('Pathological')\n\ndata = np.load('../data/figure6.npz')\n\nI_stim_K_soma_ss = data['I_stim_K_soma_ss']\nn_K_soma_ss = data['n_K_soma_ss']\ng_K_soma_ss = data['g_K_soma_ss']\ndiff_K_soma_ss = data['diff_K_soma_ss']\nI_stim_K_soma_db = data['I_stim_K_soma_db']\nn_K_soma_db = data['n_K_soma_db']\ng_K_soma_db = data['g_K_soma_db']\ndiff_K_soma_db = data['diff_K_soma_db']\n\nI_stim_K_dendrite_ss = data['I_stim_K_dendrite_ss']\nn_K_dendrite_ss = data['n_K_dendrite_ss']\ng_K_dendrite_ss = data['g_K_dendrite_ss']\ndiff_K_dendrite_ss = data['diff_K_dendrite_ss']\nI_stim_K_dendrite_db = data['I_stim_K_dendrite_db']\nn_K_dendrite_db = data['n_K_dendrite_db']\ng_K_dendrite_db = data['g_K_dendrite_db']\ndiff_K_dendrite_db = data['diff_K_dendrite_db']\n\nI_stim_K_both_ss = data['I_stim_K_both_ss']\nn_K_both_ss = data['n_K_both_ss']\ng_K_both_ss = data['g_K_both_ss']\ndiff_K_both_ss = data['diff_K_both_ss']\nI_stim_K_both_db = data['I_stim_K_both_db']\nn_K_both_db = data['n_K_both_db']\ng_K_both_db = data['g_K_both_db']\ndiff_K_both_db = data['diff_K_both_db']\n\nI_stim_Na_soma_ss = data['I_stim_Na_soma_ss']\nn_Na_soma_ss = data['n_Na_soma_ss']\ng_Na_soma_ss = data['g_Na_soma_ss']\ndiff_Na_soma_ss = data['diff_Na_soma_ss']\nI_stim_Na_soma_db = data['I_stim_Na_soma_db']\nn_Na_soma_db = data['n_Na_soma_db']\ng_Na_soma_db = data['g_Na_soma_db']\ndiff_Na_soma_db = data['diff_Na_soma_db']\n\nI_stim_Na_dendrite_ss = data['I_stim_Na_dendrite_ss']\nn_Na_dendrite_ss = data['n_Na_dendrite_ss']\ng_Na_dendrite_ss = data['g_Na_dendrite_ss']\ndiff_Na_dendrite_ss = data['diff_Na_dendrite_ss']\nI_stim_Na_dendrite_db = data['I_stim_Na_dendrite_db']\nn_Na_dendrite_db = data['n_Na_dendrite_db']\ng_Na_dendrite_db = data['g_Na_dendrite_db']\ndiff_Na_dendrite_db = data['diff_Na_dendrite_db']\n\nI_stim_Na_both_ss = data['I_stim_Na_both_ss']\nn_Na_both_ss = data['n_Na_both_ss']\ng_Na_both_ss = data['g_Na_both_ss']\ndiff_Na_both_ss = data['diff_Na_both_ss']\nI_stim_Na_both_db = data['I_stim_Na_both_db']\nn_Na_both_db = data['n_Na_both_db']\ng_Na_both_db = data['g_Na_both_db']\ndiff_Na_both_db = data['diff_Na_both_db']\n\nI_stim_Cl_soma_ss = data['I_stim_Cl_soma_ss']\nn_Cl_soma_ss = data['n_Cl_soma_ss']\ng_Cl_soma_ss = data['g_Cl_soma_ss']\ndiff_Cl_soma_ss = data['diff_Cl_soma_ss']\nI_stim_Cl_soma_db = data['I_stim_Cl_soma_db']\nn_Cl_soma_db = data['n_Cl_soma_db']\ng_Cl_soma_db = 
data['g_Cl_soma_db']\ndiff_Cl_soma_db = data['diff_Cl_soma_db']\n\nI_stim_Cl_dendrite_ss = data['I_stim_Cl_dendrite_ss']\nn_Cl_dendrite_ss = data['n_Cl_dendrite_ss']\ng_Cl_dendrite_ss = data['g_Cl_dendrite_ss']\ndiff_Cl_dendrite_ss = data['diff_Cl_dendrite_ss']\nI_stim_Cl_dendrite_db = data['I_stim_Cl_dendrite_db']\nn_Cl_dendrite_db = data['n_Cl_dendrite_db']\ng_Cl_dendrite_db = data['g_Cl_dendrite_db']\ndiff_Cl_dendrite_db = data['diff_Cl_dendrite_db']\n\nI_stim_Cl_both_ss = data['I_stim_Cl_both_ss']\nn_Cl_both_ss = data['n_Cl_both_ss']\ng_Cl_both_ss = data['g_Cl_both_ss']\ndiff_Cl_both_ss = data['diff_Cl_both_ss']\nI_stim_Cl_both_db = data['I_stim_Cl_both_db']\nn_Cl_both_db = data['n_Cl_both_db']\ng_Cl_both_db = data['g_Cl_both_db']\ndiff_Cl_both_db = data['diff_Cl_both_db']\n\n###### Panel C ###\nl1 = ax1.plot(I_stim_K_soma_ss, n_K_soma_ss, 'o-', color='k', markersize=3)[0]\nl2 = ax1.plot(I_stim_K_soma_ss, g_K_soma_ss, 'v-', color='tab:purple', markersize=3)[0]\nl3 = ax1.plot(I_stim_K_soma_ss, diff_K_soma_ss, '*-', color='tab:red', markersize=3)[0]\nl4 = ax1.plot(I_stim_K_soma_ss, n_K_soma_ss+g_K_soma_ss+diff_K_soma_ss, 'k--', markersize=3)[0]\nax1.plot(I_stim_K_soma_db, n_K_soma_db, 'o-', color='k', markersize=3)[0]\nax1.plot(I_stim_K_soma_db, g_K_soma_db, 'v-', color='tab:purple', markersize=3)[0]\nax1.plot(I_stim_K_soma_db, diff_K_soma_db, '*-', color='tab:red', markersize=3)[0]\nax1.plot(I_stim_K_soma_db, n_K_soma_db+g_K_soma_db+diff_K_soma_db, 'k--', markersize=3)[0]\nax1.set_title('K$^+$, soma')\nfig.legend([l1, l2, l3, l4], [r'$\\bar{\\phi}\\mathrm{_{se,n}}$', r'$\\bar{\\phi}\\mathrm{_{se,g}}$', r'$\\bar{\\phi}\\mathrm{_{se,diff}}$', r'$\\bar{\\phi}\\mathrm{_{se,sum}}$'], \\\n loc=(0.75,0.77), ncol=1, fontsize='large', handlelength=0.9, handletextpad=0.4)\n\n### Panel D ###\nax2.plot(I_stim_K_dendrite_ss, n_K_dendrite_ss, 'o-', color='k', markersize=3)[0]\nax2.plot(I_stim_K_dendrite_ss, g_K_dendrite_ss, 'v-', color='tab:purple', markersize=3)[0]\nax2.plot(I_stim_K_dendrite_ss, diff_K_dendrite_ss, '*-', color='tab:red', markersize=3)[0]\nax2.plot(I_stim_K_dendrite_ss, n_K_dendrite_ss+g_K_dendrite_ss+diff_K_dendrite_ss, 'k--', markersize=3)[0]\nax2.plot(I_stim_K_dendrite_db, n_K_dendrite_db, 'o-', color='k', markersize=3)[0]\nax2.plot(I_stim_K_dendrite_db, g_K_dendrite_db, 'v-', color='tab:purple', markersize=3)[0]\nax2.plot(I_stim_K_dendrite_db, diff_K_dendrite_db, '*-', color='tab:red', markersize=3)[0]\nax2.plot(I_stim_K_dendrite_db, n_K_dendrite_db+g_K_dendrite_db+diff_K_dendrite_db, 'k--', markersize=3)[0]\nax2.set_title('K$^+$, dendrite')\n\n### Panel E ###\nax3.plot(I_stim_K_both_ss, n_K_both_ss, 'o-', color='k', markersize=3)[0]\nax3.plot(I_stim_K_both_ss, g_K_both_ss, 'v-', color='tab:purple', markersize=3)[0]\nax3.plot(I_stim_K_both_ss, diff_K_both_ss, '*-', color='tab:red', markersize=3)[0]\nax3.plot(I_stim_K_both_ss, n_K_both_ss+g_K_both_ss+diff_K_both_ss, 'k--', markersize=3)[0]\nax3.plot(I_stim_K_both_db, n_K_both_db, 'o-', color='k', markersize=3)[0]\nax3.plot(I_stim_K_both_db, g_K_both_db, 'v-', color='tab:purple', markersize=3)[0]\nax3.plot(I_stim_K_both_db, diff_K_both_db, '*-', color='tab:red', markersize=3)[0]\nax3.plot(I_stim_K_both_db, n_K_both_db+g_K_both_db+diff_K_both_db, 'k--', markersize=3)[0]\nax3.set_title('K$^+$, both')\n\n### Panel F ###\nax4.plot(I_stim_Na_soma_ss, n_Na_soma_ss, 'o-', color='k', markersize=3)[0]\nax4.plot(I_stim_Na_soma_ss, g_Na_soma_ss, 'v-', color='tab:purple', markersize=3)[0]\nax4.plot(I_stim_Na_soma_ss, diff_Na_soma_ss, '*-', 
color='tab:red', markersize=3)[0]\nax4.plot(I_stim_Na_soma_ss, n_Na_soma_ss+g_Na_soma_ss+diff_Na_soma_ss, 'k--', markersize=3)[0]\nax4.plot(I_stim_Na_soma_db, n_Na_soma_db, 'o-', color='k', markersize=3)[0]\nax4.plot(I_stim_Na_soma_db, g_Na_soma_db, 'v-', color='tab:purple', markersize=3)[0]\nax4.plot(I_stim_Na_soma_db, diff_Na_soma_db, '*-', color='tab:red', markersize=3)[0]\nax4.plot(I_stim_Na_soma_db, n_Na_soma_db+g_Na_soma_db+diff_Na_soma_db, 'k--', markersize=3)[0]\nax4.set_title('Na$^+$, soma')\n\n### Panel G ###\nax5.plot(I_stim_Na_dendrite_ss, n_Na_dendrite_ss, 'o-', color='k', markersize=3)[0]\nax5.plot(I_stim_Na_dendrite_ss, g_Na_dendrite_ss, 'v-', color='tab:purple', markersize=3)[0]\nax5.plot(I_stim_Na_dendrite_ss, diff_Na_dendrite_ss, '*-', color='tab:red', markersize=3)[0]\nax5.plot(I_stim_Na_dendrite_ss, n_Na_dendrite_ss+g_Na_dendrite_ss+diff_Na_dendrite_ss, 'k--', markersize=3)[0]\nax5.plot(I_stim_Na_dendrite_db, n_Na_dendrite_db, 'o-', color='k', markersize=3)[0]\nax5.plot(I_stim_Na_dendrite_db, g_Na_dendrite_db, 'v-', color='tab:purple', markersize=3)[0]\nax5.plot(I_stim_Na_dendrite_db, diff_Na_dendrite_db, '*-', color='tab:red', markersize=3)[0]\nax5.plot(I_stim_Na_dendrite_db, n_Na_dendrite_db+g_Na_dendrite_db+diff_Na_dendrite_db, 'k--', markersize=3)[0]\nax5.set_title('Na$^+$, dendrite')\n\n### Panel H ###\nax6.plot(I_stim_Na_both_ss, n_Na_both_ss, 'o-', color='k', markersize=3)[0]\nax6.plot(I_stim_Na_both_ss, g_Na_both_ss, 'v-', color='tab:purple', markersize=3)[0]\nax6.plot(I_stim_Na_both_ss, diff_Na_both_ss, '*-', color='tab:red', markersize=3)[0]\nax6.plot(I_stim_Na_both_ss, n_Na_both_ss+g_Na_both_ss+diff_Na_both_ss, 'k--', markersize=3)[0]\nax6.plot(I_stim_Na_both_db, n_Na_both_db, 'o-', color='k', markersize=3)[0]\nax6.plot(I_stim_Na_both_db, g_Na_both_db, 'v-', color='tab:purple', markersize=3)[0]\nax6.plot(I_stim_Na_both_db, diff_Na_both_db, '*-', color='tab:red', markersize=3)[0]\nax6.plot(I_stim_Na_both_db, n_Na_both_db+g_Na_both_db+diff_Na_both_db, 'k--', markersize=3)[0]\nax6.set_title('Na$^+$, both')\n\n### Panel I ###\nax7.plot(I_stim_Cl_soma_ss, n_Cl_soma_ss, 'o-', color='k', markersize=3)[0]\nax7.plot(I_stim_Cl_soma_ss, g_Cl_soma_ss, 'v-', color='tab:purple', markersize=3)[0]\nax7.plot(I_stim_Cl_soma_ss, diff_Cl_soma_ss, '*-', color='tab:red', markersize=3)[0]\nax7.plot(I_stim_Cl_soma_ss, n_Cl_soma_ss+g_Cl_soma_ss+diff_Cl_soma_ss, 'k--', markersize=3)[0]\nax7.plot(I_stim_Cl_soma_db, n_Cl_soma_db, 'o-', color='k', markersize=3)[0]\nax7.plot(I_stim_Cl_soma_db, g_Cl_soma_db, 'v-', color='tab:purple', markersize=3)[0]\nax7.plot(I_stim_Cl_soma_db, diff_Cl_soma_db, '*-', color='tab:red', markersize=3)[0]\nax7.plot(I_stim_Cl_soma_db, n_Cl_soma_db+g_Cl_soma_db+diff_Cl_soma_db, 'k--', markersize=3)[0]\nax7.set_title('Cl$^-$, soma')\n\n### Panel J ###\nax8.plot(I_stim_Cl_dendrite_ss, n_Cl_dendrite_ss, 'o-', color='k', markersize=3)[0]\nax8.plot(I_stim_Cl_dendrite_ss, g_Cl_dendrite_ss, 'v-', color='tab:purple', markersize=3)[0]\nax8.plot(I_stim_Cl_dendrite_ss, diff_Cl_dendrite_ss, '*-', color='tab:red', markersize=3)[0]\nax8.plot(I_stim_Cl_dendrite_ss, n_Cl_dendrite_ss+g_Cl_dendrite_ss+diff_Cl_dendrite_ss, 'k--', markersize=3)[0]\nax8.plot(I_stim_Cl_dendrite_db, n_Cl_dendrite_db, 'o-', color='k', markersize=3)[0]\nax8.plot(I_stim_Cl_dendrite_db, g_Cl_dendrite_db, 'v-', color='tab:purple', markersize=3)[0]\nax8.plot(I_stim_Cl_dendrite_db, diff_Cl_dendrite_db, '*-', color='tab:red', markersize=3)[0]\nax8.plot(I_stim_Cl_dendrite_db, 
n_Cl_dendrite_db+g_Cl_dendrite_db+diff_Cl_dendrite_db, 'k--', markersize=3)[0]\nax8.set_title('Cl$^-$, dendrite')\n\n### Panel K ###\nax9.plot(I_stim_Cl_both_ss, n_Cl_both_ss, 'o-', color='k', markersize=3)[0]\nax9.plot(I_stim_Cl_both_ss, g_Cl_both_ss, 'v-', color='tab:purple', markersize=3)[0]\nax9.plot(I_stim_Cl_both_ss, diff_Cl_both_ss, '*-', color='tab:red', markersize=3)[0]\nax9.plot(I_stim_Cl_both_ss, n_Cl_both_ss+g_Cl_both_ss+diff_Cl_both_ss, 'k--', markersize=3)[0]\nax9.plot(I_stim_Cl_both_db, n_Cl_both_db, 'o-', color='k', markersize=3)[0]\nax9.plot(I_stim_Cl_both_db, g_Cl_both_db, 'v-', color='tab:purple', markersize=3)[0]\nax9.plot(I_stim_Cl_both_db, diff_Cl_both_db, '*-', color='tab:red', markersize=3)[0]\nax9.plot(I_stim_Cl_both_db, n_Cl_both_db+g_Cl_both_db+diff_Cl_both_db, 'k--', markersize=3)[0]\nax9.set_title('Cl$^-$, both')\n\nfor ax in [ax00, ax0, ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9]:\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\nax00.set_ylabel('$\\phi_\\mathrm{{se,sum}}$ [mV]')\nax00.set_xticks([0,20,40,60])\nax0.set_xticks([0,200,400,600])\nax1.set_xlim(90,160)\nax1.set_xticks([100,120,140,160])\nax2.set_xlim(90,160)\nax2.set_xticks([100,120,140,160])\nax3.set_xlim(30,100)\nax3.set_xticks([40,60,80,100])\nax4.set_xlim(70,140)\nax4.set_xticks([80,120,100,140])\nax5.set_xlim(70,140)\nax5.set_xticks([80,120,100,140])\nax6.set_xlim(20,90)\nax6.set_xticks([30,50,70,90])\nax7.set_xlim(50,120)\nax7.set_xticks([60,80,100,120])\nax8.set_xlim(50,120)\nax8.set_xticks([60,80,100,120])\nax9.set_xlim(10,80)\nax9.set_xticks([20,40,60,80])\n\nfor ax in [ax1, ax4, ax7]:\n ax.set_yticks([-2, -1, 0])\n\nfor ax in [ax1, ax4, ax7]:\n ax.set_ylabel(r'$\\bar{\\phi}\\mathrm{_{se}}$ [mV]')\nfor ax in [ax00, ax0]:\n ax.set_xlabel('time [s]')\nfor ax in [ax7, ax8, ax9]:\n ax.set_xlabel('$I\\mathrm{_{stim}}$ [pA]')\n\n## ABC\npanel = np.array(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'])\ni = 0\nfor ax in [ax00, ax0, ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9]:\n ax.text(-0.1, 1.2, panel[i], transform=ax.transAxes, fontsize=12, fontweight='bold', va='top', ha='right')\n i += 1\n\nplt.tight_layout()\nplt.savefig('figure6.pdf', dpi=600)\n"
] | [
[
"numpy.load",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplot",
"numpy.array",
"matplotlib.gridspec.GridSpec"
]
] |
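The `plot_figure6.py` record above lays its panels out on an 8×6 `GridSpec` and shares y-axes within each row via `sharey`. A much smaller sketch of that layout pattern, with made-up curves standing in for the saved simulation data:

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

t = np.linspace(0, 60, 200)

fig = plt.figure(figsize=(7, 3))
gs = gridspec.GridSpec(2, 4)

# Top row: two wide panels; bottom row: panels sharing a y-axis with the first one.
ax_a = plt.subplot(gs[0, 0:2])
ax_b = plt.subplot(gs[0, 2:4])
ax_c = plt.subplot(gs[1, 0:2])
ax_d = plt.subplot(gs[1, 2:4], sharey=ax_c)

ax_a.plot(t, np.sin(t), 'k')
ax_b.plot(t, np.cos(t), 'k')
ax_c.plot(t, np.sin(t) * np.exp(-t / 30), 'o-', color='tab:purple', markersize=3)
ax_d.plot(t, np.cos(t) * np.exp(-t / 30), '*-', color='tab:red', markersize=3)

for ax in (ax_a, ax_b, ax_c, ax_d):
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)

plt.tight_layout()
plt.show()
```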
ChaokunChang/SVAS | [
"61af6eb39269edff8ea5147311628b3200c3a3d2"
] | [
"models/tmp/xchani_yolov5_master/yolov5_models/experimental.py"
] | [
"# This file contains experimental modules\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom yolov5_models.common import Conv, DWConv\nfrom yolov5_utils.google_utils import attempt_download\n\n\nclass CrossConv(nn.Module):\n # Cross Convolution Downsample\n def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):\n # ch_in, ch_out, kernel, stride, groups, expansion, shortcut\n super(CrossConv, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, (1, k), (1, s))\n self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass Sum(nn.Module):\n # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070\n def __init__(self, n, weight=False): # n: number of inputs\n super(Sum, self).__init__()\n self.weight = weight # apply weights boolean\n self.iter = range(n - 1) # iter object\n if weight:\n self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights\n\n def forward(self, x):\n y = x[0] # no weight\n if self.weight:\n w = torch.sigmoid(self.w) * 2\n for i in self.iter:\n y = y + x[i + 1] * w[i]\n else:\n for i in self.iter:\n y = y + x[i + 1]\n return y\n\n\nclass GhostConv(nn.Module):\n # Ghost Convolution https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups\n super(GhostConv, self).__init__()\n c_ = c2 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, k, s, None, g, act)\n self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)\n\n def forward(self, x):\n y = self.cv1(x)\n return torch.cat([y, self.cv2(y)], 1)\n\n\nclass GhostBottleneck(nn.Module):\n # Ghost Bottleneck https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k, s):\n super(GhostBottleneck, self).__init__()\n c_ = c2 // 2\n self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw\n DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw\n GhostConv(c_, c2, 1, 1, act=False)) # pw-linear\n self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),\n Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()\n\n def forward(self, x):\n return self.conv(x) + self.shortcut(x)\n\n\nclass MixConv2d(nn.Module):\n # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595\n def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True):\n super(MixConv2d, self).__init__()\n groups = len(k)\n if equal_ch: # equal c_ per group\n i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices\n c_ = [(i == g).sum() for g in range(groups)] # intermediate channels\n else: # equal weight.numel() per group\n b = [c2] + [0] * groups\n a = np.eye(groups + 1, groups, k=-1)\n a -= np.roll(a, 1, axis=1)\n a *= np.array(k) ** 2\n a[0] = 1\n c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b\n\n self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)])\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.LeakyReLU(0.1, inplace=True)\n\n def forward(self, x):\n return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))\n\n\nclass Ensemble(nn.ModuleList):\n # Ensemble of models\n def __init__(self):\n super(Ensemble, self).__init__()\n\n def forward(self, x, augment=False):\n y = []\n for module in self:\n y.append(module(x, augment)[0])\n # y = torch.stack(y).max(0)[0] # max ensemble\n # y = torch.stack(y).mean(0) # mean ensemble\n y = torch.cat(y, 1) # nms 
ensemble\n return y, None # inference, train output\n\n\ndef attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n attempt_download(w)\n model.append(torch.load(w, map_location=map_location)['model'].float().fuse().eval()) # load FP32 model\n\n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble\n"
] | [
[
"torch.nn.BatchNorm2d",
"numpy.eye",
"numpy.roll",
"torch.load",
"torch.linspace",
"torch.arange",
"numpy.linalg.lstsq",
"torch.nn.Identity",
"numpy.array",
"torch.sigmoid",
"torch.cat",
"torch.nn.LeakyReLU"
]
] |
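`MixConv2d` in the record above splits `c2` output channels across kernel sizes either evenly (`equal_ch=True`) or so that each branch gets roughly the same number of weights, by solving a small least-squares system. The snippet below re-runs that same arithmetic outside the module so the two splits can be inspected directly; `c2` and `k` are arbitrary example values:

```python
import numpy as np

c2 = 64          # total output channels
k = (1, 3, 5)    # kernel sizes of the parallel branches
groups = len(k)

# Strategy 1: equal number of channels per group.
idx = np.floor(np.linspace(0, groups - 1e-6, c2))
equal_ch = [int((idx == g).sum()) for g in range(groups)]

# Strategy 2: roughly equal number of weights per group -- larger kernels get
# fewer channels.  Solve a x = b in the least-squares sense, as MixConv2d does.
b = [c2] + [0] * groups
a = np.eye(groups + 1, groups, k=-1)
a -= np.roll(a, 1, axis=1)
a *= np.array(k) ** 2
a[0] = 1
equal_w = np.linalg.lstsq(a, b, rcond=None)[0].round()

print(equal_ch)   # e.g. [22, 21, 21]
print(equal_w)    # channel counts weighted by kernel area
```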
marcusvaltonen/python-droneposelib | [
"a08648207f20e90ed0491ef154ae9af56541831a"
] | [
"example/synthetic.py"
] | [
"import numpy as np\nimport droneposelib as dpl\n\n\ndef pflat(x):\n \"\"\"Divide by the last coordinate to get an inhomogenous representation.\"\"\"\n # Enfoce 2D\n if x.ndim == 1:\n x = np.expand_dims(x, 1)\n return x / x[-1, :]\n\n\ndef skew(a):\n a = np.squeeze(a)\n \"\"\"Create skew-symmetric matrix corresponding to cross product.\"\"\"\n return np.array([\n [0, -a[2], a[1]],\n [a[2], 0, -a[0]],\n [-a[1], a[0], 0]])\n\n\ndef radialdistort(x, kappa):\n \"\"\"Applies radial distortion to the (homogeneous or inhomogeneous) coordinates x\n using the parameter kappa. Assumes the division model.\"\"\"\n\n # Check if homogeneous\n ishom = x.shape[0] == 3\n\n if ishom:\n x = pflat(x)\n x = x[:2, :]\n\n # Compute distorted radius\n ru2 = np.sum(x**2, axis=0)\n ru = np.sqrt(ru2)\n\n # Compute distorted radius\n if kappa == 0:\n rd = ru\n else:\n rd = 0.5 / kappa / ru - np.sign(kappa) * np.sqrt(0.25 / kappa**2 / ru2 - 1.0 / kappa)\n\n # Compute distorted coordinates\n y = np.tile(rd / ru, (x.shape[0], 1)) * x\n\n if ishom:\n y = np.vstack((y, np.ones((1, y.shape[1]))))\n\n return y\n\n\ndef generate_points_realistic(N=100, distortion_param=0, rng=None):\n \"\"\"Generates two poses and the corresponding scene points and image points.\"\"\"\n # Check if a seed is used (for unittests)\n if not rng:\n rng = np.random.default_rng()\n\n # Relative translation\n t = 2 * rng.random((3, 1)) - 1\n\n # Make sure the baseline is okay\n t = t / np.linalg.norm(t)\n\n # Calibration matrix\n f = rng.random() * 200 + 200\n K = np.diag([f, f, 1.0])\n Kinv = np.diag([1.0 / f, 1.0 / f, 1.0])\n\n R1, _ = np.linalg.qr(rng.random((3, 3)))\n R2, _ = np.linalg.qr(rng.random((3, 3)))\n\n R = R2 @ R1.T\n\n P1 = K @ np.hstack((R1, np.zeros((3, 1))))\n P2 = K @ np.hstack((R2, t))\n\n # Fundamental matrix\n F = Kinv.T @ skew(t) @ R @ Kinv\n\n # Generate points with y-coordinate in front of scene\n X = np.vstack((\n 6 * rng.random((1, N)) - 3,\n 5 * rng.random((1, N)) + 3,\n 6 * rng.random((1, N)) - 3,\n np.ones((1, N))))\n\n # Generate point correspondences (pinhole)\n x1 = pflat(P1 @ X)\n x2 = pflat(P2 @ X)\n\n # Add radial distortion (if desired)\n x1u = x1\n x2u = x2\n\n if distortion_param < 0:\n x1 = radialdistort(x1, distortion_param)\n x2 = radialdistort(x2, distortion_param)\n\n return R1, R2, f, F, x1, x2, R, t, x1u, x2u\n\n\ndef compare_to_gt(sols, f, F, r):\n \"\"\"Compute relative errors compared to ground truth.\"\"\"\n F /= F[2, 2]\n normF = np.linalg.norm(F)\n f_err = min([abs(f - sol['f']) / f for sol in sols])\n F_err = min([np.linalg.norm(F - sol['F'] / sol['F'][2, 2]) / normF for sol in sols])\n r_err = min([abs(r - sol['r']) / abs(r) for sol in sols])\n\n return f_err, F_err, r_err\n\n\nif __name__ == '__main__':\n # Test a minimal sample\n print('frEfr:')\n N = 4\n distortion_param = -1e-07\n R1, R2, f, F, x1, x2, R, t, x1u, x2u = generate_points_realistic(N, distortion_param)\n\n print(f'F =\\n{F / F[2, 2]}')\n print(f'f = {f}')\n print(f'r = {distortion_param}')\n print(f'x1 =\\n{x1[:2, :]}')\n print(f'x2 =\\n{x2[:2, :]}')\n\n use_fast_solver = False\n out = dpl.get_valtonenornhag_arxiv_2021_frEfr(np.asfortranarray(x1[:2, :]), np.asfortranarray(x2[:2, :]),\n np.asfortranarray(R1), np.asfortranarray(R2), use_fast_solver)\n f_err, F_err, r_err = compare_to_gt(out, f, F, distortion_param)\n\n print(f'Focal length error: {f_err}')\n print(f'Fundamental matrix error: {F_err}')\n print(f'Radial distortion parameter error: {r_err}')\n"
] | [
[
"numpy.sqrt",
"numpy.sum",
"numpy.tile",
"numpy.ones",
"numpy.sign",
"numpy.random.default_rng",
"numpy.squeeze",
"numpy.diag",
"numpy.zeros",
"numpy.asfortranarray",
"numpy.hstack",
"numpy.expand_dims",
"numpy.array",
"numpy.linalg.norm"
]
] |
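`radialdistort` in the synthetic-data record above implements the one-parameter division model: the undistorted radius r_u and distorted radius r_d are related by r_u = r_d / (1 + kappa * r_d**2), and the code solves the resulting quadratic in r_d. A small round-trip check of that relationship — kappa and the radii are toy values of the same order as the example script:

```python
import numpy as np

def distort_radius(ru, kappa):
    """Distorted radius under the division model (same formula as radialdistort)."""
    if kappa == 0:
        return ru
    return 0.5 / kappa / ru - np.sign(kappa) * np.sqrt(0.25 / kappa**2 / ru**2 - 1.0 / kappa)

def undistort_radius(rd, kappa):
    """Inverse mapping of the division model: r_u = r_d / (1 + kappa * r_d**2)."""
    return rd / (1.0 + kappa * rd**2)

kappa = -1e-7                      # same order of magnitude as the example script
ru = np.linspace(50.0, 400.0, 8)   # undistorted radii in pixels (toy values)

rd = distort_radius(ru, kappa)
roundtrip = undistort_radius(rd, kappa)

print(np.max(np.abs(roundtrip - ru)))   # should be ~0 up to numerical noise
```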
DahlitzFlorian/python-snippets | [
"212f63f820b6f5842f74913ed08da18d41dfe7a4"
] | [
"third_party/parse_complex_excel_sheets.py"
] | [
"from pathlib import Path\n\nimport pandas as pd\n\n\npath = Path(\"src/samples.xlsx\")\nxls = pd.ExcelFile(path)\nprint(xls.sheet_names)\n\ndf = xls.parse(xls.sheet_names[0])\nprint(df.head())\n"
] | [
[
"pandas.ExcelFile"
]
] |
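The `parse_complex_excel_sheets.py` snippet above lists sheet names with `pd.ExcelFile` and parses only the first sheet. A closely related variant — assuming the same hypothetical `src/samples.xlsx` workbook — loads every sheet in one call:

```python
from pathlib import Path

import pandas as pd

path = Path("src/samples.xlsx")  # hypothetical workbook, as in the snippet above

# sheet_name=None returns a dict-like mapping {sheet name: DataFrame}.
sheets = pd.read_excel(path, sheet_name=None)

for name, df in sheets.items():
    print(name, df.shape)
```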
alangee/FaiR-N | [
"724f2cbea44705cecd45b202cc649d75df813d5d"
] | [
"datasets/german/german_model.py"
] | [
"# coding=utf-8\n# Copyright 2019 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\nThe model is a two layer convolutional network followed by a fully connected\nlayer. Changes to the model architecture can be made by modifying\ngerman_config.py file.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nfrom absl import flags\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\nMOMENTUM = 0.9\nEPS = 1e-5\n\ndef pool2d_layer(inputs, pool_type, pool_size=2, pool_stride=2):\n \"\"\"Pooling layer.\n\n Args:\n inputs: Tensor of size [batch, H, W, channels].\n pool_type: String (\"max\", or \"average\"), specifying pooling type.\n pool_size: Integer > 1 pooling size.\n pool_stride: Integer > 1 pooling stride.\n\n Returns:\n Pooling result.\n \"\"\"\n if pool_type == \"max\":\n # Max pooling layer\n return tf.layers.max_pooling2d(\n inputs, pool_size=[pool_size] * 2, strides=pool_stride)\n\n elif pool_type == \"average\":\n # Average pooling layer\n return tf.layers.average_pooling2d(\n inputs, pool_size=[pool_size] * 2, strides=pool_stride)\n\n\nclass GermanNetwork(object):\n \"\"\"German Credit model.\"\"\"\n\n def __init__(self, config):\n self.num_classes = config.num_classes\n self.var_list = []\n self.init_ops = None\n self.regularizer = config.regularizer\n self.activation = config.activation\n self.filter_sizes_conv_layers = config.filter_sizes_conv_layers\n self.num_units_fc_layers = config.num_units_fc_layers\n self.pool_params = config.pool_params\n self.dropout_rate = config.dropout_rate\n self.batch_norm = config.batch_norm\n\n def __call__(self, images, is_training=False):\n \"\"\"Builds model.\"\"\"\n endpoints = {}\n net = images\n\n reuse = tf.AUTO_REUSE\n\n for i, num_units in enumerate(self.num_units_fc_layers):\n print('*****************')\n print('iteration ,', i)\n layer_suffix = \"layer%d\" % i\n with tf.variable_scope(\n os.path.join(\"german_network\", \"fc_\" + layer_suffix), reuse=reuse):\n net = tf.layers.dense(\n net,\n num_units,\n activation=self.activation,\n kernel_regularizer=self.regularizer,\n use_bias=True)\n\n endpoints[\"fc_\" + layer_suffix] = net\n\n with tf.variable_scope(\n os.path.join(\"german_network\", \"output_layer\"), reuse=reuse):\n logits = tf.layers.dense(\n net,\n self.num_classes,\n activation=None,\n kernel_regularizer=self.regularizer)\n endpoints[\"logits\"] = net\n print('-----------')\n print(net)\n\n return logits, endpoints\n"
] | [
[
"tensorflow.layers.average_pooling2d",
"tensorflow.layers.max_pooling2d",
"tensorflow.layers.dense"
]
] |
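`GermanNetwork` in the record above stacks `tf.layers.dense` calls inside TF1-style variable scopes with `tf.AUTO_REUSE`, then adds a linear output layer for the logits. For orientation only, here is a rough Keras equivalent of that dense stack — a sketch, not the repository's code; the layer sizes, input width, and activation are assumptions standing in for `german_config.py`:

```python
import tensorflow as tf

# Hypothetical configuration mirroring config.num_units_fc_layers / num_classes.
num_units_fc_layers = [64, 32]
num_classes = 2

def build_german_network():
    """Keras sketch of the same dense stack: hidden layers plus linear logits."""
    inputs = tf.keras.Input(shape=(20,))        # 20 tabular features (assumed)
    net = inputs
    for num_units in num_units_fc_layers:
        net = tf.keras.layers.Dense(num_units, activation="relu")(net)
    logits = tf.keras.layers.Dense(num_classes, activation=None)(net)
    return tf.keras.Model(inputs, logits)

model = build_german_network()
model.summary()
```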
shtoneyan/sea-lion | [
"7e1ce9a18a147eea42e6172a2329d696f6e6aef9"
] | [
"preprocessing/basenji_data_write.py"
] | [
"#!/usr/bin/env python\n# Copyright 2019 Calico LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =========================================================================\nfrom optparse import OptionParser\nimport os\nimport sys\nimport uuid\nimport h5py\nimport numpy as np\nimport pdb\nimport pysam\nimport json\n\nfrom basenji_data import ModelSeq\nfrom dna_io import dna_1hot, dna_1hot_index\n\nimport tensorflow as tf\n\n\"\"\"\nbasenji_data_write.py\n\nWrite TF Records for batches of model sequences.\n\nNotes:\n-I think target_start and target_end are remnants of my previous data2 pipeline.\n If I see this again beyond 8/2020, remove it.\n\"\"\"\n\n################################################################################\n# main\n################################################################################\ndef main():\n usage = 'usage: %prog [options] <fasta_file> <seqs_bed_file> <seqs_cov_dir> <tfr_file> <fold_set>'\n parser = OptionParser(usage)\n parser.add_option('--threshold', dest='threshold',\n default=0, type='float',\n help='Set a minimum threshold for activity.')\n parser.add_option('--test_threshold', dest='test_threshold',\n type='float',\n help='Set a minimum threshold for activity for test set.')\n parser.add_option('-s', dest='start_i',\n default=0, type='int',\n help='Sequence start index [Default: %default]')\n parser.add_option('-e', dest='end_i',\n default=None, type='int',\n help='Sequence end index [Default: %default]')\n parser.add_option('--te', dest='target_extend',\n default=None, type='int', help='Extend targets vector [Default: %default]')\n parser.add_option('--ts', dest='target_start',\n default=0, type='int', help='Write targets into vector starting at index [Default: %default')\n parser.add_option('-u', dest='umap_npy',\n help='Unmappable array numpy file')\n parser.add_option('--umap_clip', dest='umap_clip',\n default=1, type='float',\n help='Clip values at unmappable positions to distribution quantiles, eg 0.25. 
[Default: %default]')\n parser.add_option('--umap_tfr', dest='umap_tfr',\n default=False, action='store_true',\n help='Save umap array into TFRecords [Default: %default]')\n parser.add_option('-o', dest='out_dir',\n default='data_out',\n help='Output directory [Default: %default]')\n (options, args) = parser.parse_args()\n\n if len(args) != 5:\n parser.error('Must provide input arguments.')\n else:\n fasta_file = args[0]\n seqs_bed_file = args[1]\n seqs_cov_dir = args[2]\n tfr_file = args[3]\n fold_set = args[4]\n\n if fold_set == 'test':\n options.threshold = options.test_threshold\n\n ################################################################\n # read model sequences\n\n model_seqs = []\n for line in open(seqs_bed_file):\n a = line.split()\n model_seqs.append(ModelSeq(a[0],int(a[1]),int(a[2]),None))\n\n if options.end_i is None:\n options.end_i = len(model_seqs)\n\n num_seqs = options.end_i - options.start_i\n\n ################################################################\n # determine sequence coverage files\n\n seqs_cov_files = []\n ti = 0\n seqs_cov_file = '%s/%d.h5' % (seqs_cov_dir, ti)\n while os.path.isfile(seqs_cov_file):\n seqs_cov_files.append(seqs_cov_file)\n ti += 1\n seqs_cov_file = '%s/%d.h5' % (seqs_cov_dir, ti)\n\n if len(seqs_cov_files) == 0:\n print('Sequence coverage files not found, e.g. %s' % seqs_cov_file, file=sys.stderr)\n exit(1)\n\n seq_pool_len = h5py.File(seqs_cov_files[0], 'r')['targets'].shape[1]\n num_targets = len(seqs_cov_files)\n\n ################################################################\n # read targets\n\n # extend targets\n num_targets_tfr = num_targets\n if options.target_extend is not None:\n assert(options.target_extend >= num_targets_tfr)\n num_targets_tfr = options.target_extend\n\n # initialize targets\n targets = np.zeros((num_seqs, seq_pool_len, num_targets_tfr), dtype='float16')\n\n # read each target\n for ti in range(num_targets):\n seqs_cov_open = h5py.File(seqs_cov_files[ti], 'r')\n tii = options.target_start + ti\n targets[:,:,tii] = seqs_cov_open['targets'][options.start_i:options.end_i,:]\n seqs_cov_open.close()\n # threshold each sequence using an arbitrary threshold\n mask_by_thr = np.any(np.any(targets > options.threshold, axis=1), axis=-1)\n idx_filt_seqs = np.argwhere(mask_by_thr).flatten()\n num_seqs_to_add = len(idx_filt_seqs)\n for i in range(5):\n print('*')\n print(num_seqs_to_add)\n for i in range(5):\n print('*')\n # current_json = open('%s/statistics.json' % options.out_dir, 'r')\n # current_stats = json.load(current_json)\n # current_stats['%s_seqs'%fold_set] += num_seqs_to_add # update number of seqs\n\n # with open('%s/statistics.json' % options.out_dir, 'w') as stats_json_out:\n # json.dump(current_stats, stats_json_out, indent=4)\n\n count_dir = os.path.join(options.out_dir, 'counts')\n if not os.path.isdir(count_dir):\n os.mkdir(count_dir)\n file_id = fold_set+'_'+uuid.uuid4().hex\n file_path = os.path.join(count_dir, file_id)\n f = open(file_path, 'w')\n f.write(str(num_seqs_to_add))\n f.close()\n\n\n\n ################################################################\n # modify unmappable\n #\n # if options.umap_npy is not None and options.umap_clip < 1:\n # unmap_mask = np.load(options.umap_npy)\n #\n # for si in idx_filt_seqs:\n # msi = options.start_i + si\n #\n # # determine unmappable null value\n # seq_target_null = np.percentile(targets[si], q=[100*options.umap_clip], axis=0)[0]\n #\n # # set unmappable positions to null\n # targets[si,unmap_mask[msi,:],:] = 
np.minimum(targets[si,unmap_mask[msi,:],:], seq_target_null)\n #\n # elif options.umap_npy is not None and options.umap_tfr:\n # unmap_mask = np.load(options.umap_npy)\n\n ################################################################\n # write TFRecords\n\n # open FASTA\n fasta_open = pysam.Fastafile(fasta_file)\n\n # define options\n tf_opts = tf.io.TFRecordOptions(compression_type='ZLIB')\n with tf.io.TFRecordWriter(tfr_file, tf_opts) as writer:\n for si in idx_filt_seqs:\n\n msi = options.start_i + si\n mseq = model_seqs[msi]\n\n # read FASTA\n seq_dna = fasta_open.fetch(mseq.chr, mseq.start, mseq.end)\n\n # one hot code\n seq_1hot = dna_1hot(seq_dna)\n # seq_1hot = dna_1hot_index(seq_dna) # more efficient, but fighting inertia\n # hash to bytes\n features_dict = {\n 'coordinate': feature_str('{}_{}_{}'.format(mseq.chr, mseq.start, mseq.end).encode()),\n # 'sequence': feature_bytes(seq_1hot),\n 'sequence': feature_bytes(seq_1hot),\n 'target': feature_bytes(targets[si,:,:])\n }\n # features_dict = {\n # 'chrom': feature_str(mseq.chr.encode()),\n # # 'sequence': feature_bytes(seq_1hot),\n # 'start': feature_floats(mseq.start),\n # 'end': feature_floats(mseq.end),\n # 'sequence': feature_bytes(seq_1hot),\n # 'target': feature_bytes(targets[si,:,:])\n # }\n # add unmappability\n if options.umap_tfr:\n features_dict['umap'] = feature_bytes(unmap_mask[msi,:])\n\n # write example\n example = tf.train.Example(features=tf.train.Features(feature=features_dict))\n writer.write(example.SerializeToString())\n\n fasta_open.close()\n\n\n\n\ndef feature_bytes(values):\n \"\"\"Convert numpy arrays to bytes features.\"\"\"\n values = values.flatten().tostring()\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))\n\ndef feature_str(values):\n \"\"\"Convert str to bytes features.\"\"\"\n # value = np.array(values)\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))\n\ndef feature_floats(values):\n \"\"\"Convert numpy arrays to floats features.\n Requires more space than bytes.\"\"\"\n values = values.flatten().tolist()\n return tf.train.Feature(float_list=tf.train.FloatList(value=values))\n\n\n################################################################################\n# __main__\n################################################################################\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.argwhere",
"numpy.zeros",
"tensorflow.io.TFRecordWriter",
"numpy.any",
"tensorflow.train.FloatList",
"tensorflow.train.Features",
"tensorflow.io.TFRecordOptions",
"tensorflow.train.BytesList"
]
] |
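`basenji_data_write.py` above serializes each sequence/target pair as bytes features inside ZLIB-compressed TFRecords. The sketch below writes and reads back one such record with the same options, using toy arrays; the file name, shapes, and dtypes are illustrative only (it also uses `ndarray.tobytes()` rather than the deprecated `tostring()` seen in the original):

```python
import numpy as np
import tensorflow as tf

def feature_bytes(values):
    """Flatten a numpy array into a single bytes feature."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values.tobytes()]))

# Toy example: one 4-bp one-hot sequence and a (2, 3) float16 target block.
seq_1hot = np.eye(4, dtype=np.uint8)
targets = np.arange(6, dtype=np.float16).reshape(2, 3)

tfr_file = "toy.tfr"
tf_opts = tf.io.TFRecordOptions(compression_type="ZLIB")
with tf.io.TFRecordWriter(tfr_file, tf_opts) as writer:
    example = tf.train.Example(features=tf.train.Features(feature={
        "sequence": feature_bytes(seq_1hot),
        "target": feature_bytes(targets),
    }))
    writer.write(example.SerializeToString())

# Read it back with matching compression and dtypes.
dataset = tf.data.TFRecordDataset(tfr_file, compression_type="ZLIB")
for record in dataset:
    parsed = tf.io.parse_single_example(record, {
        "sequence": tf.io.FixedLenFeature([], tf.string),
        "target": tf.io.FixedLenFeature([], tf.string),
    })
    seq = tf.reshape(tf.io.decode_raw(parsed["sequence"], tf.uint8), (4, 4))
    tgt = tf.reshape(tf.io.decode_raw(parsed["target"], tf.float16), (2, 3))
    print(seq.numpy())
    print(tgt.numpy())
```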
qacwnfq/linmix | [
"36635592d76d2d0e06324265ede385e2d5bcc721"
] | [
"linmix/linmix.py"
] | [
"\"\"\" linmix -- A hierarchical Bayesian approach to linear regression with error in both X and Y.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport numpy as np\n\n\ndef task_manager(conn):\n chain = None\n while True:\n message = conn.recv()\n if message['task'] == 'init':\n chain = Chain(**message['init_args'])\n chain.initial_guess()\n elif message['task'] == 'init_chain':\n chain.initialize_chain(message['miniter'])\n elif message['task'] == 'step':\n chain.step(message['niter'])\n elif message['task'] == 'extend':\n chain.extend(message['niter'])\n elif message['task'] == 'fetch':\n conn.send(chain.__dict__[message['key']])\n elif message['task'] == 'kill':\n break\n else:\n raise ValueError(\"Invalid task\")\n\nclass Chain(object):\n def __init__(self, x, y, xsig, ysig, xycov, delta, K, nchains, rng=None):\n self.x = np.array(x, dtype=float)\n self.y = np.array(y, dtype=float)\n\n if xsig is None:\n self.xsig = np.zeros_like(self.x)\n xycov = np.zeros_like(self.x)\n else:\n self.xsig = np.array(xsig, dtype=float)\n if ysig is None:\n self.ysig = np.zeros_like(self.y)\n xycov = np.zeros_like(self.y)\n else:\n self.ysig = np.array(ysig, dtype=float)\n self.wxerr = (self.xsig != 0.0)\n self.wyerr = (self.ysig != 0.0)\n self.werrs = werrs = self.wxerr & self.wyerr\n\n if xycov is None:\n self.xycov = np.zeros_like(self.x)\n else:\n self.xycov = np.array(xycov, dtype=float)\n\n self.xycorr = np.zeros_like(self.xycov)\n self.xycorr[werrs] = self.xycov[werrs] / (self.xsig[werrs] * self.ysig[werrs])\n\n self.N = len(self.x)\n self.K = K\n self.nchains = nchains\n\n self.xvar = self.xsig**2\n self.yvar = self.ysig**2\n\n if delta is None:\n self.delta = np.ones((self.N), dtype=bool)\n else:\n self.delta = np.array(delta, dtype=bool)\n\n if rng is None:\n rng = np.random.RandomState()\n self.rng = rng\n\n self.initialized = False\n\n def initial_guess(self): # Step 1\n # For convenience\n x = self.x\n y = self.y\n xycov = self.xycov\n xvar = self.xvar\n yvar = self.yvar\n N = self.N\n K = self.K\n\n # Use BCES estimator for initial guess of theta = {alpha, beta, sigsqr}\n self.beta = ((np.cov(x, y, ddof=1)[1, 0] - np.mean(xycov))\n / (np.var(x, ddof=1) - np.mean(xvar)))\n self.alpha = np.mean(y) - self.beta * np.mean(x)\n self.sigsqr = np.var(y, ddof=1) - np.mean(yvar) - self.beta * (np.cov(x, y, ddof=1)[1, 0]\n - np.mean(xycov))\n self.sigsqr = np.max([self.sigsqr,\n 0.05 * np.var(y - self.alpha - self.beta * x, ddof=1)])\n\n self.mu0 = np.median(x)\n self.wsqr = np.var(x, ddof=1) - np.median(xvar)\n self.wsqr = np.max([self.wsqr, 0.01*np.var(x, ddof=1)])\n\n # Now get an MCMC value dispersed around above values\n X = np.ones((N, 2), dtype=float)\n X[:, 1] = x\n Sigma = np.linalg.inv(np.dot(X.T, X)) * self.sigsqr\n coef = self.rng.multivariate_normal([0, 0], Sigma)\n chisqr = self.rng.chisquare(self.nchains)\n self.alpha += coef[0] * np.sqrt(1.0/chisqr)\n self.beta += coef[1] * np.sqrt(1.0/chisqr)\n self.sigsqr *= 0.5 * N / self.rng.chisquare(0.5*N)\n\n # Now get the values for the mixture parameters, first do prior params\n self.mu0min = min(x)\n self.mu0max = max(x)\n\n mu0g = np.nan\n while not (mu0g > self.mu0min) & (mu0g < self.mu0max):\n mu0g = self.mu0 + (self.rng.normal(scale=np.sqrt(np.var(x, ddof=1) / N)) /\n np.sqrt(self.nchains/self.rng.chisquare(self.nchains)))\n self.mu0 = mu0g\n\n # wsqr is the global scale\n self.wsqr *= 0.5 * N / self.rng.chisquare(0.5 * N)\n\n self.usqrmax = 1.5 * np.var(x, ddof=1)\n self.usqr = 0.5 * np.var(x, ddof=1)\n\n self.tausqr = 0.5 * 
self.wsqr * self.nchains / self.rng.chisquare(self.nchains, size=K)\n\n self.mu = self.mu0 + self.rng.normal(scale=np.sqrt(self.wsqr), size=K)\n\n # get initial group proportions and group labels\n\n pig = np.zeros(self.K, dtype=float)\n if K == 1:\n self.G = np.ones(N, dtype=int)\n self.pi = np.array([1], dtype=float)\n else:\n self.G = np.zeros((N, K), dtype=int)\n for i in range(N):\n minind = np.argmin(abs(x[i] - self.mu))\n pig[minind] += 1\n self.G[i, minind] = 1\n self.pi = self.rng.dirichlet(pig+1)\n\n self.eta = y.copy()\n self.y_ul = y.copy()\n self.xi = x.copy()\n\n self.cens = np.nonzero(np.logical_not(self.delta))[0]\n\n self.initialized = True\n\n def update_cens_y(self): # Step 2\n todo = self.cens[:]\n while len(todo) > 0:\n self.y[todo] = self.rng.normal(loc=self.eta[todo],\n scale=np.sqrt(self.yvar[todo]),\n size=len(todo))\n todo = np.nonzero(np.logical_not(self.delta) & (self.y > self.y_ul))[0]\n\n def update_xi(self): # Step 3\n wxerr = self.wxerr\n wyerr = self.wyerr\n\n # Eqn (58)\n sigma_xihat_ik_sqr = 1.0/(1.0/(self.xvar * (1.0 - self.xycorr**2))[:, np.newaxis]\n + self.beta**2 / self.sigsqr\n + 1.0/self.tausqr)\n # Eqn (57)\n sigma_xihat_i_sqr = np.sum(self.G * sigma_xihat_ik_sqr, axis=1)\n # Eqn (56)\n xihat_xy_i = self.x.copy()\n xihat_xy_i[wyerr] += (self.xycov / self.yvar * (self.eta - self.y))[wyerr]\n # Eqn (55)\n xihat_ik = (sigma_xihat_i_sqr[:, np.newaxis]\n * ((xihat_xy_i/(self.xvar\n * (1.0 - self.xycorr**2)))[:, np.newaxis]\n + self.beta*(self.eta[:, np.newaxis] - self.alpha)/self.sigsqr\n + self.mu/self.tausqr))\n # Eqn (54)\n xihat_i = np.sum(self.G * xihat_ik, axis=1)\n # Eqn (53)\n self.xi[wxerr] = self.rng.normal(loc=xihat_i[wxerr],\n scale=np.sqrt(sigma_xihat_i_sqr[wxerr]))\n\n def update_eta(self): # Step 4\n wxerr = self.wxerr\n wyerr = self.wyerr\n\n etaxyvar = self.yvar * (1.0 - self.xycorr**2)\n etaxy = self.y.copy()\n etaxy[wxerr] += (self.xycov / self.xvar * (self.xi - self.x))[wxerr]\n\n # Eqn (68)\n sigma_etahat_i_sqr = 1.0/(1.0/etaxyvar + 1.0/self.sigsqr)\n # Eqn (67)\n etahat_i = (sigma_etahat_i_sqr * (etaxy / etaxyvar\n + (self.alpha + self.beta * self.xi) / self.sigsqr))\n # Eqn (66)\n self.eta[wyerr] = self.rng.normal(loc=etahat_i[wyerr],\n scale=np.sqrt(sigma_etahat_i_sqr[wyerr]))\n\n def update_G(self): # Step 5\n # Eqn (74)\n piNp = self.pi * (1.0 / np.sqrt(2.0 * np.pi * self.tausqr)\n * np.exp(-0.5 * (self.xi[:, np.newaxis] - self.mu) ** 2 / self.tausqr))\n\n sum_piNp = np.sum(piNp, axis=1)[:, np.newaxis]\n q_ki = piNp\n for i in range(self.N):\n q_ki[i] = q_ki[i] / sum_piNp[i] if sum_piNp[i] != 0 else 0\n # Eqn (73)\n for i in range(self.N):\n self.G[i] = self.rng.multinomial(1, q_ki[i])\n\n def update_alpha_beta(self): # Step 6\n X = np.ones((self.N, 2), dtype=float)\n X[:, 1] = self.xi\n # Eqn (77)\n XTXinv = np.linalg.inv(np.dot(X.T, X))\n Sigma_chat = XTXinv * self.sigsqr\n # Eqn (76)\n chat = np.dot(np.dot(XTXinv, X.T), self.eta)\n # Eqn (75)\n self.alpha, self.beta = self.rng.multivariate_normal(chat, Sigma_chat)\n\n def update_sigsqr(self): # Step 7\n # Eqn (80)\n ssqr = 1.0/(self.N-2) * np.sum((self.eta - self.alpha - self.beta * self.xi)**2)\n # Eqn (79)\n nu = self.N - 2\n # Eqn (78)\n self.sigsqr = nu * ssqr / self.rng.chisquare(nu)\n\n def update_pi(self): # Step 8\n # Eqn (82)\n self.nk = np.sum(self.G, axis=0)\n # Eqn (81)\n self.pi = self.rng.dirichlet(self.nk+1)\n\n def update_mu(self): # Step 9\n Gsum = np.sum(self.G * self.xi[:, np.newaxis], axis=0)\n for k in range(self.K):\n if self.nk[k] != 0:\n # Eqn (86)\n 
Sigma_muhat_k = 1.0/(1.0/self.usqr + self.nk[k]/self.tausqr[k])\n # Eqn (85)\n xibar_k = 1.0/self.nk[k] * Gsum[k]\n # Eqn (84)\n muhat_k = Sigma_muhat_k * (self.mu0/self.usqr + self.nk[k]/self.tausqr[k]*xibar_k)\n # Eqn (83)\n self.mu[k] = self.rng.normal(loc=muhat_k, scale=np.sqrt(Sigma_muhat_k))\n else:\n self.mu[k] = self.rng.normal(loc=self.mu0, scale=np.sqrt(self.usqr))\n\n def update_tausqr(self): # Step 10\n # Eqn (88)\n nu_k = self.nk + 1\n # Eqn (89)\n tk_sqr = 1.0/nu_k * (self.wsqr + np.sum(self.G*(self.xi[:, np.newaxis]-self.mu)**2, axis=0))\n # Eqn (87)\n self.tausqr = tk_sqr * nu_k / self.rng.chisquare(nu_k, size=self.K)\n\n def update_mu0(self): # Step 11\n # Eqn (94)\n mubar = np.mean(self.mu)\n # Eqn (93)\n self.mu0 = self.rng.normal(loc=mubar, scale=np.sqrt(self.usqr/self.K))\n\n def update_usqr(self): # Step 12\n # Eqn (96)\n nu_u = self.K + 1\n # Eqn (97)\n usqrhat = 1.0/nu_u * (self.wsqr + np.sum((self.mu - self.mu0)**2))\n usqr = np.inf\n while not usqr <= self.usqrmax:\n usqr = usqrhat * nu_u / self.rng.chisquare(nu_u)\n self.usqr = usqr\n\n def update_wsqr(self): # Step 13\n # Eqn (102)\n a = 0.5 * (self.K + 3)\n # Eqn (103)\n b = 0.5 * (1.0/self.usqr + np.sum(1.0/self.tausqr))\n # Eqn (101)\n self.wsqr = self.rng.gamma(a, 1.0/b)\n\n def initialize_chain(self, chain_length):\n self.chain_dtype = [('alpha', float),\n ('beta', float),\n ('sigsqr', float),\n ('pi', (float, (self.K,))),\n ('mu', (float, (self.K,))),\n ('tausqr', (float, (self.K,))),\n ('mu0', float),\n ('usqr', float),\n ('wsqr', float),\n ('ximean', float),\n ('xisig', float),\n ('corr', float)]\n self.chain = np.empty((chain_length,), dtype=self.chain_dtype)\n self.ichain = 0\n\n def extend(self, length):\n extension = np.empty((length), dtype=self.chain_dtype)\n self.chain = np.hstack((self.chain, extension))\n\n def update_chain(self):\n self.chain['alpha'][self.ichain] = self.alpha\n self.chain['beta'][self.ichain] = self.beta\n self.chain['sigsqr'][self.ichain] = self.sigsqr\n self.chain['pi'][self.ichain] = self.pi\n self.chain['mu'][self.ichain] = self.mu\n self.chain['tausqr'][self.ichain] = self.tausqr\n self.chain['mu0'][self.ichain] = self.mu0\n self.chain['usqr'][self.ichain] = self.usqr\n self.chain['wsqr'][self.ichain] = self.wsqr\n ximean = np.sum(self.pi * self.mu)\n self.chain['ximean'][self.ichain] = ximean\n xisig = np.sqrt(np.sum(self.pi * (self.tausqr + self.mu**2)) - ximean**2)\n self.chain['xisig'][self.ichain] = xisig\n self.chain['corr'][self.ichain] = self.beta * xisig / np.sqrt(self.beta**2 * xisig**2\n + self.sigsqr)\n self.ichain += 1\n\n def step(self, niter):\n for i in range(niter):\n self.update_cens_y()\n old_settings = np.seterr(divide='ignore', invalid='ignore')\n self.update_xi()\n self.update_eta()\n np.seterr(**old_settings)\n self.update_G()\n self.update_alpha_beta()\n self.update_sigsqr()\n self.update_pi()\n self.update_mu()\n self.update_tausqr()\n self.update_mu0()\n self.update_usqr()\n self.update_wsqr()\n self.update_chain()\n\n\nclass LinMix(object):\n \"\"\" A class to perform linear regression of `y` on `x` when there are measurement errors in\n both variables. The regression assumes:\n\n eta = alpha + beta * xi + epsilon\n\n x = xi + xerr\n\n y = eta + yerr\n\n Here, `alpha` and `beta` are the regression coefficients, `epsilon` is the intrinsic random\n scatter about the regression, `xerr` is the measurement error in `x`, and `yerr` is the\n measurement error in `y`. 
`epsilon` is assumed to be normally-distributed with mean zero and\n variance `sigsqr`. `xerr` and `yerr` are assumed to be normally-distributed with means equal\n to zero, variances `xsig`^2 and `ysig`^2, respectively, and covariance `xycov`. The\n distribution of `xi` is modelled as a mixture of normals, with group proportions `pi`, means\n `mu`, and variances `tausqr`.\n\n Args:\n x(array_like): The observed independent variable.\n y(array_like): The observed dependent variable.\n xsig(array_like): 1-sigma measurement errors in x.\n ysig(array_like): 1-sigma measurement errors in y.\n xycov(array_like): Covariance between the measurement errors in x and y.\n delta(array_like): Array indicating whether a data point is censored (i.e., not detected),\n or not. If delta[i] == 1, then the ith source is detected. If delta[i] == 0, then\n the ith source is not detected and y[i] will be interpreted as an upper limit. Note\n that if there are censored data points, then the maximum-likelihood estimate\n (alpha, beta, sigsqr) is not valid. By default, all data points are assumed to be\n detected.\n K(int): The number of Gaussians to use in the mixture model for the distribution of xi.\n nchains(int): The number of Monte Carlo Markov Chains to instantiate.\n parallelize(bool): Use a separate thread for each chain. Only makes sense for nchains > 1.\n seed(int): Random seed. If `None`, then get seed from np.random.randint().\n\n Attributes:\n nchains(int): The number of instantiated MCMCs.\n chain(numpy recarray): The concatenated MCMCs themselves. Actually, only the concatenation\n of the last half of each chain is stored here after convergence is reached. The\n recarray has the following columns:\n - alpha(float): The regression intercept.\n - beta(float): The regression slope.\n - sigsqr(float): The regression intrinsic scatter.\n - pi(array_like): The mixture model component fractions.\n - mu(array_like): The mixture model component means.\n - tausqr(array_like): The mixture model component variances.\n - mu0(float): The hyperparameter describing the prior variance of the distribution\n of mixture means.\n - usqr(float): The hyperparameter describing the prior variance of the distribution\n of mixture variances.\n - wsqr(float): The hyperparameter describing the typical scale for the prior on\n `usqr` and `tausqr`.\n - ximean(float): The mean of the distribution for the independent latent variable\n `xi`.\n - xisig(float): The standard deviation of the distribution for the independent\n latent variable `xi`.\n - corr(float): The linear correlation coefficient between the latent dependent and\n independent variables `xi` and `eta`.\n \"\"\"\n def __init__(self, x, y, xsig=None, ysig=None, xycov=None, delta=None, K=3,\n nchains=4, parallelize=True, seed=None):\n self.nchains = nchains\n self.parallelize = parallelize\n\n if seed is None:\n seed = np.random.randint(2**32-1)\n\n if self.parallelize:\n # Will place 1 chain in 1 thread.\n from multiprocessing import Process, Pipe\n # Create a pipe for each thread.\n self.pipes = []\n slave_pipes = []\n for i in range(self.nchains):\n master_pipe, slave_pipe = Pipe()\n self.pipes.append(master_pipe)\n slave_pipes.append(slave_pipe)\n\n # Create chain pool.\n self.pool = []\n for sp in slave_pipes:\n self.pool.append(Process(target=task_manager, args=(sp,)))\n self.pool[-1].start()\n\n init_kwargs0 = {'x':x,\n 'y':y,\n 'xsig':xsig,\n 'ysig':ysig,\n 'xycov':xycov,\n 'delta':delta,\n 'K':K,\n 'nchains':self.nchains}\n for i, p in enumerate(self.pipes):\n 
init_kwargs = init_kwargs0.copy()\n init_kwargs['rng'] = np.random.RandomState(seed+i)\n p.send({'task':'init',\n 'init_args':init_kwargs})\n else:\n self._chains = []\n for i in range(self.nchains):\n self._chains.append(Chain(x, y, xsig, ysig, xycov, delta, K, self.nchains))\n self._chains[-1].initial_guess()\n\n def _get_psi(self):\n if self.parallelize:\n for p in self.pipes:\n p.send({'task':'fetch',\n 'key':'chain'})\n chains = [p.recv() for p in self.pipes]\n self.pipes[0].send({'task':'fetch',\n 'key':'ichain'})\n ndraw = int(self.pipes[0].recv()/2)\n else:\n chains = [c.chain for c in self._chains]\n ndraw = int(self._chains[0].ichain/2)\n psi = np.empty((ndraw, self.nchains, 6), dtype=float)\n psi[:, :, 0] = np.vstack([c['alpha'][0:ndraw] for c in chains]).T\n beta = np.vstack([c['beta'][0:ndraw] for c in chains]).T\n psi[:, :, 1] = beta\n sigsqr = np.vstack([c['sigsqr'][0:ndraw] for c in chains]).T\n psi[:, :, 2] = np.log(sigsqr)\n ximean = np.vstack([np.sum(c['pi'][0:ndraw] * c['mu'][0:ndraw], axis=1)\n for c in chains]).T\n psi[:, :, 3] = ximean\n xivar = np.vstack([np.sum(c['pi'][0:ndraw] * (c['tausqr'][0:ndraw] + c['mu'][0:ndraw]**2),\n axis=1)\n for c in chains]).T - ximean**2\n psi[:, :, 4] = xivar\n psi[:, :, 5] = np.arctanh(beta * np.sqrt(xivar / (beta**2 * xivar + sigsqr)))\n return psi\n\n def _get_Rhat(self):\n psi = self._get_psi()\n ndraw = psi.shape[0]\n psibarj = np.sum(psi, axis=0)/ndraw\n psibar = np.mean(psibarj, axis=0)\n sjsqr = np.sum((psi-psibarj)**2 / (ndraw-1.0), axis=(0, 1))\n Bvar = ndraw / (self.nchains-1.0) * np.sum((psibarj-psibar)**2, axis=0)\n Wvar = sjsqr / self.nchains\n varplus = (1.0 - 1.0 / ndraw) * Wvar + Bvar / ndraw\n Rhat = np.sqrt(varplus / Wvar)\n return Rhat\n\n def _initialize_chains(self, miniter):\n if self.parallelize:\n for p in self.pipes:\n p.send({'task':'init_chain',\n 'miniter':miniter})\n else:\n for c in self._chains:\n c.initialize_chain(miniter)\n\n def _step(self, niter):\n if self.parallelize:\n for p in self.pipes:\n p.send({'task':'step',\n 'niter':niter})\n else:\n for c in self._chains:\n c.step(niter)\n\n def _extend(self, niter):\n if self.parallelize:\n for p in self.pipes:\n p.send({'task':'extend',\n 'niter':niter})\n else:\n for c in self._chains:\n c.extend(niter)\n\n def _build_chain(self, ikeep):\n if self.parallelize:\n for p in self.pipes:\n p.send({'task':'fetch',\n 'key':'chain'})\n self.chain = np.hstack([p.recv()[ikeep:] for p in self.pipes])\n else:\n self.chain = np.hstack([c.chain[ikeep:] for c in self._chains])\n\n def run_mcmc(self, miniter=5000, maxiter=100000, silent=False):\n \"\"\" Run the Markov Chain Monte Carlo for the LinMix object.\n\n Bayesian inference is employed, and a Markov chain containing random draws from the\n posterior is developed. Convergence of the MCMC to the posterior is monitored using the\n potential scale reduction factor (RHAT, Gelman et al. 2004). In general, when RHAT < 1.1\n then approximate convergence is reached. 
After convergence is reached, the second halves\n of all chains are concatenated and stored in the `.chain` attribute as a numpy recarray.\n\n Args:\n miniter(int): The minimum number of iterations to use.\n maxiter(int): The maximum number of iterations to use.\n silent(bool): If true, then suppress updates during sampling.\n \"\"\"\n checkiter = 100\n self._initialize_chains(miniter)\n for i in range(0, miniter, checkiter):\n self._step(checkiter)\n Rhat = self._get_Rhat()\n\n if not silent:\n print()\n print(\"Iteration: \", i+checkiter)\n print (\"Rhat values for alpha, beta, log(sigma^2)\"\n \", mean(xi), log(var(xi)), atanh(corr(xi, eta)):\")\n print(Rhat)\n\n i += checkiter\n while not np.all(Rhat < 1.1) and (i < maxiter):\n self._extend(checkiter)\n self._step(checkiter)\n\n Rhat = self._get_Rhat()\n if not silent:\n print()\n print(\"Iteration: \", i+checkiter)\n print (\"Rhat values for alpha, beta, log(sigma^2)\"\n \", mean(xi), log(var(xi)), atanh(corr(xi, eta)):\")\n print(Rhat)\n i += checkiter\n\n # Throw away first half of each chain\n self._build_chain(int(i/2))\n # Clean up threads\n if self.parallelize:\n for p in self.pipes:\n p.send({'task':'kill'})\n"
] | [
[
"numpy.ones",
"numpy.sum",
"numpy.var",
"numpy.random.RandomState",
"numpy.log",
"numpy.cov",
"numpy.vstack",
"numpy.seterr",
"numpy.logical_not",
"numpy.mean",
"numpy.zeros",
"numpy.median",
"numpy.hstack",
"numpy.all",
"numpy.array",
"numpy.zeros_like",
"numpy.empty",
"numpy.exp",
"numpy.sqrt",
"numpy.dot",
"numpy.random.randint"
]
] |
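The entry above runs a Gibbs sampler for regression with measurement errors in both variables and stops once the Gelman-Rubin statistic (Rhat) of several summary quantities falls below 1.1. The sketch below is a minimal, self-contained version of that convergence check on synthetic chains; the function name `gelman_rubin_rhat` and the toy data are ours, not taken from the file above.

```python
import numpy as np

def gelman_rubin_rhat(chains):
    """Potential scale reduction factor for `chains` of shape (ndraw, nchain)."""
    ndraw, nchain = chains.shape
    chain_means = chains.mean(axis=0)                 # per-chain means
    grand_mean = chain_means.mean()
    # Between-chain variance B and mean within-chain variance W
    B = ndraw / (nchain - 1.0) * np.sum((chain_means - grand_mean) ** 2)
    W = np.mean(np.var(chains, axis=0, ddof=1))
    # Pooled estimate of the posterior variance, then Rhat
    var_plus = (1.0 - 1.0 / ndraw) * W + B / ndraw
    return np.sqrt(var_plus / W)

rng = np.random.default_rng(0)
# Four chains drawing from the same target: Rhat sits near 1.0.
good = rng.normal(size=(2000, 4))
# Four chains stuck at different locations: Rhat is well above the 1.1 cut-off.
bad = rng.normal(loc=np.arange(4), size=(2000, 4))
print(gelman_rubin_rhat(good), gelman_rubin_rhat(bad))
```

With well-mixed chains the statistic is close to 1.0; chains that disagree push it above the 1.1 threshold used in the `run_mcmc` loop above.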
kyle-bong/Realtime_Voice_Activity_Detection | [
"9ea2d6187ec157051163ac83bed3f7135a3bb802"
] | [
"realtime_detection.py"
] | [
"import numpy as np\n# from numpy_ringbuffer import RingBuffer\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\nimport noisereduce as nr\nfrom tensorflow.keras.models import model_from_json\nfrom sklearn.preprocessing import LabelEncoder\nimport pyaudio\nimport wave # for save audio file\nimport datetime\nimport os\nfrom collections import deque\nimport math\n\n#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# Load segment audio classification model\nmodel_path = r\"audio_model/\"\nmodel_name = \"audio_CNN_2021_09_07_12_19_57_acc_97.53\"\n\n# Model reconstruction from JSON file\nwith open(model_path + model_name + '.json', 'r') as f:\n model = model_from_json(f.read())\n\n# Load weights into the new model\nmodel.load_weights(model_path + model_name + '.h5')\n\n# Replicate label encoder\nlb = LabelEncoder()\nlb.fit_transform(['Speaking', 'OtherSound'])\n\n# Some Utils\n\n# Plot audio\ndef plotAudio2(output):\n fig, ax = plt.subplots(nrows=1,ncols=1, figsize=(20,4)) #figsize=20,4\n plt.plot(output, color='blue')\n ax.set_xlim((0, len(output)))\n plt.show()\n\n# 스펙트로그램: 가로축은 시간, 세로축은 주파수, 진한 정도는 진폭을 나타냅니다.\ndef draw_spectrogram(X):\n X = librosa.effects.preemphasis(X)\n clip, index = librosa.effects.trim(X, top_db=20, hop_length=256)\n stfts = librosa.stft(clip, n_fft=512, hop_length=256, win_length=512) \n stftsdb = librosa.amplitude_to_db(abs(stfts))\n plt.figure(figsize=(20,4))\n librosa.display.specshow(stftsdb, sr=22050, \n hop_length=256,\n x_axis='s', y_axis='hz')\n plt.colorbar()\n \n# 입력 오디오를 normalization하는 함수입니다.\ndef minMaxNormalize(arr):\n mn = np.min(arr)\n mx = np.max(arr)\n return (arr-mn)/(mx-mn)\n\n# 입력 오디오 buffer가 말소리인지 아닌지를 판별하는 함수입니다.\ndef predictSound(X):\n\n #Triming: 입력 오디오에서 무음 구간을 제거합니다.\n clip, index = librosa.effects.trim(X, top_db=20, frame_length=512, hop_length=256) \n\n # Trimming\n X, index = librosa.effects.trim(X, top_db=20, frame_length=512,hop_length=256)\n \n #get mel-spectrogram: 입력 오디오로부터 mel-spectrogram feature 추출\n X = librosa.feature.melspectrogram(y=X, sr=16000, n_fft=512, hop_length=256, win_length=512)\n X = librosa.power_to_db(X, ref=np.max)\n X = X.T\n X = np.mean(X, axis=0)\n X = minMaxNormalize(X)\n X = np.reshape(X, (1, 16, 8))\n\n # get prob\n result = model.predict(np.array([X]))\n predictions = [np.argmax(y) for y in result]\n prob = np.max(result)\n result = lb.inverse_transform([predictions[0]])[0]\n #print('predict: ', result, round(prob, 2))\n return result, prob\n\n\n# 현재 입력값의 dB를 출력하는 함수입니다.\ndef showdB(y): # y, sr =librosa.load(...) 
\n clip, index = librosa.effects.trim(y, top_db=20, frame_length=512, hop_length=256)\n stfts = librosa.stft(clip, n_fft=512, hop_length=256, win_length=512)\n dB = librosa.amplitude_to_db(abs(stfts), ref=1/1000)\n dB = np.mean(dB)\n return dB \n\n\ndef pcm2float(sig, dtype='float32'):\n sig = np.asarray(sig)\n if sig.dtype.kind not in 'iu':\n raise TypeError(\"'sig' must be an array of integers\")\n dtype = np.dtype(dtype)\n if dtype.kind != 'f':\n raise TypeError(\"'dtype' must be a floating point type\")\n\n i = np.iinfo(sig.dtype)\n abs_max = 2 ** (i.bits - 1)\n offset = i.min + abs_max\n return (sig.astype(dtype) - offset) / abs_max\n\n# 발화 끝으로 갈수록 에너지가 약해지므로 정확도가 낮아집니다.\n# 이를 보완하기 위해 후처리 과정에서 이동평균을 이용합니다.\nclass MovingAverage:\n def __init__(self, size: int):\n self.data = deque(maxlen = size)\n\n def next(self, val: int) -> float:\n self.data.append(val)\n return sum(self.data)/len(self.data)\n\n\nclass RealtimeRecording():\n def __init__(self):\n self.CHUNKSIZE = 8192 # 8192: 256ms. 입력 오디오 신호를 256ms 단위로 받습니다.\n self.RATE = 16000 # sample rate\n self.FORMAT = pyaudio.paInt16 \n# self.FORMAT = pyaudio.paFloat32 # original = paFloat32 \n self.CHANNELS = 1 # mono\n\n self.audio_buffer = bytes()\n self.ma = MovingAverage(3)\n self.STATE = False\n\n # for saving speaking buffers\n self.speaking_buffer = np.array([])\n self.SAVE = False\n self.previous_result = ''\n self.category = 0.0\n\n \n def start(self):\n # initialize portaudio\n print(\"Stream Start\")\n now = datetime.datetime.now()\n p = pyaudio.PyAudio()\n\n stream = p.open(format=self.FORMAT,\n channels=self.CHANNELS,\n rate=self.RATE, input=True,\n frames_per_buffer=self.CHUNKSIZE)\n\n if not self.STATE:\n self.audio_buffer += audio_data.data\n\n if len(self.audio_buffer) == 20480: #10240 = 640ms. np.frombuffer를 거치면 320ms\n data = self.audio_buffer\n self.noise_sample = np.frombuffer(data, dtype=np.int16)\n\n self.noise_sample = np.nan_to_num(self.noise_sample)\n self.noise_sample_float = pcm2float(self.noise_sample)\n #plotAudio2(self.noise_sample_float)\n self.audio_buffer = bytes()\n self.STATE = True\n print('Noise reduction setting complete')\n\n\n if self.STATE:\n self.audio_buffer += audio_data.data\n \n if len(self.audio_buffer) == 8192: #8192: 256ms\n\n data = self.audio_buffer\n self.sample = np.frombuffer(data, dtype=np.int16)\n self.sample_float = pcm2float(self.sample)\n\n # nan 값 발견 시 제거\n if not np.isfinite(self.sample_float).all():\n self.sample = np.nan_to_num(self.sample)\n\n # 노이즈 샘플로 노이즈 제거\n noisy_part = self.noise_sample_float\n self.current_window = nr.reduce_noise(y=self.sample_float, \n y_noise=noisy_part, prop_decrease=1.0, sr=16000)\n\n \n # dB Threshold. 특정 dB 이상의 오디오에 대해서만 판별을 수행합니다.\n current_dB = showdB(self.current_window)\n dB_threshold = 16 \n \n # predict\n self.pred, self.prob = predictSound(np.array(self.current_window))\n\n # dB filtering and hangover\n # 이전 buffer의 상태에 따라서 speaking 판단 여부를 조금씩 조정합니다.\n if current_dB > dB_threshold:\n # false positive를 줄이기 위해 설정한 값입니다. 
사용 환경에 따라서 조정할 수 있습니다.\n if self.pred == 'Speaking' and self.prob > 0.75: \n #print('pred: ', self.pred, round(self.prob,2))\n self.result = 'Speaking'\n self.category = self.ma.next(1)\n #print('result: ', self.result, self.category, 'loud speaking')\n\n else:\n #print('pred: ', self.pred, round(self.prob,2))\n if self.previous_result == 'Speaking' and self.category >0.7:\n #print('previous: ', self.previous_result)\n self.result = 'Speaking'\n self.category = self.ma.next(0)\n #print('result: ', self.result, self.category, 'possible speaking')\n else:\n #print('previous: ', self.previous_result)\n self.result = 'OtherSound'\n self.category = self.ma.next(0)\n #print('result: ', self.result, self.category, 'loud othersound')\n\n else:\n #print('pred: ', self.pred, round(self.prob,2))\n if self.previous_result == 'Speaking' and self.category >0.5:\n #print('previous: ', self.previous_result)\n self.result = 'Speaking'\n self.category = self.ma.next(0)\n #print('result: ', self.result, self.category, 'quite speaking')\n else:\n #print('previous: ', self.previous_result)\n self.result = 'OtherSound'\n self.category = self.ma.next(0)\n #print('result: ', self.result, self.category, 'quite othersound')\n\n now = datetime.datetime.now()\n print('final result: ', self.result, round(self.category,2))\n #print('dB: ', round(current_dB, 2))\n #print('*'*20)\n\n # maximum length of speaking buffer\n max_buffer_len = 16000 * 10 # 10S\n if self.category != 0 and len(self.speaking_buffer) < max_buffer_len:\n self.speaking_buffer = np.concatenate((self.speaking_buffer, self.current_window))\n self.SAVE=True\n else:\n self.SAVE=False\n\n \n \n self.audio_buffer = bytes()\n\n # Saving speaking buffer (optional)\n try:\n now = datetime.datetime.now()\n if self.SAVE == False and len(self.speaking_buffer) !=0:\n speaking_length = len(self.speaking_buffer) / 16000.\n print('speaking_length: ', speaking_length, 's')\n self.speaking_buffer = np.array([])\n except AttributeError:\n pass\n\n # audio_buffer = []\n # frames = []\n \n\n # for i in range(0, int(self.RATE / self.CHUNKSIZE * self.RECORD_SECONDS)):\n # data = stream.read(self.CHUNKSIZE)\n # current_window = np.frombuffer(data, dtype=np.int16) # dtype=np.float32\n\n # audio_buffer = np.concatenate((audio_buffer, current_window))\n \n # noisy_part = audio_buffer[0:20480] # 주변 소음을 수집한 뒤 noise reduction을 수행합니다.\n # audio_buffer = nr.reduce_noise(y = audio_buffer, y_noise=noisy_part, sr=16000)\n\n \n \n # close stream\n now = datetime.datetime.now()\n print(now)\n stream.stop_stream()\n stream.close()\n p.terminate()\n print('End.')\n \n return audio_buffer\n\nif __name__ == '__main__':\n rr = RealtimeRecording()\n audio_data = rr.start()\n predicted = predictSound(np.array(audio_data))\n print(predicted)\n"
] | [
[
"numpy.dtype",
"numpy.asarray",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.plot",
"numpy.nan_to_num",
"numpy.isfinite",
"matplotlib.pyplot.figure",
"numpy.reshape",
"numpy.mean",
"matplotlib.pyplot.subplots",
"numpy.argmax",
"numpy.max",
"numpy.min",
"matplotlib.pyplot.colorbar",
"numpy.iinfo",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.concatenate",
"numpy.frombuffer"
]
] |
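The recorder above converts int16 PCM buffers to floats, classifies each buffer, and smooths the frame-level "Speaking"/"OtherSound" decisions with a short moving average (a hangover). The sketch below isolates just those two pieces on made-up data; the 0.75 probability threshold mirrors the file, but `pcm16_to_float` is a simplified int16-only variant of its `pcm2float` helper and the frame probabilities are invented for the demo.

```python
import numpy as np
from collections import deque

def pcm16_to_float(sig):
    """Map int16 PCM samples to floats in [-1, 1)."""
    sig = np.asarray(sig, dtype=np.int16)
    return sig.astype(np.float32) / 32768.0

class MovingAverage:
    """Average of the last `size` values; smooths frame-level decisions."""
    def __init__(self, size):
        self.data = deque(maxlen=size)
    def next(self, val):
        self.data.append(val)
        return sum(self.data) / len(self.data)

# A chunk of int16 PCM at 16 kHz, as a stand-in for one stream buffer.
chunk = (np.sin(2 * np.pi * 440 * np.arange(4096) / 16000) * 3000).astype(np.int16)
samples = pcm16_to_float(chunk)           # floats in [-1, 1), ready for features

# Frame-level speech probabilities from some classifier (made up here).
frame_probs = [0.9, 0.8, 0.2, 0.85, 0.1, 0.05, 0.9]
ma = MovingAverage(3)
smoothed = [ma.next(1 if p > 0.75 else 0) for p in frame_probs]
# A frame counts as speech if either the raw decision or the recent average says so.
decisions = [p > 0.75 or s > 0.5 for p, s in zip(frame_probs, smoothed)]
print(decisions)
```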
mufeili/P-GNN-dgl | [
"536569c22a3899a8b1a28cd0653923fd809cf9bd"
] | [
"main.py"
] | [
"import os\nimport dgl\nimport torch\nimport numpy as np\nimport torch.nn as nn\nfrom model import PGNN\nfrom sklearn.metrics import roc_auc_score\nfrom utils import get_dataset, preselect_anchor\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\ndef get_loss(p, data, out, loss_func, device, get_auc=True):\n edge_mask = np.concatenate((data['positive_edges_{}'.format(p)], data['negative_edges_{}'.format(p)]), axis=-1)\n\n nodes_first = torch.index_select(out, 0, torch.from_numpy(edge_mask[0, :]).long().to(out.device))\n nodes_second = torch.index_select(out, 0, torch.from_numpy(edge_mask[1, :]).long().to(out.device))\n\n pred = torch.sum(nodes_first * nodes_second, dim=-1)\n\n label_positive = torch.ones([data['positive_edges_{}'.format(p)].shape[1], ], dtype=pred.dtype)\n label_negative = torch.zeros([data['negative_edges_{}'.format(p)].shape[1], ], dtype=pred.dtype)\n label = torch.cat((label_positive, label_negative)).to(device)\n loss = loss_func(pred, label)\n\n if get_auc:\n auc = roc_auc_score(label.flatten().cpu().numpy(), torch.sigmoid(pred).flatten().data.cpu().numpy())\n return loss, auc\n else:\n return loss\n\ndef train_model(data, model, loss_func, optimizer, device, g_data):\n model.train()\n out = model(g_data)\n\n loss = get_loss('train', data, out, loss_func, device, get_auc=False)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n\n return g_data\n\ndef eval_model(data, g_data, model, loss_func, device):\n model.eval()\n out = model(g_data)\n\n # train loss and auc\n tmp_loss, auc_train = get_loss('train', data, out, loss_func, device)\n loss_train = tmp_loss.cpu().data.numpy()\n\n # val loss and auc\n _, auc_val = get_loss('val', data, out, loss_func, device)\n\n # test loss and auc\n _, auc_test = get_loss('test', data, out, loss_func, device)\n\n return loss_train, auc_train, auc_val, auc_test\n\ndef main(args):\n # The mean and standard deviation of the experiment results\n # are stored in the 'results' folder\n if not os.path.isdir('results'):\n os.mkdir('results')\n\n if torch.cuda.is_available():\n device = 'cuda:0'\n else:\n device = 'cpu'\n\n print('Learning Type: {}'.format(['Transductive', 'Inductive'][args.inductive]),\n 'Task: {}'.format(args.task))\n\n results = []\n\n for repeat in range(args.repeat_num):\n data = get_dataset(args)\n\n # pre-sample anchor nodes and compute shortest distance values for all epochs\n g_list, anchor_eid_list, dist_max_list, edge_weight_list = preselect_anchor(data, args)\n\n # model\n model = PGNN(input_dim=data['feature'].shape[1]).to(device)\n\n # loss\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-2, weight_decay=5e-4)\n loss_func = nn.BCEWithLogitsLoss()\n\n best_auc_val = -1\n best_auc_test = -1\n\n for epoch in range(args.epoch_num):\n if epoch == 200:\n for param_group in optimizer.param_groups:\n param_group['lr'] /= 10\n\n g = dgl.graph(g_list[epoch])\n g.ndata['feat'] = torch.FloatTensor(data['feature'])\n g.edata['sp_dist'] = torch.FloatTensor(edge_weight_list[epoch])\n g_data = {\n 'graph': g.to(device),\n 'anchor_eid': anchor_eid_list[epoch],\n 'dists_max': dist_max_list[epoch]\n }\n\n train_model(data, model, loss_func, optimizer, device, g_data)\n\n loss_train, auc_train, auc_val, auc_test = eval_model(\n data, g_data, model, loss_func, device)\n if auc_val > best_auc_val:\n best_auc_val = auc_val\n best_auc_test = auc_test\n\n if epoch % args.epoch_log == 0:\n print(repeat, epoch, 'Loss {:.4f}'.format(loss_train), 'Train AUC: {:.4f}'.format(auc_train),\n 
'Val AUC: {:.4f}'.format(auc_val), 'Test AUC: {:.4f}'.format(auc_test),\n 'Best Val AUC: {:.4f}'.format(best_auc_val), 'Best Test AUC: {:.4f}'.format(best_auc_test))\n\n results.append(best_auc_test)\n\n results = np.array(results)\n results_mean = np.mean(results).round(6)\n results_std = np.std(results).round(6)\n print('-----------------Final-------------------')\n print(results_mean, results_std)\n\n with open('results/{}_{}_{}.txt'.format(['Transductive', 'Inductive'][args.inductive], args.task,\n args.k_hop_dist), 'w') as f:\n f.write('{}, {}\\n'.format(results_mean, results_std))\n\nif __name__ == '__main__':\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n parser.add_argument('--task', type=str, default='link', choices=['link', 'link_pair'])\n parser.add_argument('--inductive', action='store_true',\n help='Inductive learning or transductive learning')\n parser.add_argument('--k_hop_dist', default=-1, type=int,\n help='K-hop shortest path distance, -1 means exact shortest path.')\n\n parser.add_argument('--epoch_num', type=int, default=2001)\n parser.add_argument('--repeat_num', type=int, default=10)\n parser.add_argument('--epoch_log', type=int, default=10)\n\n args = parser.parse_args()\n main(args)\n"
] | [
[
"torch.sum",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.from_numpy",
"torch.nn.BCEWithLogitsLoss",
"numpy.array",
"numpy.std",
"torch.sigmoid",
"torch.cat",
"numpy.mean"
]
] |
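In the entry above, link-prediction scores are dot products between the embeddings of a candidate edge's two endpoints, trained with `BCEWithLogitsLoss` and reported as ROC AUC. Below is a toy version of that scoring step, with random embeddings standing in for the P-GNN output and hand-picked positive/negative edge lists.

```python
import numpy as np
import torch
from sklearn.metrics import roc_auc_score

# Toy setup: 10 nodes with 8-dimensional embeddings from some encoder.
emb = torch.randn(10, 8)

# Edge index arrays of shape [2, n_edges]: positives and sampled negatives.
pos = np.array([[0, 1, 2], [1, 2, 3]])
neg = np.array([[0, 4, 5], [7, 8, 9]])
edges = np.concatenate([pos, neg], axis=-1)

# Score each pair by the dot product of its endpoint embeddings.
src = torch.index_select(emb, 0, torch.from_numpy(edges[0]).long())
dst = torch.index_select(emb, 0, torch.from_numpy(edges[1]).long())
scores = torch.sum(src * dst, dim=-1)

labels = np.concatenate([np.ones(pos.shape[1]), np.zeros(neg.shape[1])])
print(roc_auc_score(labels, torch.sigmoid(scores).numpy()))
```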
jishnujayakumar/MLRC2020-EmbedKGQA | [
"ee99b8c83e6278b2dd6f16e0ae910c80b28da251"
] | [
"train_embeddings/main.py"
] | [
"from load_data import Data\nimport numpy as np\nimport torch\nimport time\nfrom collections import defaultdict\nfrom model import *\nfrom torch.optim.lr_scheduler import ExponentialLR\nimport argparse\nfrom tqdm import tqdm\nimport os\nfrom prettytable import PrettyTable\n\n \nclass Experiment:\n\n def __init__(self, learning_rate=0.0005, ent_vec_dim=200, rel_vec_dim=200, \n num_iterations=500, batch_size=128, decay_rate=0., cuda=False, \n input_dropout=0.3, hidden_dropout1=0.4, hidden_dropout2=0.5,\n label_smoothing=0., outfile='tucker.model', valid_steps=1, loss_type='BCE', do_batch_norm=1,\n dataset='', model='Rotat3', l3_reg = 0.0, load_from = ''):\n self.dataset = dataset\n self.learning_rate = learning_rate\n self.ent_vec_dim = ent_vec_dim\n self.rel_vec_dim = rel_vec_dim\n self.num_iterations = num_iterations\n self.batch_size = batch_size\n self.decay_rate = decay_rate\n self.label_smoothing = label_smoothing\n self.cuda = cuda\n self.outfile = outfile\n self.valid_steps = valid_steps\n self.model = model\n self.l3_reg = l3_reg\n self.loss_type = loss_type\n self.load_from = load_from\n if do_batch_norm == 1:\n do_batch_norm = True\n else:\n do_batch_norm = False\n self.kwargs = {\"input_dropout\": input_dropout, \"hidden_dropout1\": hidden_dropout1,\n \"hidden_dropout2\": hidden_dropout2, \"model\": model, \"loss_type\": loss_type,\n \"do_batch_norm\": do_batch_norm, \"l3_reg\": l3_reg}\n \n def get_data_idxs(self, data):\n\n '''\n Returns triples in their idx form, \n e.g.: (head_entity,relation,tail_entity) gets converted to (1,1,2)\n '''\n\n data_idxs = [(self.entity_idxs[data[i][0]], self.relation_idxs[data[i][1]], \\\n self.entity_idxs[data[i][2]]) for i in range(len(data))]\n return data_idxs\n\n def get_er_vocab(self, data):\n \n \"\"\"\n data =[[1,2,3],[1,2,3], [1,4,3]]\n der_vocab : efaultdict(<class 'list'>, {(1, 2): [3, 3, 3], (1, 4): [3]})\n \n returns er_vocab: (h,r):[t]\n \"\"\"\n \n er_vocab = defaultdict(list)\n for triple in data:\n er_vocab[(triple[0], triple[1])].append(triple[2])\n return er_vocab\n\n\n def get_batch(self, er_vocab, er_vocab_pairs, idx):\n \n '''\n Returns \n 1. batch: er_vocab_pairs(size:batch_size)\n 2. 
targets: batch_size*num_entities tensor with target label for each er_vocab pair \n '''\n \n batch = er_vocab_pairs[idx:idx+self.batch_size]\n targets = torch.zeros([len(batch), len(d.entities)], dtype=torch.float32)\n if self.cuda:\n targets = targets.cuda()\n for idx, pair in enumerate(batch):\n targets[idx, er_vocab[pair]] = 1.\n return np.array(batch), targets\n\n def evaluate(self, model, data):\n model.eval()\n hits = []\n ranks = []\n for i in range(10):\n hits.append([])\n\n test_data_idxs = self.get_data_idxs(data)\n er_vocab = self.get_er_vocab(self.get_data_idxs(d.data))\n\n print(\"Number of data points: %d\" % len(test_data_idxs))\n for i in tqdm(range(0, len(test_data_idxs), self.batch_size)):\n data_batch, _ = self.get_batch(er_vocab, test_data_idxs, i)\n e1_idx = torch.tensor(data_batch[:,0])\n r_idx = torch.tensor(data_batch[:,1])\n e2_idx = torch.tensor(data_batch[:,2])\n if self.cuda:\n e1_idx = e1_idx.cuda()\n r_idx = r_idx.cuda()\n e2_idx = e2_idx.cuda()\n predictions = model.forward(e1_idx, r_idx)\n\n # following lines commented means RAW evaluation (not filtered)\n for j in range(data_batch.shape[0]):\n filt = er_vocab[(data_batch[j][0], data_batch[j][1])]\n target_value = predictions[j,e2_idx[j]].item()\n predictions[j, filt] = 0.0\n predictions[j, e2_idx[j]] = target_value\n\n sort_values, sort_idxs = torch.sort(predictions, dim=1, descending=True)\n sort_idxs = sort_idxs.cpu().numpy()\n for j in range(data_batch.shape[0]):\n rank = np.where(sort_idxs[j]==e2_idx[j].item())[0][0]\n ranks.append(rank+1)\n\n for hits_level in range(10):\n if rank <= hits_level:\n hits[hits_level].append(1.0)\n else:\n hits[hits_level].append(0.0)\n\n hitat10 = np.mean(hits[9])\n hitat3 = np.mean(hits[2])\n hitat1 = np.mean(hits[0])\n meanrank = np.mean(ranks)\n mrr = np.mean(1./np.array(ranks))\n\n pretty_tbl = PrettyTable()\n pretty_tbl.field_names = [\"Metric\", \"Result\"]\n pretty_tbl.add_row(['Hits@10', hitat10])\n pretty_tbl.add_row(['Hits@3', hitat3])\n pretty_tbl.add_row(['Hits@1', hitat1])\n pretty_tbl.add_row(['MeanRank', meanrank])\n pretty_tbl.add_row(['MeanReciprocalRank', mrr])\n print(pretty_tbl)\n\n return [mrr, meanrank, hitat10, hitat3, hitat1]\n\n def write_embedding_files(self, model):\n model.eval()\n model_folder = f\"../kg_embeddings/{self.model}/{self.dataset}\" \n data_folder = \"../data/%s/\" % self.dataset\n embedding_type = self.model\n if(not os.path.exists(model_folder)):\n os.makedirs(model_folder)\n R_numpy = model.R.weight.data.cpu().numpy()\n E_numpy = model.E.weight.data.cpu().numpy()\n bn_list = []\n for bn in [model.bn0, model.bn1, model.bn2]:\n bn_weight = bn.weight.data.cpu().numpy()\n bn_bias = bn.bias.data.cpu().numpy()\n bn_running_mean = bn.running_mean.data.cpu().numpy()\n bn_running_var = bn.running_var.data.cpu().numpy()\n bn_numpy = {}\n bn_numpy['weight'] = bn_weight\n bn_numpy['bias'] = bn_bias\n bn_numpy['running_mean'] = bn_running_mean\n bn_numpy['running_var'] = bn_running_var\n bn_list.append(bn_numpy)\n \n if embedding_type == 'TuckER':\n W_numpy = model.W.detach().cpu().numpy()\n \n np.save(model_folder +'/E.npy', E_numpy)\n np.save(model_folder +'/R.npy', R_numpy)\n for i, bn in enumerate(bn_list):\n np.save(model_folder + '/bn' + str(i) + '.npy', bn)\n\n if embedding_type == 'TuckER':\n np.save(model_folder +'/W.npy', W_numpy)\n\n f = open(data_folder + '/entities.dict', 'r')\n f2 = open(model_folder + '/entities.dict', 'w')\n ents = {}\n idx2ent = {}\n for line in f:\n line = line.rstrip().split('\\t')\n name = line[0]\n id = 
int(line[1])\n ents[name] = id\n idx2ent[id] = name\n f2.write(str(id) + '\\t' + name + '\\n')\n f.close()\n f2.close()\n f = open(data_folder + '/relations.dict', 'r')\n f2 = open(model_folder + '/relations.dict', 'w')\n rels = {}\n idx2rel = {}\n for line in f:\n line = line.strip().split('\\t')\n name = line[0]\n id = int(line[1])\n rels[name] = id\n idx2rel[id] = name\n f2.write(str(id) + '\\t' + name + '\\n')\n f.close()\n f2.close()\n\n\n def train_and_eval(self, d):\n torch.set_num_threads(2)\n best_valid = [0, 0, 0, 0, 0]\n best_test = [0, 0, 0, 0, 0]\n self.entity_idxs = {d.entities[i]:i for i in range(len(d.entities))}\n self.relation_idxs = {d.relations[i]:i for i in range(len(d.relations))}\n f = open('../data/' + self.dataset +'/entities.dict', 'w')\n for key, value in self.entity_idxs.items():\n f.write(key + '\\t' + str(value) +'\\n')\n f.close()\n f = open('../data/' + self.dataset + '/relations.dict', 'w')\n for key, value in self.relation_idxs.items():\n f.write(key + '\\t' + str(value) +'\\n')\n f.close()\n train_data_idxs = self.get_data_idxs(d.train_data)\n\n pretty_tbl = PrettyTable()\n pretty_tbl.field_names = [\"ARTIFACT\", \"SAMPLES\"]\n pretty_tbl.add_row(['#TrainingSamples', len(train_data_idxs)])\n pretty_tbl.add_row(['#Entities', len(self.entity_idxs)])\n pretty_tbl.add_row(['#Relations', len(self.relation_idxs)])\n print(pretty_tbl)\n \n model = KGE(d, self.ent_vec_dim, self.rel_vec_dim, **self.kwargs)\n model.init()\n if self.load_from != '':\n fname = self.load_from\n checkpoint = torch.load(fname)\n model.load_state_dict(checkpoint)\n if self.cuda:\n model.cuda()\n opt = torch.optim.Adam(model.parameters(), lr=self.learning_rate)\n if self.decay_rate:\n scheduler = ExponentialLR(opt, self.decay_rate)\n\n er_vocab = self.get_er_vocab(train_data_idxs)\n er_vocab_pairs = list(er_vocab.keys()) #list(er_vocab.keys())\n\n print(\"Starting training...\")\n\n for it in range(1, self.num_iterations+1):\n print(f\"Iteration: {it}/{self.num_iterations}\")\n start_train = time.time()\n model.train() \n losses = []\n np.random.shuffle(er_vocab_pairs)\n for j in tqdm(range(0, len(er_vocab_pairs), self.batch_size)):\n data_batch, targets = self.get_batch(er_vocab, er_vocab_pairs, j)\n opt.zero_grad()\n e1_idx = torch.tensor(data_batch[:,0])\n r_idx = torch.tensor(data_batch[:,1]) \n if self.cuda:\n e1_idx = e1_idx.cuda()\n r_idx = r_idx.cuda()\n predictions = model.forward(e1_idx, r_idx)\n if self.label_smoothing:\n targets = ((1.0-self.label_smoothing)*targets) + (1.0/targets.size(1)) \n loss = model.loss(predictions, targets)\n loss.backward()\n opt.step()\n losses.append(loss.item())\n if self.decay_rate:\n scheduler.step()\n if it%100 == 0:\n print('Epoch', it, ' Epoch time', time.time()-start_train, ' Loss:', np.mean(losses))\n model.eval()\n \n with torch.no_grad():\n if it % self.valid_steps == 0:\n start_test = time.time()\n print(\"Validation:\")\n valid = self.evaluate(model, d.valid_data)\n print(\"Test:\")\n test = self.evaluate(model, d.test_data)\n valid_mrr = valid[0]\n test_mrr = test[0]\n if valid_mrr >= best_valid[0]:\n best_valid = valid\n best_test = test\n print('Validation MRR increased.')\n print('Saving model...')\n self.write_embedding_files(model)\n print('Model saved!') \n \n pretty_tbl = PrettyTable()\n pretty_tbl.field_names = [\"ARTIFACT\", \"VALUE\"]\n pretty_tbl.add_row(['Best valid', best_valid])\n pretty_tbl.add_row(['Best test', best_test])\n pretty_tbl.add_row(['Dataset', self.dataset])\n pretty_tbl.add_row(['Model', self.model])\n 
print(pretty_tbl)\n\n print(f'Training-time: {round(time.time()-start_test,2)}')\n\n pretty_tbl = PrettyTable()\n pretty_tbl.field_names = [\"Parameter\", \"Value\"]\n pretty_tbl.add_row(['Learning rate', self.learning_rate])\n pretty_tbl.add_row(['Decay', self.decay_rate])\n pretty_tbl.add_row(['Dim', self.ent_vec_dim])\n pretty_tbl.add_row(['Input drop', self.kwargs[\"input_dropout\"]])\n pretty_tbl.add_row(['Hidden drop 2', self.kwargs[\"hidden_dropout2\"]])\n pretty_tbl.add_row(['Label Smoothing', self.label_smoothing])\n pretty_tbl.add_row(['Batch size', self.batch_size])\n pretty_tbl.add_row(['Loss type', self.loss_type])\n pretty_tbl.add_row(['L3 reg', self.l3_reg])\n print(pretty_tbl) \n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dataset\", type=str, default=\"FB15k-237\", nargs=\"?\",\n help=\"Which dataset to use: FB15k, FB15k-237, WN18 or WN18RR.\")\n parser.add_argument(\"--num_iterations\", type=int, default=500, nargs=\"?\",\n help=\"Number of iterations.\")\n parser.add_argument(\"--batch_size\", type=int, default=128, nargs=\"?\",\n help=\"Batch size.\")\n parser.add_argument(\"--lr\", type=float, default=0.0005, nargs=\"?\",\n help=\"Learning rate.\")\n parser.add_argument(\"--model\", type=str, default='Rotat3', nargs=\"?\",\n help=\"Model.\")\n parser.add_argument(\"--dr\", type=float, default=1.0, nargs=\"?\",\n help=\"Decay rate.\")\n parser.add_argument(\"--edim\", type=int, default=200, nargs=\"?\",\n help=\"Entity embedding dimensionality.\")\n parser.add_argument(\"--rdim\", type=int, default=200, nargs=\"?\",\n help=\"Relation embedding dimensionality.\")\n parser.add_argument(\"--cuda\", type=bool, default=True, nargs=\"?\",\n help=\"Whether to use cuda (GPU) or not (CPU).\")\n parser.add_argument(\"--input_dropout\", type=float, default=0.3, nargs=\"?\",\n help=\"Input layer dropout.\")\n parser.add_argument(\"--hidden_dropout1\", type=float, default=0.4, nargs=\"?\",\n help=\"Dropout after the first hidden layer.\")\n parser.add_argument(\"--hidden_dropout2\", type=float, default=0.5, nargs=\"?\",\n help=\"Dropout after the second hidden layer.\")\n parser.add_argument(\"--label_smoothing\", type=float, default=0.1, nargs=\"?\",\n help=\"Amount of label smoothing.\")\n parser.add_argument(\"--outfile\", type=str, default='tucker.model', nargs=\"?\",\n help=\"File to save\")\n parser.add_argument(\"--valid_steps\", type=int, default=1, nargs=\"?\",\n help=\"Epochs before u validate\")\n parser.add_argument(\"--loss_type\", type=str, default='BCE', nargs=\"?\",\n help=\"Loss type\")\n parser.add_argument(\"--do_batch_norm\", type=int, default=1, nargs=\"?\",\n help=\"Do batch norm or not (0, 1)\")\n parser.add_argument(\"--l3_reg\", type=float, default=0.0, nargs=\"?\",\n help=\"l3 reg hyperparameter\")\n parser.add_argument(\"--load_from\", type=str, default='', nargs=\"?\",\n help=\"load from state dict\")\n\n args = parser.parse_args()\n dataset = args.dataset\n data_dir = f\"../data/{dataset}/\"\n torch.backends.cudnn.deterministic = True \n seed = 20\n np.random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available:\n torch.cuda.manual_seed_all(seed) \n\n experiment = Experiment(num_iterations=args.num_iterations, batch_size=args.batch_size, learning_rate=args.lr, \n decay_rate=args.dr, ent_vec_dim=args.edim, rel_vec_dim=args.rdim, cuda=args.cuda,\n input_dropout=args.input_dropout, hidden_dropout1=args.hidden_dropout1, \n hidden_dropout2=args.hidden_dropout2, label_smoothing=args.label_smoothing, 
outfile=args.outfile,\n valid_steps=args.valid_steps, loss_type=args.loss_type, do_batch_norm=args.do_batch_norm,\n dataset=args.dataset, model=args.model, l3_reg=args.l3_reg, load_from=args.load_from)\n \n d=Data(data_dir=data_dir, reverse=True)\n\n experiment.train_and_eval(d)\n \n\n"
] | [
[
"numpy.save",
"torch.cuda.manual_seed_all",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.load",
"numpy.random.shuffle",
"torch.sort",
"torch.manual_seed",
"torch.no_grad",
"numpy.random.seed",
"torch.tensor",
"torch.set_num_threads",
"numpy.array",
"numpy.mean"
]
] |
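The evaluation loop above ranks every candidate tail entity for each (head, relation) query and filters out other known-true tails before computing Hits@k and MRR. The sketch below shows that filtered-ranking step on toy scores; it masks known answers with `-inf` rather than the `0.0` used in the file, a common equivalent variant when scores are sigmoid probabilities, and all numbers are invented.

```python
import numpy as np

def filtered_rank(scores, target, known_true):
    """Rank of `target` after removing other known-true tails (filtered setting)."""
    scores = scores.copy()
    target_score = scores[target]
    scores[list(known_true)] = -np.inf     # drop every known answer ...
    scores[target] = target_score          # ... except the one being ranked
    order = np.argsort(-scores)            # candidate ids, best score first
    return int(np.where(order == target)[0][0]) + 1

# Toy scores over 6 candidate tail entities for one (head, relation) query.
scores = np.array([0.1, 0.9, 0.3, 0.8, 0.2, 0.7])
known_true = {1, 3}        # both tails are correct answers in the graph
rank = filtered_rank(scores, target=3, known_true=known_true)
print(rank, "MRR contribution:", 1.0 / rank, "Hits@1:", int(rank <= 1))
```

Without filtering, entity 3 would rank second behind entity 1; filtering the other true tail gives it rank 1, which is the point of the filtered metrics reported above.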
kiyoon/PyVideoAI | [
"c4d3ba7a69723aeae7da48245989ae11cbdb1f8b"
] | [
"tools/run_train.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\"\"\"Wrapper to train and test a video classification model.\"\"\"\n\nimport argparse\nimport sys\nimport torch\n\nimport pyvideoai.utils.multiprocessing_helper as mpu\nfrom pyvideoai.train_multiprocess import train\nfrom experiment_utils.argparse_utils import add_exp_arguments\n\ndef add_distributed_args(parser):\n \"\"\"\n Parse the following arguments for the video training and testing pipeline.\n Args:\n shard_id (int): shard id for the current machine. Starts from 0 to\n num_shards - 1. If single machine is used, then set shard id to 0.\n num_shards (int): number of shards using by the job.\n init_method (str): initialization method to launch the job with multiple\n devices. Options includes TCP or shared file-system for\n initialization. details can be find in\n https://pytorch.org/docs/stable/distributed.html#tcp-initialization\n \"\"\"\n parser.add_argument(\n \"--local_world_size\",\n help=\"Number of processes per machine. (i.e. number of GPUs to use per machine)\",\n default=1,\n type=int,\n )\n parser.add_argument(\n \"--shard_id\",\n help=\"The shard id of current node, Starts from 0 to num_shards - 1\",\n default=0,\n type=int,\n )\n parser.add_argument(\n \"--num_shards\",\n help=\"Number of shards using by the job\",\n default=1,\n type=int,\n )\n parser.add_argument(\n \"--init_method\",\n help=\"Initialization method, includes TCP or shared file-system\",\n default=\"tcp://localhost:19999\",\n type=str,\n )\n parser.add_argument(\n \"--backend\",\n help=\"Torch distributed backend\",\n default=\"nccl\",\n type=str,\n )\n\n\nimport dataset_configs\nimport model_configs\nimport exp_configs\nfrom pyvideoai import config\n\ndef add_train_args(parser):\n parser.add_argument(\"-e\", \"--num_epochs\", type=int, default=100, help=\"Number of epochs for training.\")\n add_exp_arguments(parser, \n root_default=config.DEFAULT_EXPERIMENT_ROOT, dataset_default='hmdb', model_default='i3d_resnet50', name_default='crop224_8x8_largejit_plateau_1scrop5tcrop_split1',\n dataset_channel_choices=dataset_configs.available_channels, model_channel_choices=model_configs.available_channels, exp_channel_choices=exp_configs.available_channels)\n parser.add_argument(\"-s\", \"--save_mode\", type=str, default=\"last_and_peaks\", choices=[\"all\", \"higher\", \"last_and_peaks\"], help=\"Checkpoint saving condition. all: save all epochs, higher: save whenever the highest validation performance model is found, last_and_peaks: save all epochs, but remove previous epoch if that wasn't the best.\")\n parser.add_argument(\"-S\", \"--training_speed\", type=str, default=\"standard\", choices=[\"standard\", \"faster\"], help=\"Only applicable when using distributed multi-GPU training. 'faster' skips multiprocess commuication and CPU-GPU synchronisation for calculating training metrics (loss, accuracy) so they will be reported as 0. This probably won't give you any benefit on a single node, but for multi-node, it depends on the internet connection.\")\n parser.add_argument(\"-l\", \"--load_epoch\", type=int, default=None, help=\"Load from checkpoint. 
Set to -1 to load from the last checkpoint.\")\n parser.add_argument(\"--seed\", type=int, default=12, help=\"Random seed for np, torch, torch.cuda, DALI.\")\n parser.add_argument(\"-t\", \"--multi_crop_val_period\", type=int, default=-1, help=\"Number of epochs after full multi-crop validation is performed.\")\n parser.add_argument(\"-T\", \"--telegram_post_period\", type=int, default=10, help=\"Period (in epochs) to send the training stats on Telegram.\")\n parser.add_argument(\"-B\", \"--telegram_bot_idx\", type=int, default=0, help=\"Which Telegram bot to use defined in key.ini?\")\n parser.add_argument(\"-w\", \"--dataloader_num_workers\", type=int, default=4, help=\"num_workers for PyTorch Dataset loader.\")\n parser.add_argument(\"-r\", \"--refresh_period\", type=int, default=1, help=\"How many iterations until printing stats. Increase this if stdio is your bottleneck (such as Slurm printing to network file).\")\n parser.add_argument(\"-v\", \"--version\", type=str, default='auto', help=\"Experiment version (`auto` or integer). `auto` chooses the last version when resuming from the last, otherwise creates new version.\")\n\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description=\"Train and validate an action model\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n add_distributed_args(parser)\n add_train_args(parser)\n return parser\n\n\ndef main():\n \"\"\"\n Main function to spawn the train and test process.\n \"\"\"\n parser = get_parser()\n args = parser.parse_args()\n\n # Perform training.\n if args.local_world_size * args.num_shards > 1:\n torch.multiprocessing.spawn(\n mpu.run,\n nprocs=args.local_world_size,\n args=(\n args.local_world_size,\n train,\n args.init_method,\n args.shard_id,\n args.num_shards,\n args.backend,\n args\n ),\n daemon=False,\n )\n else:\n train(args)\n\n\n\nif __name__ == \"__main__\":\n # This will probably not make any difference,\n # as `torch.multiprocessing.spawn` defaults to \"spawn\" start method.\n #torch.multiprocessing.set_start_method(\"forkserver\")\n #torch.multiprocessing.set_start_method(\"spawn\")\n main()\n"
] | [
[
"torch.multiprocessing.spawn"
]
] |
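The wrapper above launches one training process per GPU with `torch.multiprocessing.spawn` whenever `local_world_size * num_shards > 1`. Below is a minimal, runnable sketch of that launch pattern: it uses the CPU-friendly `gloo` backend instead of `nccl`, an all-reduce as a stand-in for the real `train` function, and an arbitrary local address and port.

```python
import torch
import torch.distributed as dist
import torch.multiprocessing as mp

def worker(local_rank, world_size, init_method):
    # mp.spawn passes the process index as the first argument; use it as the rank.
    dist.init_process_group(
        backend="gloo",              # assumption: CPU backend so the sketch runs anywhere
        init_method=init_method,
        rank=local_rank,
        world_size=world_size,
    )
    # Stand-in for train(args): all-reduce a tensor to show the group is wired up.
    t = torch.tensor([float(local_rank)])
    dist.all_reduce(t, op=dist.ReduceOp.SUM)
    print(f"rank {local_rank}: sum of ranks = {t.item()}")
    dist.destroy_process_group()

if __name__ == "__main__":
    world_size = 2
    mp.spawn(
        worker,
        nprocs=world_size,
        args=(world_size, "tcp://127.0.0.1:29500"),
        daemon=False,
    )
```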
DizzyProtos/asteroid | [
"bb3c374fefe7525c2f6da355834e470d45b45b90"
] | [
"asteroid/masknn/recurrent.py"
] | [
"import torch\nfrom torch import nn\nfrom torch.nn.functional import fold, unfold\n\nfrom . import norms, activations\nfrom .norms import GlobLN, CumLN\nfrom ..utils import has_arg\n\n\nclass SingleRNN(nn.Module):\n \"\"\" Module for a RNN block.\n\n Inspired from https://github.com/yluo42/TAC/blob/master/utility/models.py\n Licensed under CC BY-NC-SA 3.0 US.\n\n Args:\n rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can\n also be passed in lowercase letters.\n input_size (int): Dimension of the input feature. The input should have\n shape [batch, seq_len, input_size].\n hidden_size (int): Dimension of the hidden state.\n n_layers (int, optional): Number of layers used in RNN. Default is 1.\n dropout (float, optional): Dropout ratio. Default is 0.\n bidirectional (bool, optional): Whether the RNN layers are\n bidirectional. Default is ``False``.\n \"\"\"\n\n def __init__(\n self, rnn_type, input_size, hidden_size, n_layers=1, dropout=0, bidirectional=False\n ):\n super(SingleRNN, self).__init__()\n assert rnn_type.upper() in [\"RNN\", \"LSTM\", \"GRU\"]\n rnn_type = rnn_type.upper()\n self.rnn_type = rnn_type\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.rnn = getattr(nn, rnn_type)(\n input_size,\n hidden_size,\n num_layers=n_layers,\n dropout=dropout,\n batch_first=True,\n bidirectional=bool(bidirectional),\n )\n\n def forward(self, inp):\n \"\"\" Input shape [batch, seq, feats] \"\"\"\n self.rnn.flatten_parameters() # Enables faster multi-GPU training.\n output = inp\n rnn_output, _ = self.rnn(output)\n return rnn_output\n\n\nclass StackedResidualRNN(nn.Module):\n \"\"\" Stacked RNN with builtin residual connection.\n Only supports forward RNNs.\n See StackedResidualBiRNN for bidirectional ones.\n\n Args:\n rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can\n also be passed in lowercase letters.\n n_units (int): Number of units in recurrent layers. This will also be\n the expected input size.\n n_layers (int): Number of recurrent layers.\n dropout (float): Dropout value, between 0. and 1. (Default: 0.)\n bidirectional (bool): If True, use bidirectional RNN, else\n unidirectional. (Default: False)\n \"\"\"\n\n def __init__(self, rnn_type, n_units, n_layers=4, dropout=0.0, bidirectional=False):\n super(StackedResidualRNN, self).__init__()\n self.rnn_type = rnn_type\n self.n_units = n_units\n self.n_layers = n_layers\n self.dropout = dropout\n assert bidirectional is False, \"Bidirectional not supported yet\"\n self.bidirectional = bidirectional\n\n self.layers = nn.ModuleList()\n for _ in range(n_layers):\n self.layers.append(\n SingleRNN(\n rnn_type, input_size=n_units, hidden_size=n_units, bidirectional=bidirectional\n )\n )\n self.dropout_layer = nn.Dropout(self.dropout)\n\n def forward(self, x):\n \"\"\" Builtin residual connections + dropout applied before residual.\n Input shape : [batch, time_axis, feat_axis]\n \"\"\"\n for rnn in self.layers:\n rnn_out = rnn(x)\n dropped_out = self.dropout_layer(rnn_out)\n x = x + dropped_out\n return x\n\n\nclass StackedResidualBiRNN(nn.Module):\n \"\"\" Stacked Bidirectional RNN with builtin residual connection.\n Residual connections are applied on both RNN directions.\n Only supports bidiriectional RNNs.\n See StackedResidualRNN for unidirectional ones.\n\n Args:\n rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can\n also be passed in lowercase letters.\n n_units (int): Number of units in recurrent layers. 
This will also be\n the expected input size.\n n_layers (int): Number of recurrent layers.\n dropout (float): Dropout value, between 0. and 1. (Default: 0.)\n bidirectional (bool): If True, use bidirectional RNN, else\n unidirectional. (Default: False)\n \"\"\"\n\n def __init__(self, rnn_type, n_units, n_layers=4, dropout=0.0, bidirectional=True):\n super().__init__()\n self.rnn_type = rnn_type\n self.n_units = n_units\n self.n_layers = n_layers\n self.dropout = dropout\n assert bidirectional is True, \"Only bidirectional not supported yet\"\n self.bidirectional = bidirectional\n\n # The first layer has as many units as input size\n self.first_layer = SingleRNN(\n rnn_type, input_size=n_units, hidden_size=n_units, bidirectional=bidirectional\n )\n # As the first layer outputs 2*n_units, the following layers need\n # 2*n_units as input size\n self.layers = nn.ModuleList()\n for i in range(n_layers - 1):\n input_size = 2 * n_units\n self.layers.append(\n SingleRNN(\n rnn_type,\n input_size=input_size,\n hidden_size=n_units,\n bidirectional=bidirectional,\n )\n )\n self.dropout_layer = nn.Dropout(self.dropout)\n\n def forward(self, x):\n \"\"\" Builtin residual connections + dropout applied before residual.\n Input shape : [batch, time_axis, feat_axis]\n \"\"\"\n # First layer\n rnn_out = self.first_layer(x)\n dropped_out = self.dropout_layer(rnn_out)\n x = torch.cat([x, x], dim=-1) + dropped_out\n # Rest of the layers\n for rnn in self.layers:\n rnn_out = rnn(x)\n dropped_out = self.dropout_layer(rnn_out)\n x = x + dropped_out\n return x\n\n\nclass DPRNNBlock(nn.Module):\n \"\"\" Dual-Path RNN Block as proposed in [1].\n\n Args:\n in_chan (int): Number of input channels.\n hid_size (int): Number of hidden neurons in the RNNs.\n norm_type (str, optional): Type of normalization to use. To choose from\n - ``'gLN'``: global Layernorm\n - ``'cLN'``: channelwise Layernorm\n bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN.\n rnn_type (str, optional): Type of RNN used. Choose from ``'RNN'``,\n ``'LSTM'`` and ``'GRU'``.\n num_layers (int, optional): Number of layers used in each RNN.\n dropout (float, optional): Dropout ratio. Must be in [0, 1].\n\n References:\n [1] \"Dual-path RNN: efficient long sequence modeling for\n time-domain single-channel speech separation\", Yi Luo, Zhuo Chen\n and Takuya Yoshioka. 
https://arxiv.org/abs/1910.06379\n \"\"\"\n\n def __init__(\n self,\n in_chan,\n hid_size,\n norm_type=\"gLN\",\n bidirectional=True,\n rnn_type=\"LSTM\",\n num_layers=1,\n dropout=0,\n ):\n super(DPRNNBlock, self).__init__()\n # IntraRNN and linear projection layer (always bi-directional)\n self.intra_RNN = SingleRNN(\n rnn_type, in_chan, hid_size, num_layers, dropout=dropout, bidirectional=True\n )\n self.intra_linear = nn.Linear(hid_size * 2, in_chan)\n self.intra_norm = norms.get(norm_type)(in_chan)\n # InterRNN block and linear projection layer (uni or bi-directional)\n self.inter_RNN = SingleRNN(\n rnn_type, in_chan, hid_size, num_layers, dropout=dropout, bidirectional=bidirectional\n )\n num_direction = int(bidirectional) + 1\n self.inter_linear = nn.Linear(hid_size * num_direction, in_chan)\n self.inter_norm = norms.get(norm_type)(in_chan)\n\n def forward(self, x):\n \"\"\" Input shape : [batch, feats, chunk_size, num_chunks] \"\"\"\n B, N, K, L = x.size()\n output = x # for skip connection\n # Intra-chunk processing\n x = x.transpose(1, -1).reshape(B * L, K, N)\n x = self.intra_RNN(x)\n x = self.intra_linear(x)\n x = x.reshape(B, L, K, N).transpose(1, -1)\n x = self.intra_norm(x)\n output = output + x\n # Inter-chunk processing\n x = output.transpose(1, 2).transpose(2, -1).reshape(B * K, L, N)\n x = self.inter_RNN(x)\n x = self.inter_linear(x)\n x = x.reshape(B, K, L, N).transpose(1, -1).transpose(2, -1)\n x = self.inter_norm(x)\n return output + x\n\n\nclass DPRNN(nn.Module):\n \"\"\" Dual-path RNN Network for Single-Channel Source Separation\n introduced in [1].\n\n Args:\n in_chan (int): Number of input filters.\n n_src (int): Number of masks to estimate.\n out_chan (int or None): Number of bins in the estimated masks.\n Defaults to `in_chan`.\n bn_chan (int): Number of channels after the bottleneck.\n Defaults to 128.\n hid_size (int): Number of neurons in the RNNs cell state.\n Defaults to 128.\n chunk_size (int): window size of overlap and add processing.\n Defaults to 100.\n hop_size (int or None): hop size (stride) of overlap and add processing.\n Default to `chunk_size // 2` (50% overlap).\n n_repeats (int): Number of repeats. Defaults to 6.\n norm_type (str, optional): Type of normalization to use. To choose from\n\n - ``'gLN'``: global Layernorm\n - ``'cLN'``: channelwise Layernorm\n mask_act (str, optional): Which non-linear function to generate mask.\n bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN\n (Intra-Chunk is always bidirectional).\n rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,\n ``'LSTM'`` and ``'GRU'``.\n num_layers (int, optional): Number of layers in each RNN.\n dropout (float, optional): Dropout ratio, must be in [0,1].\n\n References:\n [1] \"Dual-path RNN: efficient long sequence modeling for\n time-domain single-channel speech separation\", Yi Luo, Zhuo Chen\n and Takuya Yoshioka. 
https://arxiv.org/abs/1910.06379\n \"\"\"\n\n def __init__(\n self,\n in_chan,\n n_src,\n out_chan=None,\n bn_chan=128,\n hid_size=128,\n chunk_size=100,\n hop_size=None,\n n_repeats=6,\n norm_type=\"gLN\",\n mask_act=\"relu\",\n bidirectional=True,\n rnn_type=\"LSTM\",\n num_layers=1,\n dropout=0,\n ):\n super(DPRNN, self).__init__()\n self.in_chan = in_chan\n out_chan = out_chan if out_chan is not None else in_chan\n self.out_chan = out_chan\n self.bn_chan = bn_chan\n self.hid_size = hid_size\n self.chunk_size = chunk_size\n hop_size = hop_size if hop_size is not None else chunk_size // 2\n self.hop_size = hop_size\n self.n_repeats = n_repeats\n self.n_src = n_src\n self.norm_type = norm_type\n self.mask_act = mask_act\n self.bidirectional = bidirectional\n self.rnn_type = rnn_type\n self.num_layers = num_layers\n self.dropout = dropout\n\n layer_norm = norms.get(norm_type)(in_chan)\n bottleneck_conv = nn.Conv1d(in_chan, bn_chan, 1)\n self.bottleneck = nn.Sequential(layer_norm, bottleneck_conv)\n\n # Succession of DPRNNBlocks.\n net = []\n for x in range(self.n_repeats):\n net += [\n DPRNNBlock(\n bn_chan,\n hid_size,\n norm_type=norm_type,\n bidirectional=bidirectional,\n rnn_type=rnn_type,\n num_layers=num_layers,\n dropout=dropout,\n )\n ]\n self.net = nn.Sequential(*net)\n # Masking in 3D space\n net_out_conv = nn.Conv2d(bn_chan, n_src * bn_chan, 1)\n self.first_out = nn.Sequential(nn.PReLU(), net_out_conv)\n # Gating and masking in 2D space (after fold)\n self.net_out = nn.Sequential(nn.Conv1d(bn_chan, bn_chan, 1), nn.Tanh())\n self.net_gate = nn.Sequential(nn.Conv1d(bn_chan, bn_chan, 1), nn.Sigmoid())\n self.mask_net = nn.Conv1d(bn_chan, out_chan, 1, bias=False)\n\n # Get activation function.\n mask_nl_class = activations.get(mask_act)\n # For softmax, feed the source dimension.\n if has_arg(mask_nl_class, \"dim\"):\n self.output_act = mask_nl_class(dim=1)\n else:\n self.output_act = mask_nl_class()\n\n def forward(self, mixture_w):\n \"\"\"\n Args:\n mixture_w (:class:`torch.Tensor`): Tensor of shape\n [batch, n_filters, n_frames]\n Returns:\n :class:`torch.Tensor`\n estimated mask of shape [batch, n_src, n_filters, n_frames]\n \"\"\"\n batch, n_filters, n_frames = mixture_w.size()\n output = self.bottleneck(mixture_w) # [batch, bn_chan, n_frames]\n output = unfold(\n output.unsqueeze(-1),\n kernel_size=(self.chunk_size, 1),\n padding=(self.chunk_size, 0),\n stride=(self.hop_size, 1),\n )\n n_chunks = output.size(-1)\n output = output.reshape(batch, self.bn_chan, self.chunk_size, n_chunks)\n # Apply stacked DPRNN Blocks sequentially\n output = self.net(output)\n # Map to sources with kind of 2D masks\n output = self.first_out(output)\n output = output.reshape(batch * self.n_src, self.bn_chan, self.chunk_size, n_chunks)\n # Overlap and add:\n # [batch, out_chan, chunk_size, n_chunks] -> [batch, out_chan, n_frames]\n to_unfold = self.bn_chan * self.chunk_size\n output = fold(\n output.reshape(batch * self.n_src, to_unfold, n_chunks),\n (n_frames, 1),\n kernel_size=(self.chunk_size, 1),\n padding=(self.chunk_size, 0),\n stride=(self.hop_size, 1),\n )\n # Apply gating\n output = output.reshape(batch * self.n_src, self.bn_chan, -1)\n output = self.net_out(output) * self.net_gate(output)\n # Compute mask\n score = self.mask_net(output)\n est_mask = self.output_act(score)\n est_mask = est_mask.view(batch, self.n_src, self.out_chan, n_frames)\n return est_mask\n\n def get_config(self):\n config = {\n \"in_chan\": self.in_chan,\n \"out_chan\": self.out_chan,\n \"bn_chan\": self.bn_chan,\n 
\"hid_size\": self.hid_size,\n \"chunk_size\": self.chunk_size,\n \"hop_size\": self.hop_size,\n \"n_repeats\": self.n_repeats,\n \"n_src\": self.n_src,\n \"norm_type\": self.norm_type,\n \"mask_act\": self.mask_act,\n \"bidirectional\": self.bidirectional,\n \"rnn_type\": self.rnn_type,\n \"num_layers\": self.num_layers,\n \"dropout\": self.dropout,\n }\n return config\n\n\nclass LSTMMasker(nn.Module):\n \"\"\" LSTM mask network introduced in [1], without skip connections.\n\n Args:\n in_chan (int): Number of input filters.\n n_src (int): Number of masks to estimate.\n out_chan (int or None): Number of bins in the estimated masks.\n Defaults to `in_chan`.\n rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,\n ``'LSTM'`` and ``'GRU'``.\n n_layers (int, optional): Number of layers in each RNN.\n hid_size (int): Number of neurons in the RNNs cell state.\n mask_act (str, optional): Which non-linear function to generate mask.\n bidirectional (bool, optional): Whether to use BiLSTM\n dropout (float, optional): Dropout ratio, must be in [0,1].\n\n References:\n [1]: Yi Luo et al. \"Real-time Single-channel Dereverberation and Separation\n with Time-domain Audio Separation Network\", Interspeech 2018\n \"\"\"\n\n def __init__(\n self,\n in_chan,\n n_src,\n out_chan=None,\n rnn_type=\"lstm\",\n n_layers=4,\n hid_size=512,\n dropout=0.3,\n mask_act=\"sigmoid\",\n bidirectional=True,\n ):\n super().__init__()\n self.in_chan = in_chan\n self.n_src = n_src\n out_chan = out_chan if out_chan is not None else in_chan\n self.out_chan = out_chan\n self.rnn_type = rnn_type\n self.n_layers = n_layers\n self.hid_size = hid_size\n self.dropout = dropout\n self.mask_act = mask_act\n self.bidirectional = bidirectional\n\n # Get activation function.\n mask_nl_class = activations.get(mask_act)\n # For softmax, feed the source dimension.\n if has_arg(mask_nl_class, \"dim\"):\n self.output_act = mask_nl_class(dim=1)\n else:\n self.output_act = mask_nl_class()\n\n # Create TasNet masker\n out_size = hid_size * (int(bidirectional) + 1)\n if bidirectional:\n self.bn_layer = GlobLN(in_chan)\n else:\n self.bn_layer = CumLN(in_chan)\n self.masker = nn.Sequential(\n SingleRNN(\n \"lstm\",\n in_chan,\n hidden_size=hid_size,\n n_layers=n_layers,\n bidirectional=bidirectional,\n dropout=dropout,\n ),\n nn.Linear(out_size, self.n_src * out_chan),\n self.output_act,\n )\n\n def forward(self, x):\n batch_size = x.shape[0]\n to_sep = self.bn_layer(x)\n est_masks = self.masker(to_sep.transpose(-1, -2)).transpose(-1, -2)\n est_masks = est_masks.view(batch_size, self.n_src, self.out_chan, -1)\n return est_masks\n\n def get_config(self):\n config = {\n \"in_chan\": self.in_chan,\n \"n_src\": self.n_src,\n \"out_chan\": self.out_chan,\n \"rnn_type\": self.rnn_type,\n \"n_layers\": self.n_layers,\n \"hid_size\": self.hid_size,\n \"dropout\": self.dropout,\n \"mask_act\": self.mask_act,\n \"bidirectional\": self.bidirectional,\n }\n return config\n"
] | [
[
"torch.nn.Linear",
"torch.nn.PReLU",
"torch.nn.Tanh",
"torch.nn.Conv1d",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.Sigmoid",
"torch.cat",
"torch.nn.Dropout"
]
] |
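A minimal usage sketch for the DPRNN masker in the row above. It assumes the file's module-level imports (norms, activations, DPRNNBlock, fold/unfold) resolve as in the asteroid package and that the class is importable from asteroid.masknn (an assumed import path); shapes follow the forward() docstring.

    import torch
    from asteroid.masknn import DPRNN  # assumed import path for the class shown above

    batch, n_filters, n_frames = 4, 64, 3200
    mixture_w = torch.randn(batch, n_filters, n_frames)       # encoder output [batch, n_filters, n_frames]

    masker = DPRNN(in_chan=n_filters, n_src=2, n_repeats=2)   # fewer repeats than the default for a quick test
    est_mask = masker(mixture_w)
    print(est_mask.shape)                                      # torch.Size([4, 2, 64, 3200]) per the docstring

The LSTMMasker in the same row follows the same contract: input of shape [batch, in_chan, n_frames], estimated masks of shape [batch, n_src, out_chan, n_frames].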
abhishekpratapa/turicreate | [
"f56f9ef9c138d8047037184afb6356c20bbc7f71"
] | [
"src/python/turicreate/toolkits/_mxnet/_mx_sframe_iter.py"
] | [
"# -*- coding: utf-8 -*-\n# pylint: disable= too-many-lines, redefined-builtin\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\"\"\"SFrame Data Iterator.\"\"\"\nfrom __future__ import absolute_import as _\nfrom __future__ import print_function as _\nfrom __future__ import division as _\n\nimport ctypes\nimport array as _array\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom mxnet.io import DataIter\nfrom mxnet.ndarray import NDArray\nfrom mxnet.ndarray import array\n\n\nfrom turicreate import SFrame, SArray, Image\nfrom turicreate import extensions as sf_extension\n\n\ndef _copy_from_sframe(sf, buf, start, end, shape, bias=0):\n assert isinstance(sf, SFrame)\n sf_extension.sframe_load_to_numpy(sf, buf.ctypes.data + buf.strides[0] * bias, buf.strides, shape, start, end)\n\n\ndef _copy_from_sarray(sa, buf, start, end, shape, bias=0):\n assert isinstance(sa, SArray)\n sf = SFrame({'__tmp__': sa})\n _copy_from_sframe(sf, buf, start, end, shape, bias)\n\n\ndef _init_data(data, allow_empty, default_name):\n \"\"\"Convert data into canonical form.\"\"\"\n assert (data is not None) or allow_empty\n if data is None:\n data = []\n\n if isinstance(data, (np.ndarray, NDArray)):\n data = [data]\n if isinstance(data, list):\n if not allow_empty:\n assert(len(data) > 0)\n if len(data) == 1:\n data = OrderedDict([(default_name, data[0])])\n else:\n data = OrderedDict([('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)])\n if not isinstance(data, dict):\n raise TypeError(\"Input must be NDArray, numpy.ndarray, \" + \\\n \"a list of them or dict with them as values\")\n for k, v in data.items():\n if isinstance(v, NDArray):\n data[k] = v.asnumpy()\n for k, v in data.items():\n if not isinstance(v, np.ndarray):\n raise TypeError((\"Invalid type '%s' for %s, \" % (type(v), k)) + \\\n \"should be NDArray or numpy.ndarray\")\n return list(data.items())\n\nclass SFrameIter(DataIter):\n \"\"\"DataIter from SFrame\n Provides DataIter interface for SFrame, a highly scalable columnar DataFrame.\n The iterator can simultaneously iterate over multiple columns indicated by `data_field` and `label_field`.\n `data_field` can refer either a single image typed column or multiple numerical columns (int, float or array).\n `label_field` con only refer to a single numerical column (int, float or array).\n\n Parameters\n ----------\n sframe : SFrame object\n source SFrame\n data_field : string or list(string)\n data fields of the SFrame. 
The selected fields may be either a single image typed column,\n or multiple numerical columns (int, float, array).\n label_field : string, optional\n label field in SFrame\n batch_size : int, optional\n batch size\n\n Examples\n --------\n >>> import turicreate as tc\n >>> import mxnet as mx\n\n >>> data = tc.SFrame({'x': [1,2,3], 'y': [.1, .5, .5], 'z': [[1,1,1], [2,2,2,], [3,3,3]]})\n >>> dataiter = mx.io.SFrameIter(sframe=data, data_field=['x', 'z'], label_field='z')\n\n Notes\n -----\n - Image column must contain images of the same size.\n - Array column must contain arrays of the same length.\n \"\"\"\n\n def __init__(self, sframe, data_field, label_field=None, batch_size=1, data_name='data', label_name='softmax_label'):\n\n super(SFrameIter, self).__init__()\n if not isinstance(sframe, SFrame):\n raise TypeError\n if not (isinstance(data_field, str) or isinstance(data_field, list)):\n raise TypeError\n if not (label_field is None or isinstance(label_field, str)):\n raise TypeError\n\n if type(data_field) is str:\n data_field = [data_field]\n\n self._type_check(sframe, data_field, label_field)\n self.data_field = data_field\n self.label_field = label_field\n self.data_sframe = sframe[data_field]\n if label_field is not None:\n self.label_sframe = sframe[label_field]\n\n # allocate ndarray\n data_shape = list(self.infer_shape())\n data_shape.insert(0, batch_size)\n self.data_shape = tuple(data_shape)\n self.label_shape = (batch_size, )\n self.data_ndarray = np.zeros(self.data_shape, dtype=np.float32)\n self.label_ndarray = np.zeros(self.label_shape, dtype=np.float32)\n self.data_mx_ndarray = None\n self.label_mx_ndarray = None\n self.data = _init_data(self.data_ndarray, allow_empty=False, default_name=data_name)\n self.label = _init_data(self.label_ndarray, allow_empty=True, default_name=label_name)\n # size\n self.batch_size = batch_size\n self.data_size = len(sframe)\n self.reset()\n\n @property\n def provide_data(self):\n \"\"\"The name and shape of data provided by this iterator\"\"\"\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.data]\n\n @property\n def provide_label(self):\n \"\"\"The name and shape of label provided by this iterator\"\"\"\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.label]\n\n def reset(self):\n self.pad = 0\n self.cursor = 0\n self.has_next = True\n\n def _type_check(self, sframe, data_field, label_field):\n if label_field is not None:\n label_column_type = sframe[label_field].dtype\n if label_column_type not in [int, float]:\n raise TypeError('Unexpected type for label_field \\\"%s\\\". Expect int or float, got %s' %\n (label_field, str(label_column_type)))\n for col in data_field:\n col_type = sframe[col].dtype\n if col_type not in [int, float, _array.array, Image]:\n raise TypeError('Unexpected type for data_field \\\"%s\\\". 
Expect int, float, array or image, got %s' %\n (col, str(col_type)))\n\n def _infer_column_shape(self, sarray):\n dtype = sarray.dtype\n if (dtype in [int, float]):\n return (1, )\n elif dtype is _array.array:\n lengths = sarray.item_length()\n if lengths.min() != lengths.max():\n raise ValueError('Array column does not have the same length')\n else:\n return (lengths.max(), )\n elif dtype is Image:\n first_image = sarray.head(1)[0]\n if first_image is None:\n raise ValueError('Column cannot contain missing value')\n return (first_image.channels, first_image.height, first_image.width)\n\n def infer_shape(self):\n ret = None\n features = self.data_sframe.column_names()\n assert len(features) > 0\n if len(features) > 1:\n # If more than one feature, all features must be numeric or array\n shape = 0\n for col in features:\n colshape = self._infer_column_shape(self.data_sframe[col])\n if len(colshape) != 1:\n raise ValueError('Only one column is allowed if input is image typed')\n shape += colshape[0]\n ret = (shape,)\n else:\n ret = self._infer_column_shape(self.data_sframe[features[0]])\n return ret\n\n def _copy(self, start, end, bias=0):\n _copy_from_sframe(self.data_sframe, self.data_ndarray, start, end, self.data_shape, bias)\n self.data_mx_ndarray = None\n if self.label_field is not None:\n _copy_from_sarray(self.label_sframe, self.label_ndarray, start, end, (self.batch_size, 1), bias)\n self.label_mx_ndarray = None\n\n def iter_next(self):\n if self.has_next:\n start = self.cursor\n end = start + self.batch_size\n if end >= self.data_size:\n self.has_next = False\n self.pad = end - self.data_size\n end = self.data_size\n self._copy(start, end)\n if self.pad > 0:\n bias = self.batch_size - self.pad\n start = 0\n end = self.pad\n self._copy(start, end, bias)\n self.cursor = self.pad\n else:\n self.cursor += self.batch_size\n return True\n else:\n return False\n\n def getdata(self):\n if self.data_mx_ndarray is None:\n self.data_mx_ndarray = array(self.data_ndarray)\n return [self.data_mx_ndarray]\n\n def getlabel(self):\n if self.label_field is None:\n return None\n if self.label_mx_ndarray is None:\n self.label_mx_ndarray = array(self.label_ndarray)\n return [self.label_mx_ndarray]\n\n def getpad(self):\n return self.pad\n\n\nclass SFrameImageIter(SFrameIter):\n \"\"\"Image Data Iterator from SFrame\n Provide the SFrameIter like interface with options to normalize and augment image data.\n\n Parameters\n ----------\n sframe : SFrame object\n source SFrame\n data_field : string\n image data field of the SFrame.\n label_field : string, optional\n label field in SFrame\n batch_size : int, optional\n batch size\n image_shape : tuple, optional\n if specified, each image will be resized to this (channel, height, width)\n mean_r : float, optional\n normalize the image by subtracting the mean value of r channel, or the first channel for\n mean_g : float, optional\n normalize the image by subtracting the mean value of g channel\n mean_b : float, optional\n normalize the image by subtracting the mean value of b channel\n mean_nd : np.ndarray, optional\n normalize the image by subtracting the ndarray of mean pixel values.\n The mean_nd array stores the pixel values in the order of [height, width, channel]\n This option will suppress mean_r, mean_g, and mean_b.\n scale : float, optional\n multiply each pixel value by the scale (this operation is performed after mean subtraction)\n random_flip : bool, optional\n Randomly flip horizontally on the fly, useful to augment data for training neural 
network.\n **kwargs :\n placeholder for new parameters\n\n Examples\n --------\n >>> import turicreate as tc\n >>> import mxnet as mx\n\n >>> image_data = tc.image_analysis.load_images('/path/to/directory/with/images')\n >>> image_data_iter = mx.io.SFrameImageIter(sframe=data, data_field=['image'], label_field='label', batch_size=100,\n mean_r=117, scale=0.5)\n\n Notes\n -----\n - Image column must contain images of the same size if image_shape is not provided.\n \"\"\"\n\n def __init__(self, sframe, data_field, label_field=None, batch_size=1,\n image_shape=None,\n data_name='data', label_name='softmax_label',\n mean_r=0.0,\n mean_g=0.0,\n mean_b=0.0,\n mean_nd=None,\n scale=1.0,\n random_flip=False,\n **kwargs):\n if image_shape is not None and len(image_shape) != 3:\n raise ValueError('image_shape must be a (channels, height, width) tuple')\n self.image_shape = image_shape\n\n super(SFrameImageIter, self).__init__(sframe, data_field, label_field, batch_size,\n data_name, label_name)\n\n # Mean subtraction parameters\n self._rgb_mask = np.zeros(self.data_shape)\n if mean_nd is None:\n nchannels = self.data_shape[1]\n mean_per_channel = [mean_r, mean_g, mean_b][:nchannels]\n for i in range(nchannels):\n self._rgb_mask[:, i, :, :] = mean_per_channel[i]\n elif type(mean_nd) == np.ndarray:\n mean_nd = np.swapaxes(mean_nd, 0, 2) # h, w, c -> c, w, h\n mean_nd = np.swapaxes(mean_nd, 1, 2) # c, w, h -> c, h, w\n if mean_nd.shape == self.data_shape[1:]:\n for i in range(self.data_shape[0]):\n self._rgb_mask[i,:] = mean_nd\n else:\n raise ValueError('Shape mismatch. mean_nd has different shape from input image')\n else:\n raise TypeError('mean_nd must be type np.ndarray')\n self._rgb_mask = array(self._rgb_mask)\n\n # Rescale parameters\n self._scale = scale\n\n #Augmentation parameters\n self._random_flip = random_flip\n def _type_check(self, sframe, data_field, label_field):\n if label_field is not None:\n label_column_type = sframe[label_field].dtype\n if label_column_type not in [int, float]:\n raise TypeError('Unexpected type for label_field \\\"%s\\\". Expect int or float, got %s' %\n (label_field, str(label_column_type)))\n for col in data_field:\n col_type = sframe[col].dtype\n if col_type not in [Image]:\n raise TypeError('Unexpected type for data_field \\\"%s\\\". Expect or image, got %s' %\n (col, str(col_type)))\n\n def _infer_column_shape(self, sarray):\n dtype = sarray.dtype\n if not dtype is Image:\n raise TypeError('Data column must be image type')\n\n if self.image_shape is not None:\n return self.image_shape\n\n first_image = sarray.head(1)[0]\n if first_image is None:\n raise ValueError('Column cannot contain missing value')\n return (first_image.channels, first_image.height, first_image.width)\n\n def iter_next(self):\n ret = super(self.__class__, self).iter_next()\n # Postprocess: normalize by mean, scale, ...\n self.data_ndarray = (self.data_ndarray - self._rgb_mask.asnumpy()) * self._scale\n # random flip\n if self._random_flip:\n self.data_ndarray = array(self.data_ndarray[:,:,:,::(np.random.randint(2)- 0.5) * 2])\n return ret\n"
] | [
[
"numpy.swapaxes",
"numpy.random.randint",
"numpy.zeros"
]
] |
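A short, hypothetical sketch of driving the SFrameIter defined above through the standard mxnet DataIter protocol; the column names and values are invented, and the label column is numeric as _type_check requires.

    import turicreate as tc
    from turicreate.toolkits._mxnet._mx_sframe_iter import SFrameIter  # the file shown above

    data = tc.SFrame({'x': [1, 2, 3, 4], 'y': [0.1, 0.5, 0.5, 0.9], 'z': [[1, 1], [2, 2], [3, 3], [4, 4]]})
    it = SFrameIter(sframe=data, data_field=['x', 'z'], label_field='y', batch_size=2)

    print(it.provide_data, it.provide_label)   # [('data', (2, 3))] [('softmax_label', (2,))]
    for batch in it:                           # the DataIter base class yields DataBatch objects
        print(batch.data[0].shape, batch.label[0].shape)
    it.reset()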
junhaoim/deep-learning-from-scratch | [
"26d26ca6182733af2bc2dfd079a268a4fc6196a1"
] | [
"ch06/overfit_weight_decay.py"
] | [
"# coding: utf-8\nimport os\nimport sys\n\nsys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom dataset.mnist import load_mnist\nfrom common.multi_layer_net import MultiLayerNet\nfrom common.optimizer import SGD\n\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)\n\n# 過学習を再現するために、学習データを削減\nx_train = x_train[:300]\nt_train = t_train[:300]\n\n# weight decay(荷重減衰)の設定 =======================\n# weight_decay_lambda = 0 # weight decayを使用しない場合\nweight_decay_lambda = 0.1\n# ====================================================\n\nnetwork = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100], output_size=10,\n weight_decay_lambda=weight_decay_lambda)\noptimizer = SGD(lr=0.01)\n\nmax_epochs = 201\ntrain_size = x_train.shape[0]\nbatch_size = 100\n\ntrain_loss_list = []\ntrain_acc_list = []\ntest_acc_list = []\n\niter_per_epoch = max(train_size / batch_size, 1)\nepoch_cnt = 0\n\nfor i in range(1000000000):\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n grads = network.gradient(x_batch, t_batch)\n optimizer.update(network.params, grads)\n\n if i % iter_per_epoch == 0:\n train_acc = network.accuracy(x_train, t_train)\n test_acc = network.accuracy(x_test, t_test)\n train_acc_list.append(train_acc)\n test_acc_list.append(test_acc)\n\n print(\"epoch:\" + str(epoch_cnt) + \", train acc:\" + str(train_acc) + \", test acc:\" + str(test_acc))\n\n epoch_cnt += 1\n if epoch_cnt >= max_epochs:\n break\n\n# 3.グラフの描画==========\nmarkers = {'train': 'o', 'test': 's'}\nx = np.arange(max_epochs)\nplt.plot(x, train_acc_list, marker='o', label='train', markevery=10)\nplt.plot(x, test_acc_list, marker='s', label='test', markevery=10)\nplt.xlabel(\"epochs\")\nplt.ylabel(\"accuracy\")\nplt.ylim(0, 1.0)\nplt.legend(loc='lower right')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.random.choice",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel"
]
] |
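For context on the row above: weight decay adds an L2 penalty to the loss and a matching term to every weight gradient. The helper below is an illustrative sketch of that penalty, not the book's actual MultiLayerNet implementation.

    import numpy as np

    def l2_penalty(weights, weight_decay_lambda):
        # 0.5 * lambda * sum of squared weights, accumulated over every weight matrix
        return 0.5 * weight_decay_lambda * sum(np.sum(W ** 2) for W in weights)

    W1, W2 = np.random.randn(784, 100), np.random.randn(100, 10)
    data_loss = 1.234                                        # placeholder cross-entropy value
    total_loss = data_loss + l2_penalty([W1, W2], weight_decay_lambda=0.1)
    # On the gradient side, each dW picks up an extra weight_decay_lambda * W term.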
psychicmario/continuum | [
"22f60d3fc71553f1334cffa7e88a1727cdf2413c"
] | [
"continuum/scenarios/class_incremental.py"
] | [
"import warnings\nfrom copy import copy\nfrom typing import Callable, List, Union\n\nimport numpy as np\n\nfrom continuum.datasets import _ContinuumDataset\nfrom continuum.scenarios import _BaseScenario\n\n\nclass ClassIncremental(_BaseScenario):\n \"\"\"Continual Loader, generating datasets for the consecutive tasks.\n\n Scenario: Each new tasks bring new classes only\n\n :param cl_dataset: A continual dataset.\n :param nb_tasks: The scenario number of tasks.\n :param increment: Either number of classes per task (e.g. increment=2),\n or a list specifying for every task the amount of new classes\n (e.g. increment=[5,1,1,1,1]).\n :param initial_increment: A different task size applied only for the first task.\n Desactivated if `increment` is a list.\n :param transformations: A list of transformations applied to all tasks.\n :param class_order: An optional custom class order, used for NC.\n e.g. [0,1,2,3,4,5,6,7,8,9] or [5,2,4,1,8,6,7,9,0,3]\n \"\"\"\n\n def __init__(\n self,\n cl_dataset: _ContinuumDataset,\n nb_tasks: int = 0,\n increment: Union[List[int], int] = 0,\n initial_increment: int = 0,\n transformations: List[Callable] = None,\n class_order: Union[List[int], None]=None\n ) -> None:\n\n super().__init__(cl_dataset=cl_dataset, nb_tasks=nb_tasks, transformations=transformations)\n\n self.increment = increment\n self.initial_increment = initial_increment\n self.class_order = class_order\n\n self._nb_tasks = self._setup(nb_tasks)\n\n def _setup(self, nb_tasks: int) -> int:\n\n x, y, _ = self.cl_dataset.get_data()\n unique_classes = np.unique(y)\n\n self.class_order = self.class_order or self.cl_dataset.class_order or list(\n range(len(unique_classes))\n )\n\n if len(np.unique(self.class_order)) != len(self.class_order):\n raise ValueError(f\"Invalid class order, duplicates found: {self.class_order}.\")\n\n new_y = np.vectorize(self.class_order.index)(y)\n\n # Increments setup\n self.class_order = np.array(self.class_order)\n if nb_tasks <= 0:\n # The number of tasks is left unspecified, thus it will be determined\n # by the specified increments.\n self.increments = self._define_increments(\n self.increment, self.initial_increment, unique_classes\n )\n else:\n # A fixed number of tasks is required, thus the all increments will\n # be equal among tasks.\n if self.increment > 0:\n warnings.warn(\n f\"When both `nb_tasks` (given value = {nb_tasks}) and \"\n f\"`increment` (given value = {self.increment} are both set, \"\n \"we only consider the number of tasks. 
The `increment` \"\n \"argument is ignored.\"\n )\n increment = len(unique_classes) / nb_tasks\n if not increment.is_integer():\n raise Exception(\n f\"Invalid number of tasks ({nb_tasks}) for {len(unique_classes)} classes.\"\n )\n self.increments = [int(increment) for _ in range(nb_tasks)]\n\n # compute task label\n task_ids = self._set_task_labels(new_y)\n\n # Dataset with task label\n self.dataset = (x, new_y, task_ids) # (data, class label, task label)\n\n return len(np.unique(task_ids))\n\n def _set_task_labels(self, y: np.ndarray) -> np.ndarray:\n \"\"\"For each data point, defines a task associated with the data.\n\n :param y: label tensor\n :param increments: increments contains information about classes per tasks\n :return: tensor of task label\n \"\"\"\n t = copy(y) # task label as same size as y\n\n for task_index, _ in enumerate(self.increments):\n max_class = sum(self.increments[:task_index + 1])\n min_class = sum(self.increments[:task_index]) # 0 when task_index == 0.\n\n indexes = np.where(np.logical_and(y >= min_class, y < max_class))[0]\n t[indexes] = task_index\n return t\n\n def _define_increments(\n self, increment: Union[List[int], int], initial_increment: int, unique_classes: List[int]\n ) -> List[int]:\n\n if isinstance(increment, list):\n # Check if the total number of classes is compatible\n # with increment list and self.nb_classes\n if not sum(increment) == len(unique_classes):\n raise Exception(\"The increment list is not compatible with the number of classes\")\n\n increments = increment\n elif isinstance(increment, int) and increment > 0:\n increments = []\n if initial_increment:\n increments.append(initial_increment)\n\n nb_tasks = (len(unique_classes) - initial_increment) / increment\n if not nb_tasks.is_integer():\n raise Exception(\n \"The tasks won't have an equal number of classes\"\n f\" with {len(self.class_order)} and increment {increment}\"\n )\n increments.extend([increment for _ in range(int(nb_tasks))])\n else:\n raise TypeError(f\"Invalid increment={increment}, it must be an int > 0.\")\n\n return increments\n\n def get_original_targets(self, targets: np.ndarray) -> np.ndarray:\n \"\"\"Returns the original targets not changed by the custom class order.\n\n :param targets: An array of targets, as provided by the task datasets.\n :return: An array of targets, with their original values.\n \"\"\"\n return self.class_order[targets]\n\n def _select_data_by_classes(self, min_class_id: int, max_class_id: int):\n \"\"\"Selects a subset of the whole data for a given set of classes.\n\n :param min_class_id: The minimum class id.\n :param max_class_id: The maximum class id.\n :return: A tuple of numpy array, the first item being the data and the\n second the associated targets.\n \"\"\"\n x_, y_, _ = self.dataset\n\n indexes = np.where(np.logical_and(y_ >= min_class_id, y_ < max_class_id))[0]\n selected_x = x_[indexes]\n selected_y = y_[indexes]\n\n if self.cl_dataset.need_class_remapping:\n # A remapping of the class ids is done to handle some special cases\n # like PermutedMNIST or RotatedMNIST.\n selected_y = self.cl_dataset.class_remapping(selected_y)\n\n return selected_x, selected_y\n"
] | [
[
"numpy.array",
"numpy.logical_and",
"numpy.vectorize",
"numpy.unique"
]
] |
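A minimal sketch of building the scenario above with the continuum package. The MNIST constructor arguments and the fact that a scenario is iterable, yielding one task set per increment, are assumptions based on that library's conventions rather than on this file alone.

    from continuum.datasets import MNIST
    from continuum.scenarios import ClassIncremental

    dataset = MNIST("data", train=True, download=True)   # 10 classes
    scenario = ClassIncremental(dataset, increment=2)     # 5 tasks with 2 new classes each

    for task_id, taskset in enumerate(scenario):          # assumed iteration protocol
        print(task_id, len(taskset))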
Goda-Research-Group/MLMC_stochastic_gradient | [
"5c90827192d4d0fbf9415013a9df700b6978c330"
] | [
"mlmc_eig_grad/visualize_path.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport mlmc_eig_grad.models as models\n\n\ndef convergence_with_path(paths_, inner_samples, labels, filename):\n\n if len(paths_) == 1:\n colors = [0]\n else:\n colors = np.arange(len(paths_)) * 0.8 / (len(paths_) - 1)\n\n plt.figure(figsize=(13, 10))\n plt.xscale('log')\n plt.yscale('log')\n\n for i in range(len(paths_)):\n paths = np.array(paths_[i]).squeeze()\n times = np.arange(1, paths.shape[1])\n distance_mse = np.mean((paths - models.optimal_xi_test) ** 2, axis=0)\n plt.plot(\n times * inner_samples[i],\n distance_mse[1:],\n color=str(colors[i]),\n linewidth=1.3,\n label=labels[i],\n )\n plt.legend(fontsize=15)\n plt.xlabel(\"$model evaluation$\", fontsize=22)\n plt.ylabel(\"$mse$\", fontsize=22)\n\n plt.savefig(filename + \".eps\")\n print(\"The graphs has been saved at [\" + filename + \".eps].\")\n plt.close()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.yscale",
"numpy.arange",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.xlabel",
"numpy.mean"
]
] |
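A sketch of calling convergence_with_path from the row above with synthetic optimization paths. It assumes mlmc_eig_grad.models.optimal_xi_test is defined (every path is compared against it), and the shapes are invented: one entry per method, each holding several repeated runs of iterates.

    import numpy as np
    from mlmc_eig_grad.visualize_path import convergence_with_path

    rng = np.random.default_rng(0)
    # Two methods, 5 repeated runs each, 200 iterations per run, scalar iterate.
    paths_ = [0.01 * rng.normal(size=(5, 200, 1)).cumsum(axis=1) for _ in range(2)]
    convergence_with_path(
        paths_,
        inner_samples=[1, 10],        # model evaluations per iteration, one entry per method
        labels=["SG", "MLMC-SG"],     # made-up legend entries
        filename="convergence",       # the function writes convergence.eps
    )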
jlim13/pytorch.sngan_projection | [
"ba8630e5c0ef317399c7eba3d05daa72e9c9d034"
] | [
"test.py"
] | [
"# Training script for tiny-imagenet.\n# Again, this script has a lot of bugs everywhere.\nimport argparse\nimport datetime\nimport json\nimport os\nimport shutil\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport tqdm\n\nimport evaluation\nimport losses as L\nfrom models.discriminators.snresnet64 import SNResNetConcatDiscriminator\nfrom models.discriminators.snresnet64 import SNResNetProjectionDiscriminator\nfrom models.generators.resnet64 import ResNetGenerator\nfrom models import inception\nimport utils\n\nfrom datasets import cifar10, mnist\n\ndef cycle(iterable):\n while True:\n for x in iterable:\n yield x\n\n# Copied from https://github.com/naoto0804/pytorch-AdaIN/blob/master/sampler.py#L5-L15\ndef InfiniteSampler(n):\n # i = 0\n i = n - 1\n order = np.random.permutation(n)\n while True:\n yield order[i]\n i += 1\n if i >= n:\n np.random.seed()\n order = np.random.permutation(n)\n i = 0\n\n\n# Copied from https://github.com/naoto0804/pytorch-AdaIN/blob/master/sampler.py#L18-L26\nclass InfiniteSamplerWrapper(data.sampler.Sampler):\n def __init__(self, data_source):\n self.num_samples = len(data_source)\n\n def __iter__(self):\n return iter(InfiniteSampler(self.num_samples))\n\n def __len__(self):\n return 2 ** 31\n\n\n\n\ndef decay_lr(opt, max_iter, start_iter, initial_lr):\n \"\"\"Decay learning rate linearly till 0.\"\"\"\n coeff = -initial_lr / (max_iter - start_iter)\n for pg in opt.param_groups:\n pg['lr'] += coeff\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n # Dataset configuration\n parser.add_argument('--cGAN', default=False, action='store_true',\n help='to train cGAN, set this ``True``. default: False')\n parser.add_argument('--data_root', type=str, default='tiny-imagenet-200',\n help='path to dataset root directory. default: tiny-imagenet-200')\n parser.add_argument('--batch_size', '-B', type=int, default=64,\n help='mini-batch size of training data. default: 64')\n parser.add_argument('--eval_batch_size', '-eB', default=None,\n help='mini-batch size of evaluation data. default: None')\n parser.add_argument('--num_workers', type=int, default=8,\n help='Number of workers for training data loader. default: 8')\n # Generator configuration\n parser.add_argument('--gen_num_features', '-gnf', type=int, default=64,\n help='Number of features of generator (a.k.a. nplanes or ngf). default: 64')\n parser.add_argument('--gen_dim_z', '-gdz', type=int, default=128,\n help='Dimension of generator input noise. default: 128')\n parser.add_argument('--gen_bottom_width', '-gbw', type=int, default=4,\n help='Initial size of hidden variable of generator. default: 4')\n parser.add_argument('--gen_distribution', '-gd', type=str, default='normal',\n help='Input noise distribution: normal (default) or uniform.')\n # Discriminator (Critic) configuration\n parser.add_argument('--dis_arch_concat', '-concat', default=False, action='store_true',\n help='If use concat discriminator, set this true. default: False')\n parser.add_argument('--dis_emb', type=int, default=128,\n help='Parameter for concat discriminator. default: 128')\n parser.add_argument('--dis_num_features', '-dnf', type=int, default=64,\n help='Number of features of discriminator (a.k.a nplanes or ndf). 
default: 64')\n # Optimizer settings\n parser.add_argument('--lr', type=float, default=0.0002,\n help='Initial learning rate of Adam. default: 0.0002')\n parser.add_argument('--beta1', type=float, default=0.0,\n help='beta1 (betas[0]) value of Adam. default: 0.0')\n parser.add_argument('--beta2', type=float, default=0.9,\n help='beta2 (betas[1]) value of Adam. default: 0.9')\n parser.add_argument('--lr_decay_start', '-lds', type=int, default=50000,\n help='Start point of learning rate decay. default: 50000')\n # Training setting\n parser.add_argument('--seed', type=int, default=46,\n help='Random seed. default: 46 (derived from Nogizaka46)')\n parser.add_argument('--max_iteration', '-N', type=int, default=100000,\n help='Max iteration number of training. default: 100000')\n parser.add_argument('--n_dis', type=int, default=5,\n help='Number of discriminator updater per generator updater. default: 5')\n parser.add_argument('--num_classes', '-nc', type=int, default=0,\n help='Number of classes in training data. No need to set. default: 0')\n parser.add_argument('--loss_type', type=str, default='hinge',\n help='loss function name. hinge (default) or dcgan.')\n parser.add_argument('--relativistic_loss', '-relloss', default=False, action='store_true',\n help='Apply relativistic loss or not. default: False')\n parser.add_argument('--calc_FID', default=False, action='store_true',\n help='If calculate FID score, set this ``True``. default: False')\n parser.add_argument('--transform_space', type=str, default=None,\n help='Which space to smack a transformer')\n # Log and Save interval configuration\n parser.add_argument('--results_root', type=str, default='results',\n help='Path to results directory. default: results')\n parser.add_argument('--no_tensorboard', action='store_true', default=False,\n help='If you dislike tensorboard, set this ``False``. default: True')\n parser.add_argument('--no_image', action='store_true', default=False,\n help='If you dislike saving images on tensorboard, set this ``True``. default: False')\n parser.add_argument('--checkpoint_interval', '-ci', type=int, default=1000,\n help='Interval of saving checkpoints (model and optimizer). default: 1000')\n parser.add_argument('--log_interval', '-li', type=int, default=100,\n help='Interval of showing losses. default: 100')\n parser.add_argument('--eval_interval', '-ei', type=int, default=1000,\n help='Interval for evaluation (save images and FID calculation). default: 1000')\n parser.add_argument('--n_eval_batches', '-neb', type=int, default=100,\n help='Number of mini-batches used in evaluation. default: 100')\n parser.add_argument('--n_fid_images', '-nfi', type=int, default=5000,\n help='Number of images to calculate FID. default: 5000')\n parser.add_argument('--test', default=False, action='store_true',\n help='If test this python program, set this ``True``. default: False')\n # Resume training\n parser.add_argument('--args_path', default=None, help='Checkpoint args json path. default: None')\n parser.add_argument('--gen_ckpt_path', '-gcp', default=None,\n help='Generator and optimizer checkpoint path. default: None')\n parser.add_argument('--dis_ckpt_path', '-dcp', default=None,\n help='Discriminator and optimizer checkpoint path. 
default: None')\n args = parser.parse_args()\n return args\n\n\ndef sample_from_data(args, device, data_loader):\n \"\"\"Sample real images and labels from data_loader.\n\n Args:\n args (argparse object)\n device (torch.device)\n data_loader (DataLoader)\n\n Returns:\n real, y\n\n \"\"\"\n\n real, y = next(data_loader)\n real, y = real.to(device), y.to(device)\n if not args.cGAN:\n y = None\n return real, y\n\n\ndef sample_from_gen(args, device, num_classes, gen):\n \"\"\"Sample fake images and labels from generator.\n\n Args:\n args (argparse object)\n device (torch.device)\n num_classes (int): for pseudo_y\n gen (nn.Module)\n\n Returns:\n fake, pseudo_y, z\n\n \"\"\"\n\n z = utils.sample_z(\n args.batch_size, args.gen_dim_z, device, args.gen_distribution\n )\n if args.cGAN:\n pseudo_y = utils.sample_pseudo_labels(\n num_classes, args.batch_size, device\n )\n else:\n pseudo_y = None\n\n fake = gen(z, pseudo_y)\n return fake, pseudo_y, z\n\n\ndef main():\n args = get_args()\n # CUDA setting\n if not torch.cuda.is_available():\n raise ValueError(\"Should buy GPU!\")\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n device = torch.device('cuda')\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n torch.backends.cudnn.benchmark = True\n\n def _rescale(img):\n return img * 2.0 - 1.0\n\n def _noise_adder(img):\n return torch.empty_like(img, dtype=img.dtype).uniform_(0.0, 1/128.0) + img\n\n\n\n eval_dataset = cifar10.CIFAR10(root=args.data_root,\n train=False,\n download=True,\n transform=transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]),\n minority_classes = None,\n keep_ratio = None)\n eval_loader = iter(torch.utils.data.DataLoader(eval_dataset, batch_size=args.batch_size,\n sampler=InfiniteSamplerWrapper(eval_dataset),\n num_workers=args.num_workers,\n pin_memory=True))\n\n\n print(' prepared datasets...')\n\n # Prepare directories.\n num_classes = len(eval_dataset.classes)\n args.num_classes = num_classes\n\n # initialize models.\n _n_cls = num_classes if args.cGAN else 0\n gen = ResNetGenerator(\n args.gen_num_features, args.gen_dim_z, args.gen_bottom_width,\n activation=F.relu, num_classes=_n_cls, distribution=args.gen_distribution\n ).to(device)\n if args.dis_arch_concat:\n dis = SNResNetConcatDiscriminator(args.dis_num_features, _n_cls, F.relu, args.dis_emb).to(device)\n else:\n dis = SNResNetProjectionDiscriminator(args.dis_num_features, _n_cls, F.relu, args.transform_space).to(device)\n inception_model = inception.InceptionV3().to(device) if args.calc_FID else None\n\n gen = torch.nn.DataParallel(gen)\n # dis = torch.nn.DataParallel(dis)\n\n opt_gen = optim.Adam(gen.parameters(), args.lr, (args.beta1, args.beta2))\n opt_dis = optim.Adam(dis.parameters(), args.lr, (args.beta1, args.beta2))\n\n # gen_criterion = getattr(L, 'gen_{}'.format(args.loss_type))\n # dis_criterion = getattr(L, 'dis_{}'.format(args.loss_type))\n gen_criterion = L.GenLoss(args.loss_type, args.relativistic_loss)\n dis_criterion = L.DisLoss(args.loss_type, args.relativistic_loss)\n\n print(' Initialized models...\\n')\n\n if args.args_path is None:\n print (\"Please specify weights to load\")\n exit()\n else:\n print(' Load weights...\\n')\n\n prev_args, gen, opt_gen, dis, opt_dis = utils.resume_from_args(\n args.args_path, args.gen_ckpt_path, args.dis_ckpt_path\n )\n args.n_fid_batches = args.n_eval_batches\n fid_score = evaluation.evaluate(\n args, 0, gen, device, inception_model, eval_loader, 
to_save=False\n )\n print (fid_score)\n\n\n\nif __name__ == '__main__':\n torch.multiprocessing.set_start_method('spawn')\n main()\n"
] | [
[
"torch.cuda.manual_seed_all",
"torch.empty_like",
"numpy.random.permutation",
"torch.manual_seed",
"numpy.random.seed",
"torch.set_default_tensor_type",
"torch.multiprocessing.set_start_method",
"torch.cuda.is_available",
"torch.nn.DataParallel",
"torch.device"
]
] |
csgwon/dl-pipeline | [
"5ac2cdafe0daac675d3f3e810918133de3466f8a"
] | [
"flaskapp/namecnn.py"
] | [
"from tools import *\nimport torch\nimport numpy as np\n\nimport sys\n\ncharcnn = torch.load('/var/www/html/flaskapp/charcnn.pth')\n\ncharcnn.train(False)\n\ndef predict(name):\n from torch.autograd import Variable\n name = encode_input(name)\n name = Variable(torch.from_numpy(name).float())\n name = name.view(1,-1,max_name_len)\n preds = charcnn(name)\n top_pred, index = torch.max(preds, dim=1)\n return labels[index.data.tolist()[0]]\n\ndef predict2(num):\n return str(np.sqrt(float(num)))\n\n#print(predict(sys.argv[1]))\n"
] | [
[
"torch.from_numpy",
"torch.load",
"torch.max"
]
] |
shbang91/PnC | [
"880cbbcf96a48a93a0ab646634781e4f112a71f6"
] | [
"Addition/PythonPlotter/DracoBip/plot_joint.py"
] | [
"import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport os\n\nfigure_number = 0\ncol_index = 0\nrow_index = 0\n\nfile_path = os.getcwd() + \"/../../../ExperimentDataCheck/\"\n\n## read files\ndata_q = \\\nnp.genfromtxt(file_path+'q.txt', delimiter=None, dtype=(float))\ndata_jpos_des = \\\nnp.genfromtxt(file_path+'jpos_des.txt', delimiter=None, dtype=(float))\n\ndata_qdot = \\\nnp.genfromtxt(file_path+'qdot.txt', delimiter=None, dtype=(float))\ndata_jvel_des = \\\nnp.genfromtxt(file_path+'jvel_des.txt', delimiter=None, dtype=(float))\n\ndata_des_jacc_cmd = \\\nnp.genfromtxt(file_path+'des_jacc_cmd.txt', delimiter=None, dtype=(float))\n\ndata_command = \\\nnp.genfromtxt(file_path+'command.txt', delimiter=None, dtype=(float))\n\ndata_x = np.genfromtxt(file_path+'time.txt', delimiter='\\n', dtype=(float))\nst_idx = 1\nend_idx = len(data_x) - 10\ndata_x = data_x[st_idx:end_idx]\n\ndata_phse = np.genfromtxt(file_path+'phase.txt', delimiter=None, dtype=(float))\ndata_phse = data_phse[st_idx:end_idx]\nphseChange = []\nfor i in range(0,len(data_x)-1):\n if data_phse[i] != data_phse[i+1]:\n phseChange.append(i)\n else:\n pass\n\naxes = plt.gca()\n\nleft_leg_idx = [0, 1, 2, 3, 4] ## left leg\nright_leg_idx = [5, 6, 7, 8, 9] ## right leg\nn_leg = len(left_leg_idx)\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('left leg jpos(hy, hr, hp, kp, ap)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_q[st_idx:end_idx, left_leg_idx[i-1]+6], 'b-')\n plt.plot(data_x, data_jpos_des[st_idx:end_idx, left_leg_idx[i-1]], 'r-')\n plt.grid(True)\n for j in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('left leg jvel(hy, hr, hp, kp, ap)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_qdot[st_idx:end_idx, left_leg_idx[i-1]+6], 'b-')\n plt.plot(data_x, data_jvel_des[st_idx:end_idx, left_leg_idx[i-1]], 'r-')\n plt.grid(True)\n for j in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('left leg command(hy, hr, hp, kp, ap, ar)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_command[st_idx:end_idx, left_leg_idx[i-1]], 'b-')\n plt.grid(True)\n for j in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('right leg jpos(hy, hr, hp, kp, ap, ar)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_q[st_idx:end_idx, right_leg_idx[i-1]+6], 'b-')\n plt.plot(data_x, data_jpos_des[st_idx:end_idx, right_leg_idx[i-1]], 'r-')\n plt.grid(True)\n for j in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('right leg jvel(hy, hr, hp, kp, ap, ar)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_qdot[st_idx:end_idx, right_leg_idx[i-1]+6], 'b-')\n plt.plot(data_x, data_jvel_des[st_idx:end_idx, right_leg_idx[i-1]], 'r-')\n plt.grid(True)\n for j 
in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('right leg command(hy, hr, hp, kp, ap, ar)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_command[st_idx:end_idx, right_leg_idx[i-1]], 'b-')\n plt.grid(True)\n for j in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('right leg des_jacc_cmd(hy, hr, hp, kp, ap, ar)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_des_jacc_cmd[st_idx:end_idx, right_leg_idx[i-1]], 'b-')\n plt.grid(True)\n for j in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\nfig=plt.figure(figure_number)\nfig.canvas.set_window_title('left leg des_jacc_cmd(hy, hr, hp, kp, ap, ar)')\nfor i in range(1, 6, 1):\n ax = plt.subplot(5, 1, i)\n plt.plot(data_x, data_des_jacc_cmd[st_idx:end_idx, left_leg_idx[i-1]], 'b-')\n plt.grid(True)\n for j in phseChange:\n plt.axvline(x=data_x[j],color='indigo',linestyle='-')\n plt.text(data_x[j],ax.get_ylim()[1],'%d'%(data_phse[j]),color='indigo')\nfigure_number += 1\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.use",
"matplotlib.pyplot.plot",
"numpy.genfromtxt"
]
] |
shivam124081/datumaro | [
"3aa8842a3649ec8e05c0bfe042794823375b812b"
] | [
"datumaro/components/launcher.py"
] | [
"# Copyright (C) 2019-2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport numpy as np\n\nfrom datumaro.components.extractor import Transform, LabelCategories, AnnotationType\nfrom datumaro.util import take_by\n\n\n# pylint: disable=no-self-use\nclass Launcher:\n def __init__(self, model_dir=None):\n pass\n\n def launch(self, inputs):\n raise NotImplementedError()\n\n def categories(self):\n return None\n\n\n# pylint: enable=no-self-use\n\n\nclass ModelTransform(Transform):\n def __init__(self, extractor, launcher, batch_size=1):\n super().__init__(extractor)\n self._launcher = launcher\n self._batch_size = batch_size\n\n def __iter__(self):\n for batch in take_by(self._extractor, self._batch_size):\n inputs = np.array([item.image.data for item in batch])\n inference = self._launcher.launch(inputs)\n\n for item, annotations in zip(batch, inference):\n self._check_annotations(annotations)\n yield self.wrap_item(item, annotations=annotations)\n\n def get_subset(self, name):\n subset = self._extractor.get_subset(name)\n return __class__(subset, self._launcher, self._batch_size)\n\n def categories(self):\n launcher_override = self._launcher.categories()\n if launcher_override is not None:\n return launcher_override\n return self._extractor.categories()\n\n def transform_item(self, item):\n inputs = np.expand_dims(item.image, axis=0)\n annotations = self._launcher.launch(inputs)[0]\n return self.wrap_item(item, annotations=annotations)\n\n def _check_annotations(self, annotations):\n labels_count = len(\n self.categories().get(AnnotationType.label, LabelCategories()).items\n )\n\n for ann in annotations:\n label = getattr(ann, \"label\")\n if label is None:\n continue\n\n if label not in range(labels_count):\n raise Exception(\n \"Annotation has unexpected label id %s, \"\n \"while there is only %s defined labels.\" % (label, labels_count)\n )\n"
] | [
[
"numpy.array",
"numpy.expand_dims"
]
] |
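A sketch of pairing the Launcher/ModelTransform classes above with a dummy model. The Label import location and the wrapped extractor are assumptions (any Datumaro extractor whose items expose image.data and that defines at least one label category would do), so the wiring at the bottom is left commented.

    from datumaro.components.launcher import Launcher, ModelTransform
    from datumaro.components.extractor import Label  # assumed to live next to Transform/LabelCategories

    class DummyLauncher(Launcher):
        """Pretends to run a model: returns one Label(0) per input image."""
        def launch(self, inputs):
            return [[Label(0)] for _ in inputs]

    # extractor = ...  # a Datumaro extractor with image data and a label category
    # transformed = ModelTransform(extractor, DummyLauncher(), batch_size=2)
    # for item in transformed:
    #     print(item.id, item.annotations)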
jtbuckwalter/UPBC-Assignment-10-Advanced-Data-Storage-and-Retrieval | [
"a3b5cdadde2be90ab9e63ace2126648728c6e0bb"
] | [
"app.py"
] | [
"from flask import Flask, jsonify\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\nimport numpy as np\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nimport datetime as dt\nfrom sqlalchemy import create_engine, func\nimport json\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n#################################################\n# Flask Routes\n#################################################\n\[email protected](\"/\")\ndef index():\n \"\"\"API Routes.\"\"\"\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations\"\n f\"/api/v1.0/stations\"\n f\"/api/v1.0/<start>\"\n f\"/api/v1.0/<start>/<end>\"\n )\n\[email protected](\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n last_date = last_date[0]\n precip = session.query(Measurement.date).filter(Measurement.date >= last_date)\n prev_year = dt.datetime.strptime(last_date, \"%Y-%m-%d\")- dt.timedelta(days=365)\n precip_scores = session.query(Measurement.date,Measurement.prcp).filter(Measurement.date>=prev_year).all()\n\n result = dict(precip_scores)\n return jsonify(result)\n\n session.close()\n\[email protected](\"/api/v1.0/stations\")\ndef stations():\n session = Session(engine)\n stations = session.query(Station.station,Station.name).all()\n result = dict(stations)\n return jsonify(result)\n session.close()\n\n\[email protected]('/api/v1.0/<start>')\[email protected]('/api/v1.0/<start>/<end>')\ndef temperature(start=None, end=None):\n session = Session(engine)\n\n if end != None:\n temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n else:\n temps = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).all()\n\n result = list(np.ravel(temps))\n\n return jsonify(result)\n session.close()\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n"
] | [
[
"numpy.ravel"
]
] |
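With the Flask app above running locally (Flask's default http://127.0.0.1:5000), its routes can be exercised from any HTTP client; below is a small sketch using the requests package, with example dates.

    import requests

    BASE = "http://127.0.0.1:5000"

    print(requests.get(BASE + "/api/v1.0/precipitation").json())          # {date: prcp, ...}
    print(requests.get(BASE + "/api/v1.0/stations").json())               # {station id: name, ...}
    print(requests.get(BASE + "/api/v1.0/2017-01-01").json())             # [tmin, tavg, tmax] from a start date
    print(requests.get(BASE + "/api/v1.0/2017-01-01/2017-01-07").json())  # bounded by an end date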
mharradon/SHMArrays | [
"ef03b4dbd6b3adbfdb3a83afa694c120862b36ac"
] | [
"tests.py"
] | [
"from SHMArrays import SHMArrays\nimport numpy as np\nimport pdb\nfrom collections import OrderedDict\n\nn_params = 20\nsizes = [int(1e5*np.random.rand()) for i in range(n_params)]\narrays = OrderedDict([('id'+str(i),np.random.randn(size)) for i,size in zip(range(n_params),sizes)])\narrays2 = OrderedDict([('id'+str(i),np.random.randn(size)) for i,size in zip(range(n_params),sizes)])\n\nshm_array = SHMArrays(arrays)\nkeys = shm_array.get_keys()\n\n# Check that partitioning is valid\nassert((np.sort(np.concatenate(shm_array.partitioning))==np.arange(0,n_params)).all())\n\n# Check that shm is read properly (init values are ignored when keys are specified)\nshm_array2 = SHMArrays(arrays2,keys=keys)\nread_vals = shm_array2.read_arrays()\n\nfor array,read_array in zip(arrays.values(),read_vals.values()):\n assert((array==read_array).all())\n\n# Check that shm is writing properly (init values are ignored when keys are specified)\nshm_array2.write_arrays(arrays2)\nread_vals = shm_array.read_arrays()\n\nfor array,read_array in zip(arrays2.values(),read_vals.values()):\n assert((array==read_array).all())\n\n# Check that update function is applied properly \nshm_array2.update_arrays(arrays,lambda new,old: new + old)\nread_vals = shm_array.read_arrays()\n\nfor array,array2,read_array in zip(arrays.values(),arrays2.values(),read_vals.values()):\n assert((array+array2==read_array).all())\n\nprint('All passed')\n"
] | [
[
"numpy.arange",
"numpy.concatenate",
"numpy.random.randn",
"numpy.random.rand"
]
] |
behnamh217rn21/PCA_v1 | [
"dfcf8d37649c75f217262edcded5111bf8de88d1"
] | [
"pca/pca.py"
] | [
"\"\"\"pca is a python package to perform Principal Component Analysis and to make insightful plots.\"\"\"\n\n# %% Libraries\nimport colourmap as colourmap\nfrom sklearn.decomposition import PCA, SparsePCA, TruncatedSVD\n# from sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom scipy import stats\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.patches import Ellipse\nimport scipy.sparse as sp\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport wget\n\n\n# %% Association learning across all variables\nclass pca():\n \"\"\"pca module.\"\"\"\n\n def __init__(self, n_components=0.95, n_feat=25, alpha=0.05, n_std=2, onehot=False, normalize=False, detect_outliers=['ht2','spe'], random_state=None):\n \"\"\"Initialize pca with user-defined parameters.\n\n Parameters\n ----------\n n_components : [0,..,1] or [1,..number of samples-1], (default: 0.95)\n Number of TOP components to be returned. Values>0 are the number of components. Values<0 are the components that covers at least the percentage of variance.\n 0.95: Take the number of components that cover at least 95% of variance.\n k: Take the top k components\n n_feat : int, default: 10\n Number of features that explain the space the most, dervied from the loadings. This parameter is used for vizualization purposes only.\n alpha : float, default: 0.05\n Alpha to set the threshold to determine the outliers based on on the Hoteling T2 test.\n n_std : int, default: 2\n Number of standard deviations to determine the outliers using SPE/DmodX method.\n onehot : [Bool] optional, (default: False)\n Boolean: Set True if X is a sparse data set such as the output of a tfidf model. Many zeros and few numbers. Note this is different then a sparse matrix. Sparse data can be in a sparse matrix.\n normalize : bool (default : False)\n Normalize data, Z-score\n detect_outliers : list (default : ['ht2','spe'])\n None: Do not compute outliers.\n 'ht2': compute outliers based on Hotelling T2.\n 'spe': compute outliers basedon SPE/DmodX method. \n random_state : int optional\n Random state\n\n \"\"\"\n if isinstance(detect_outliers, str): detect_outliers = [detect_outliers]\n # Store in object\n self.n_components = n_components\n self.onehot = onehot\n self.normalize = normalize\n self.random_state = random_state\n self.n_feat = n_feat\n self.alpha = alpha\n self.n_std = n_std\n self.detect_outliers = detect_outliers\n\n # Make PCA fit_transform\n def transform(self, X, row_labels=None, col_labels=None, verbose=3):\n \"\"\"Transform new input data with fitted model.\n\n Parameters\n ----------\n X : array-like : Can be of type Numpy or DataFrame\n [NxM] array with columns as features and rows as samples.\n Verbose : int (default : 3)\n Print to screen. 
0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from sklearn.datasets import load_iris\n >>> import pandas as pd\n >>> from pca import pca\n >>>\n >>> # Initialize\n >>> model = pca(n_components=2, normalize=True)\n >>> # Dataset\n >>> X = pd.DataFrame(data=load_iris().data, columns=load_iris().feature_names, index=load_iris().target)\n >>>\n >>> # Gather some random samples across the classes.\n >>> idx=[0,1,2,3,4,50,51,52,53,54,55,100,101,102,103,104,105]\n >>> X_unseen = X.iloc[idx, :]\n >>>\n >>> # Label the unseen samples differently.\n >>> X.index.values[idx]=3\n >>>\n >>> # Fit transform\n >>> model.fit_transform(X)\n >>>\n >>> # Transform the \"unseen\" data with the fitted model. Note that these datapoints are not really unseen as they are readily fitted above.\n >>> # But for the sake of example, you can see that these samples will be transformed exactly on top of the orignial ones.\n >>> PCnew = model.transform(X_unseen)\n >>>\n >>> # Plot PC space\n >>> model.scatter()\n >>> # Plot the new \"unseen\" samples on top of the existing space\n >>> plt.scatter(PCnew.iloc[:, 0], PCnew.iloc[:, 1], marker='x')\n\n Returns\n -------\n pca transformed data.\n\n \"\"\"\n \n # Check type to make sure we can perform matrix operations\n if isinstance(X, list):\n X = np.array(X)\n \n # Pre-processing using scaler.\n X_scaled, row_labels, _, _ = self._preprocessing(X, row_labels, col_labels, scaler=self.results['scaler'], verbose=verbose)\n # Transform the data using fitted model.\n PCs = self.results['model'].transform(X_scaled)\n # Store in dataframe\n columns = ['PC{}'.format(i + 1) for i in np.arange(0, PCs.shape[1])]\n PCs = pd.DataFrame(data=PCs, index=row_labels, columns=columns)\n # Return\n return PCs\n\n # Make PCA fit_transform\n def fit_transform(self, X, row_labels=None, col_labels=None, verbose=3):\n \"\"\"Fit PCA on data.\n\n Parameters\n ----------\n X : array-like : Can be of type Numpy or DataFrame\n [NxM] array with columns as features and rows as samples.\n row_labels : [list of integers or strings] optional\n Used for colors.\n col_labels : [list of string] optional\n Numpy or list of strings: Name of the features that represent the data features and loadings. This should match the number of columns in the data. Use this option when using a numpy-array. For a pandas-dataframe, the column names are used but are overruled when using this parameter.\n Verbose : int (default : 3)\n Print to screen. 
0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace\n\n Returns\n -------\n dict.\n loadings : pd.DataFrame\n Structured dataframe containing loadings for PCs\n X : array-like\n Reduced dimentionsality space, the Principal Components (PCs)\n explained_var : array-like\n Explained variance for each fo the PCs (same ordering as the PCs)\n model_pca : object\n Model to be used for further usage of the model.\n topn : int\n Top n components\n pcp : int\n pcp\n col_labels : array-like\n Name of the features\n y : array-like\n Determined class labels\n\n Examples\n --------\n >>> from pca import pca\n >>> # Load example data\n >>> from sklearn.datasets import load_iris\n >>> X = pd.DataFrame(data=load_iris().data, columns=load_iris().feature_names, index=load_iris().target)\n >>>\n >>> Initialize\n >>> model = pca(n_components=3)\n >>> # Fit using PCA\n >>> results = model.fit_transform(X)\n >>>\n >>> # Make plots\n >>> fig, ax = model.scatter()\n >>> fig, ax = model.plot()\n >>> fig, ax = model.biplot()\n >>> fig, ax = model.biplot(SPE=True, hotellingt2=True)\n >>>\n >>> 3D plots\n >>> fig, ax = model.scatter3d()\n >>> fig, ax = model.biplot3d()\n >>> fig, ax = model.biplot3d(SPE=True, hotellingt2=True)\n >>>\n >>> # Normalize out PCs\n >>> X_norm = model.norm(X)\n\n \"\"\"\n \n # Check type to make sure we can perform matrix operations\n if isinstance(X, list):\n X = np.array(X)\n \n # Clean readily fitted models to ensure correct results.\n self._clean(verbose=verbose)\n # Pre-processing\n X, row_labels, col_labels, scaler = self._preprocessing(X, row_labels, col_labels, verbose=verbose)\n\n if self.n_components<1:\n if verbose>=3: print('[pca] >The PCA reduction is performed to capture [%.1f%%] explained variance using the [%.d] columns of the input data.' %(self.n_components * 100, X.shape[1]))\n pcp = self.n_components\n # Run with all components to get all PCs back. This is needed for the step after.\n _, _, _, percentExplVar = _explainedvar(X, n_components=None, onehot=self.onehot, random_state=self.random_state, verbose=verbose)\n # Take number of components with minimal [n_components] explained variance\n if percentExplVar is None:\n self.n_components = X.shape[1] - 1\n if verbose>=3: print('[pca] >n_components is set to %d' %(self.n_components))\n else:\n self.n_components = np.min(np.where(percentExplVar >= self.n_components)[0]) + 1\n if verbose>=3: print('[pca] >Number of components is [%d] that covers the [%.2f%%] explained variance.' %(self.n_components, pcp * 100))\n\n if verbose>=3: print('[pca] >The PCA reduction is performed on the [%.d] columns of the input dataframe.' 
%(X.shape[1]))\n model_pca, PC, loadings, percentExplVar = _explainedvar(X, n_components=self.n_components, onehot=self.onehot, random_state=self.random_state, verbose=verbose)\n pcp = percentExplVar[np.minimum(len(percentExplVar) - 1, self.n_components)]\n\n # Combine components relations with features\n loadings = self._postprocessing(model_pca, loadings, col_labels, self.n_components, verbose=verbose)\n # Top scoring n_components\n topfeat = self.compute_topfeat(loadings=loadings, verbose=verbose)\n # Detection of outliers\n outliers, outliers_params = self.compute_outliers(PC, verbose=verbose)\n # Store\n self.results = _store(PC, loadings, percentExplVar, model_pca, self.n_components, pcp, col_labels, row_labels, topfeat, outliers, scaler, outliers_params)\n # Return\n return(self.results)\n\n def _clean(self, verbose=3):\n # Clean readily fitted models to ensure correct results.\n if hasattr(self, 'results'):\n if verbose>=3: print('[pca] >Cleaning previous fitted model results..')\n if hasattr(self, 'results'): del self.results\n\n # Outlier detection\n def compute_outliers(self, PC, n_std=2, verbose=3):\n \"\"\"Compute outliers.\n\n Parameters\n ----------\n PC : Array-like\n Principal Components.\n n_std : int, (default: 2)\n Standard deviation. The default is 2.\n Verbose : int (default : 3)\n Print to screen. 0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace\n\n Returns\n -------\n outliers : numpy array\n Array containing outliers.\n outliers_params: dictionary, (default: None)\n Contains parameters for hotellingsT2() and spe_dmodx(), reusable in the future.\n \"\"\"\n # Convert to numpy array if required\n if isinstance(PC, pd.DataFrame): PC = np.array(PC)\n # Initialize\n outliersHT2, outliersELIPS = pd.DataFrame(), pd.DataFrame()\n if hasattr(self, 'results'):\n paramT2 = self.results['outliers_params'].get('paramT2', None)\n paramSPE = self.results['outliers_params'].get('paramSPE', None)\n else:\n paramT2, paramSPE = None, None\n\n if np.any(np.isin(self.detect_outliers, 'ht2')):\n # Detection of outliers using hotelling T2 test.\n if (paramT2 is not None) and (verbose>=3): print('[pca] >compute hotellingsT2 with precomputed parameter.')\n outliersHT2, _, paramT2 = hotellingsT2(PC, alpha=self.alpha, df=1, n_components=self.n_components, param=paramT2, verbose=verbose)\n if np.any(np.isin(self.detect_outliers, 'spe')):\n # Detection of outliers using elipse method.\n if (paramSPE is not None) and (verbose>=3): print('[pca] >compute SPE with precomputed parameter.')\n outliersELIPS, _, paramSPE = spe_dmodx(PC, n_std=self.n_std, param=paramSPE, verbose=verbose)\n # Combine\n outliers = pd.concat([outliersHT2, outliersELIPS], axis=1)\n outliers_params = {'paramT2': paramT2, 'paramSPE': paramSPE}\n return outliers, outliers_params\n\n # Post processing.\n def _postprocessing(self, model_pca, loadings, col_labels, n_components, verbose=3):\n PCzip = list(zip(['PC'] * model_pca.components_.shape[0], np.arange(1, model_pca.components_.shape[0] + 1).astype(str)))\n PCnames = list(map(lambda x: ''.join(x), PCzip))\n loadings = pd.DataFrame(loadings, columns=col_labels, index=PCnames)\n # Return\n return(loadings)\n\n # Top scoring components\n def compute_topfeat(self, loadings=None, verbose=3):\n \"\"\"Compute the top-scoring features.\n\n Description\n -----------\n Per Principal Component, the feature with absolute maximum loading is stored.\n This can result into the detection of PCs that contain the same features. 
The feature that were never detected are stored as \"weak\".\n\n Parameters\n ----------\n loadings : array-like\n The array containing the loading information of the Principal Components.\n Verbose : int (default : 3)\n Print to screen. 0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace\n\n Returns\n -------\n topfeat : pd.DataFrame\n Best performing features per PC.\n\n \"\"\"\n if (loadings is None):\n try:\n # Get feature names\n initial_feature_names = self.results['loadings'].columns.values\n loadings = self.results['loadings'].values.copy()\n except:\n raise Exception('[pca] >Error: loadings is not defined. Tip: run fit_transform() or provide the loadings yourself as input argument.')\n\n if isinstance(loadings, pd.DataFrame):\n initial_feature_names = loadings.columns.values\n loadings = loadings.values\n\n # number of components\n n_pcs = loadings.shape[0]\n # get the index of the most important feature on EACH component\n idx = [np.abs(loadings[i]).argmax() for i in range(n_pcs)]\n # The the loadings\n loading_best = loadings[np.arange(0, n_pcs), idx]\n # get the names\n most_important_names = [initial_feature_names[idx[i]] for i in range(len(idx))]\n # Make dict with most important features\n dic = {'PC{}'.format(i + 1): most_important_names[i] for i in range(len(most_important_names))}\n # Collect the features that were never discovered. The weak features.\n idxcol = np.setdiff1d(range(loadings.shape[1]), idx)\n # get the names\n least_important_names = [initial_feature_names[idxcol[i]] for i in range(len(idxcol))]\n # Find the strongest loading across the PCs for the least important ones\n idxrow = [np.abs(loadings[:, i]).argmax() for i in idxcol]\n loading_weak = loadings[idxrow, idxcol]\n # Make dict with most important features\n # dic_weak = {'weak'.format(i+1): least_important_names[i] for i in range(len(least_important_names))}\n PC_weak = ['PC{}'.format(i + 1) for i in idxrow]\n\n # build the dataframe\n topfeat = pd.DataFrame(dic.items(), columns=['PC', 'feature'])\n topfeat['loading'] = loading_best\n topfeat['type'] = 'best'\n # Weak features\n weakfeat = pd.DataFrame({'PC': PC_weak, 'feature': least_important_names, 'loading': loading_weak, 'type': 'weak'})\n\n # Combine features\n df = pd.concat([topfeat, weakfeat])\n df.reset_index(drop=True, inplace=True)\n # Return\n return df\n\n # Check input values\n def _preprocessing(self, X, row_labels, col_labels, scaler=None, verbose=3):\n if self.n_components is None:\n self.n_components = X.shape[1] - 1\n if verbose>=3: print('[pca] >n_components is set to %d' %(self.n_components))\n\n self.n_feat = np.min([self.n_feat, X.shape[1]])\n\n if (not self.onehot) and (not self.normalize) and isinstance(X, pd.DataFrame) and (str(X.values.dtype)=='bool'):\n if verbose>=2: print('[pca] >Warning: Sparse or one-hot boolean input data is detected, it is highly recommended to set onehot=True or alternatively, normalize=True')\n\n # if sp.issparse(X):\n # if verbose>=1: print('[PCA] Error: A sparse matrix was passed, but dense data is required for method=barnes_hut. Use X.toarray() to convert to a dense numpy array if the array is small enough for it to fit in memory.')\n if isinstance(X, pd.DataFrame):\n if verbose>=3: print('[pca] >Processing dataframe..')\n col_labels = X.columns.values\n row_labels = X.index.values\n X = X.values\n if sp.issparse(X) and self.normalize:\n if verbose>=3: print('[pca] >Can not normalize a sparse matrix. 
Normalize is set to [False]')\n self.normalize=False\n if col_labels is None or len(col_labels)==0 or len(col_labels)!=X.shape[1]:\n if verbose>=3: print('[pca] >Column labels are auto-completed.')\n col_labels = np.arange(1, X.shape[1] + 1).astype(str)\n if row_labels is None or len(row_labels)!=X.shape[0]:\n row_labels=np.ones(X.shape[0])\n if verbose>=3: print('[pca] >Row labels are auto-completed.')\n if isinstance(row_labels, list):\n row_labels=np.array(row_labels)\n if isinstance(col_labels, list):\n col_labels=np.array(col_labels)\n if (sp.issparse(X) is False) and (self.n_components > X.shape[1]):\n # raise Exception('[pca] >Number of components can not be more then number of features.')\n if verbose>=2: print('[pca] >Warning: >Number of components can not be more then number of features. n_components is set to %d' %(X.shape[1] - 1))\n self.n_components = X.shape[1] - 1\n\n # normalize data\n if self.normalize:\n if verbose>=3: print('[pca] >Normalizing input data per feature (zero mean and unit variance)..')\n # Plot the data distribution\n # fig,(ax1,ax2)=plt.subplots(1,2, figsize=(15,5))\n # ax1.hist(X.ravel().astype(float), bins=50)\n # ax1.set_ylabel('frequency')\n # ax1.set_xlabel('Values')\n # ax1.set_title('RAW')\n # ax1.grid(True)\n\n # X = preprocessing.scale(X, with_mean=True, with_std=True, axis=0)\n\n # IF the scaler is not yet fitted, make scaler object.\n if scaler is None:\n scaler = StandardScaler(with_mean=True, with_std=True).fit(X)\n X = scaler.transform(X)\n\n # Plot the data distribution\n # ax2.hist(X.ravel().astype(float), bins=50)\n # ax2.set_ylabel('frequency')\n # ax2.set_xlabel('Values')\n # ax2.set_title('Zero-mean with unit variance normalized')\n # ax2.grid(True)\n\n return(X, row_labels, col_labels, scaler)\n\n # Figure pre processing\n def _fig_preprocessing(self, y, n_feat, d3):\n if hasattr(self, 'PC'): raise Exception('[pca] >Error: Principal components are not derived yet. Tip: run fit_transform() first.')\n if self.results['PC'].shape[1]<1: raise Exception('[pca] >Requires at least 1 PC to make plot.')\n\n if (n_feat is not None):\n topfeat = self.compute_topfeat()\n # n_feat = np.maximum(np.minimum(n_feat, self.results['loadings'].shape[0]), 2)\n else:\n topfeat = self.results['topfeat']\n n_feat = self.n_feat\n\n if d3:\n n_feat = np.maximum(np.minimum(n_feat, self.results['loadings'].shape[1]), 3)\n else:\n n_feat = np.maximum(np.minimum(n_feat, self.results['loadings'].shape[1]), 2)\n\n if (y is not None):\n if len(y)!=self.results['PC'].shape[0]: raise Exception('[pca] >Error: Input variable [y] should have some length as the number input samples: [%d].' %(self.results['PC'].shape[0]))\n y = y.astype(str)\n else:\n y = self.results['PC'].index.values.astype(str)\n\n if len(self.results['explained_var'])<=1:\n raise Exception('[pca] >Error: No PCs are found with explained variance..')\n\n return y, topfeat, n_feat\n\n # Scatter plot\n def scatter3d(self, y=None, label=True, legend=True, PC=[0, 1, 2], SPE=False, hotellingt2=False, cmap='Set1', visible=True, figsize=(10, 8), \n alpha_transparency=None):\n \"\"\"Scatter 3d plot.\n\n Parameters\n ----------\n y : array-like, default: None\n Label for each sample. The labeling is used for coloring the samples.\n PC : list, default : [0,1,2]\n Plot the first three Principal Components. Note that counting starts from 0. 
PC1=0, PC2=1, PC3=2, etc\n label : Bool, default: True\n Show the labels.\n legend : Bool, default: True\n Show the legend based on the unique y-labels.\n SPE : Bool, default: False\n Show the outliers based on SPE/DmodX method.\n hotellingt2 : Bool, default: False\n Show the outliers based on the hotelling T2 test.\n cmap : String, optional, default: 'Set1'\n Colormap. If set to None, no points are shown.\n visible : Bool, default: True\n Visible status of the Figure. When False, figure is created on the background.\n figsize : (int, int), optional, default: (10,8)\n (width, height) in inches.\n alpha_transparency : Float, default: None\n The alpha blending value, between 0 (transparent) and 1 (opaque).\n\n Returns\n -------\n tuple containing (fig, ax)\n\n \"\"\"\n if self.results['PC'].shape[1]>=3:\n fig, ax = self.scatter(y=y, d3=True, label=label, legend=legend, PC=PC, SPE=SPE, hotellingt2=hotellingt2, cmap=cmap, visible=visible, figsize=figsize, \n alpha_transparency=alpha_transparency)\n else:\n print('[pca] >Error: There are not enough PCs to make a 3d-plot.')\n fig, ax = None, None\n return fig, ax\n\n # Scatter plot\n def scatter(self, y=None, d3=False, label=True, legend=True, PC=[0, 1], SPE=False, hotellingt2=False, cmap='Set1', visible=True, figsize=(10, 8), \n alpha_transparency=None):\n \"\"\"Scatter 2d plot.\n\n Parameters\n ----------\n y : array-like, default: None\n Label for each sample. The labeling is used for coloring the samples.\n d3 : Bool, default: False\n 3d plot is created when True.\n PC : list, default : [0,1]\n Plot the first two Principal Components. Note that counting starts from 0. PC1=0, PC2=1, PC3=2, etc\n legend : Bool, default: True\n Show the legend based on the unique y-labels.\n label : Bool, default: True\n Show the labels.\n SPE : Bool, default: False\n Show the outliers based on SPE/DmodX method.\n hotellingt2 : Bool, default: False\n Show the outliers based on the hotelling T2 test.\n cmap : String, optional, default: 'Set1'\n Colormap. If set to None, no points are shown.\n visible : Bool, default: True\n Visible status of the Figure. 
When False, figure is created on the background.\n figsize : (int, int), optional, default: (10,8)\n (width, height) in inches.\n alpha_transparency : Float, default: None\n The alpha blending value, between 0 (transparent) and 1 (opaque).\n\n Returns\n -------\n tuple containing (fig, ax)\n\n \"\"\"\n fig, ax = plt.subplots(figsize=figsize, edgecolor='k')\n fig.set_visible(visible)\n\n Ioutlier1 = np.repeat(False, self.results['PC'].shape[0])\n Ioutlier2 = np.repeat(False, self.results['PC'].shape[0])\n\n if y is None:\n y, _, _ = self._fig_preprocessing(y, None, d3)\n\n # Get coordinates\n xs, ys, zs, ax = _get_coordinates(self.results['PC'], PC, fig, ax, d3)\n\n # Plot outliers for hotelling T2 test.\n if hotellingt2 and ('y_bool' in self.results['outliers'].columns):\n Ioutlier1 = self.results['outliers']['y_bool'].values\n if d3:\n ax.scatter(xs[Ioutlier1], ys[Ioutlier1], zs[Ioutlier1], marker='x', color=[0, 0, 0], s=26, label='outliers (hotelling t2)', \n alpha=alpha_transparency)\n else:\n ax.scatter(xs[Ioutlier1], ys[Ioutlier1], marker='x', color=[0, 0, 0], s=26, label='outliers (hotelling t2)',\n alpha=alpha_transparency)\n\n # Plot outliers for hotelling T2 test.\n if SPE and ('y_bool_spe' in self.results['outliers'].columns):\n Ioutlier2 = self.results['outliers']['y_bool_spe'].values\n if d3:\n ax.scatter(xs[Ioutlier2], ys[Ioutlier2], zs[Ioutlier2], marker='d', color=[0.5, 0.5, 0.5], s=26, label='outliers (SPE/DmodX)',\n alpha=alpha_transparency)\n else:\n ax.scatter(xs[Ioutlier2], ys[Ioutlier2], marker='d', color=[0.5, 0.5, 0.5], s=26, label='outliers (SPE/DmodX)',\n alpha=alpha_transparency)\n # Plot the ellipse\n g_ellipse = spe_dmodx(np.c_[xs, ys], n_std=self.n_std, color='green', calpha=0.3, verbose=0)[1]\n if g_ellipse is not None: ax.add_artist(g_ellipse)\n\n # Make scatter plot of all not-outliers\n Inormal = ~np.logical_or(Ioutlier1, Ioutlier2)\n uiy = np.unique(y)\n\n # Get the colors\n if cmap is None:\n getcolors = np.repeat([1, 1, 1], len(uiy), axis=0).reshape(-1, 3)\n else:\n getcolors = np.array(colourmap.generate(len(uiy), cmap=cmap))\n\n for i, yk in enumerate(uiy):\n Iloc_label = (yk==y)\n Iloc_sampl = np.logical_and(Iloc_label, Inormal)\n if d3:\n ax.scatter(xs[Iloc_sampl], ys[Iloc_sampl], zs[Iloc_sampl], color=getcolors[i, :], s=25, label=yk,\n alpha=alpha_transparency)\n # if label: ax.text(xs[Iloc_label], ys[Iloc_label], zs[Iloc_label], yk, color=getcolors[i,:], ha='center', va='center')\n else:\n ax.scatter(xs[Iloc_sampl], ys[Iloc_sampl], color=getcolors[i, :], s=25, label=yk,\n alpha=alpha_transparency)\n if label: ax.annotate(yk, (np.mean(xs[Iloc_label]), np.mean(ys[Iloc_label])))\n\n # Set y\n ax.set_xlabel('PC' + str(PC[0] + 1) + ' (' + str(self.results['model'].explained_variance_ratio_[PC[0]] * 100)[0:4] + '% expl.var)')\n if len(self.results['model'].explained_variance_ratio_)>=2:\n ax.set_ylabel('PC' + str(PC[1] + 1) + ' (' + str(self.results['model'].explained_variance_ratio_[PC[1]] * 100)[0:4] + '% expl.var)')\n else:\n ax.set_ylabel('PC2 (0% expl.var)')\n if d3 and (len(self.results['model'].explained_variance_ratio_)>=3):\n ax.set_zlabel('PC' + str(PC[2] + 1) + ' (' + str(self.results['model'].explained_variance_ratio_[PC[2]] * 100)[0:4] + '% expl.var)')\n ax.set_title(str(self.n_components) + ' Principal Components explain [' + str(self.results['pcp'] * 100)[0:5] + '%] of the variance')\n if legend: ax.legend()\n ax.grid(True)\n # Return\n return (fig, ax)\n\n def biplot(self, y=None, n_feat=None, d3=False, label=True, legend=True, SPE=False, 
hotellingt2=False, cmap='Set1', figsize=(10, 8), visible=True, verbose=3, \n alpha_transparency=None):\n \"\"\"Create the Biplot.\n\n Description\n -----------\n Plots the PC1 vs PC2 (vs PC3) with the samples, and the best performing features.\n Per PC, The feature with absolute highest loading is gathered. This can result into features that are seen over multiple PCs, and some features may never be detected.\n For vizualization purposes we will keep only the unique feature-names and plot them with red arrows and green labels.\n The feature-names that were never discovered (described as weak) are colored yellow.\n\n Parameters\n ----------\n y : array-like, default: None\n Label for each sample. The labeling is used for coloring the samples.\n n_feat : int, default: 10\n Number of features that explain the space the most, dervied from the loadings. This parameter is used for vizualization purposes only.\n d3 : Bool, default: False\n 3d plot is created when True.\n label : Bool, default: True\n Show the labels.\n legend : Bool, default: True\n Show the legend based on the unique y-labels.\n SPE : Bool, default: False\n Show the outliers based on SPE/DmodX method.\n hotellingt2 : Bool, default: False\n Show the outliers based on the hotelling T2 test.\n cmap : String, optional, default: 'Set1'\n Colormap. If set to None, no points are shown.\n figsize : (int, int), optional, default: (10,8)\n (width, height) in inches.\n visible : Bool, default: True\n Visible status of the Figure. When False, figure is created on the background.\n Verbose : int (default : 3)\n Print to screen. 0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace\n alpha_transparency : Float, default: None\n The alpha blending value, between 0 (transparent) and 1 (opaque).\n\n Returns\n -------\n tuple containing (fig, ax)\n\n References\n ----------\n * https://stackoverflow.com/questions/50796024/feature-variable-importance-after-a-pca-analysis/50845697#50845697\n * https://towardsdatascience.com/pca-clearly-explained-how-when-why-to-use-it-and-feature-importance-a-guide-in-python-7c274582c37e\n\n \"\"\"\n if self.results['PC'].shape[1]<2:\n print('[pca] >Requires 2 PCs to make 2d plot.')\n return None, None\n\n # Pre-processing\n y, topfeat, n_feat = self._fig_preprocessing(y, n_feat, d3)\n # coeff = self.results['loadings'][topfeat['feature'].values].iloc[0:n_feat,:]\n coeff = self.results['loadings'].iloc[0:n_feat, :]\n # Use the PCs only for scaling purposes\n mean_x = np.mean(self.results['PC'].iloc[:, 0].values)\n mean_y = np.mean(self.results['PC'].iloc[:, 1].values)\n\n # Plot and scale values for arrows and text\n # Take the absolute minimum range of the x-axis and y-axis\n # max_axis = np.min(np.abs(self.results['PC'].iloc[:,0:2]).max())\n max_axis = np.max(np.abs(self.results['PC'].iloc[:, 0:2]).min(axis=1))\n max_arrow = np.abs(coeff).max().max()\n scale = (np.max([1, np.round(max_axis / max_arrow, 2)])) * 0.93\n\n # Include additional parameters if 3d-plot is desired.\n if d3:\n if self.results['PC'].shape[1]<3:\n if verbose>=2: print('[pca] >Warning: requires 3 PCs to make 3d plot.')\n return None, None\n mean_z = np.mean(self.results['PC'].iloc[:, 2].values)\n # zs = self.results['PC'].iloc[:,2].values\n fig, ax = self.scatter3d(y=y, label=label, legend=legend, SPE=SPE, hotellingt2=hotellingt2, cmap=cmap, visible=visible, figsize=figsize,\n alpha_transparency=alpha_transparency)\n else:\n fig, ax = self.scatter(y=y, label=label, legend=legend, SPE=SPE, hotellingt2=hotellingt2, cmap=cmap, 
visible=visible, figsize=figsize,\n alpha_transparency=alpha_transparency)\n\n # For vizualization purposes we will keep only the unique feature-names\n topfeat = topfeat.drop_duplicates(subset=['feature'])\n if topfeat.shape[0]<n_feat:\n n_feat = topfeat.shape[0]\n if verbose>=2: print('[pca] >Warning: n_feat can not be reached because of the limitation of n_components (=%d). n_feat is reduced to %d.' %(self.n_components, n_feat))\n\n # Plot arrows and text\n for i in range(0, n_feat):\n getfeat = topfeat['feature'].iloc[i]\n label = getfeat + ' (' + ('%.2f' %topfeat['loading'].iloc[i]) + ')'\n getcoef = coeff[getfeat].values\n # Set PC1 vs PC2 direction. Note that these are not neccarily the best loading.\n xarrow = getcoef[0] * scale # PC1 direction (aka the x-axis)\n yarrow = getcoef[1] * scale # PC2 direction (aka the y-axis)\n txtcolor = 'y' if topfeat['type'].iloc[i] == 'weak' else 'g'\n\n if d3:\n # zarrow = getcoef[np.minimum(2,len(getcoef))] * scale\n zarrow = getcoef[2] * scale\n ax.quiver(mean_x, mean_y, mean_z, xarrow - mean_x, yarrow - mean_y, zarrow - mean_z, color='red', alpha=0.8, lw=2)\n ax.text(xarrow * 1.11, yarrow * 1.11, zarrow * 1.11, label, color=txtcolor, ha='center', va='center')\n else:\n ax.arrow(mean_x, mean_y, xarrow - mean_x, yarrow - mean_y, color='r', width=0.005, head_width=0.01 * scale, alpha=0.8)\n ax.text(xarrow * 1.11, yarrow * 1.11, label, color=txtcolor, ha='center', va='center')\n\n if visible: plt.show()\n return(fig, ax)\n\n def biplot3d(self, y=None, n_feat=None, label=True, legend=True, SPE=False, hotellingt2=False, cmap='Set1', visible=True, figsize=(10, 8),\n alpha_transparency=1):\n \"\"\"Make biplot in 3d.\n\n Parameters\n ----------\n y : array-like, default: None\n Label for each sample. The labeling is used for coloring the samples.\n n_feat : int, default: 10\n Number of features that explain the space the most, dervied from the loadings. This parameter is used for vizualization purposes only.\n label : Bool, default: True\n Show the labels.\n legend : Bool, default: True\n Show the legend based on the unique y-labels.\n SPE : Bool, default: False\n Show the outliers based on SPE/DmodX method.\n hotellingt2 : Bool, default: False\n Show the outliers based on the hotelling T2 test.\n visible : Bool, default: True\n Visible status of the Figure. When False, figure is created on the background.\n figsize : (int, int), optional, default: (10,8)\n (width, height) in inches.\n alpha_transparency : Float, default: None\n The alpha blending value, between 0 (transparent) and 1 (opaque).\n\n Returns\n -------\n tuple containing (fig, ax)\n\n \"\"\"\n if self.results['PC'].shape[1]<3:\n print('[pca] >Requires 3 PCs to make 3d plot. Try to use biplot() instead.')\n return None, None\n\n fig, ax = self.biplot(y=y, n_feat=n_feat, d3=True, label=label, legend=legend, SPE=SPE, cmap=cmap, hotellingt2=hotellingt2, visible=visible, figsize=figsize, alpha_transparency=alpha_transparency)\n\n return(fig, ax)\n\n # Show explained variance plot\n def plot(self, n_components=None, figsize=(10, 8), xsteps=None, visible=True):\n \"\"\"Make plot.\n\n Parameters\n ----------\n model : dict\n model created by the fit() function.\n visible : Bool, default: True\n Visible status of the Figure. When False, figure is created on the background.\n figsize : (float, float), optional, default: None\n (width, height) in inches. 
If not provided, defaults to rcParams[\"figure.figsize\"] = (10,8)\n\n Returns\n -------\n tuple containing (fig, ax)\n\n \"\"\"\n if n_components is not None:\n explvarCum = self.results['explained_var'][0:n_components]\n explvar = self.results['model'].explained_variance_ratio_[0:n_components]\n else:\n explvarCum = self.results['explained_var']\n explvar = self.results['model'].explained_variance_ratio_\n xtick_idx = np.arange(1, len(explvar) + 1)\n\n # Make figure\n fig, ax = plt.subplots(figsize=figsize, edgecolor='k')\n fig.set_visible(visible)\n plt.plot(xtick_idx, explvarCum, 'o-', color='k', linewidth=1, label='Cumulative explained variance')\n\n # Set xticks if less then 100 datapoints\n if len(explvar)<100:\n ax.set_xticks(xtick_idx)\n xticklabel=xtick_idx.astype(str)\n if xsteps is not None:\n xticklabel[np.arange(1, len(xticklabel), xsteps)] = ''\n ax.set_xticklabels(xticklabel, rotation=90, ha='left', va='top')\n\n plt.ylabel('Percentage explained variance')\n plt.xlabel('Principle Component')\n plt.ylim([0, 1.05])\n plt.xlim([0, len(explvar) + 1])\n titletxt = 'Cumulative explained variance\\n ' + str(self.n_components) + ' Principal Components explain [' + str(self.results['pcp'] * 100)[0:5] + '%] of the variance.'\n plt.title(titletxt)\n plt.grid(True)\n\n # Plot vertical line To stress the cut-off point\n ax.axvline(self.n_components, linewidth=0.8, color='r')\n ax.axhline(y=self.results['pcp'], xmin=0, xmax=1, linewidth=0.8, color='r')\n if len(xtick_idx)<100:\n plt.bar(xtick_idx, explvar, color='#3182bd', alpha=0.8, label='Explained variance')\n\n if visible:\n plt.show()\n plt.draw()\n # Return\n return(fig, ax)\n\n # Top scoring components\n def norm(self, X, n_components=None, pcexclude=[1]):\n \"\"\"Normalize out PCs.\n\n Description\n -----------\n Normalize your data using the principal components.\n As an example, suppose there is (technical) variation in the fist\n component and you want that out. This function transforms the data using\n the components that you want, e.g., starting from the 2nd pc, up to the\n pc that contains at least 95% of the explained variance\n\n Parameters\n ----------\n X : numpy array\n Data set.\n n_components : float [0..1], optional\n Number of PCs to keep based on the explained variance. The default is 1 (keeping all)\n pcexclude : list of int, optional\n The PCs to exclude. The default is [1].\n\n Returns\n -------\n Normalized numpy array.\n\n \"\"\"\n if n_components is None:\n self.n_components = X.shape[1]\n else:\n self.n_components = n_components\n\n if not isinstance(pcexclude, list): pcexclude=[pcexclude]\n\n # Fit using PCA\n _ = self.fit_transform(X)\n coeff = self.results['loadings'].values\n score = self.results['PC']\n # Compute explained percentage of variance\n q = self.results['explained_var']\n ndims = np.where(q<=self.n_components)[0]\n ndims = (np.setdiff1d(ndims + 1, pcexclude)) - 1\n # Transform data\n out = np.repeat(np.mean(X.values, axis=1).reshape(-1, 1), X.shape[1], axis=1) + np.dot(score.values[:, ndims], coeff[:, ndims].T)\n # Return\n return(out)\n\n # Import example\n def import_example(self, data='titanic', verbose=3):\n \"\"\"Import example dataset from github source.\n\n Parameters\n ----------\n data : str, optional\n Name of the dataset 'sprinkler' or 'titanic' or 'student'.\n verbose : int, optional\n Print message to screen. 
The default is 3.\n\n Returns\n -------\n pd.DataFrame()\n Dataset containing mixed features.\n\n \"\"\"\n return import_example(data=data, verbose=verbose)\n\n\n# %%\ndef _get_coordinates(PCs, PC, fig, ax, d3):\n xs = PCs.iloc[:, PC[0]].values\n ys = np.zeros(len(xs))\n zs = None\n\n # Get y-axis\n if PCs.shape[1]>1:\n ys = PCs.iloc[:, PC[1]].values\n\n # Get Z-axis\n if d3:\n zs = PCs.iloc[:, PC[2]].values\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n\n return xs, ys, zs, ax\n\n\n# %%\ndef _eigsorted(cov, n_std):\n vals, vecs = np.linalg.eigh(cov)\n # vecs = vecs * np.sqrt(scipy.stats.chi2.ppf(0.95, n_std))\n order = vals.argsort()[::-1]\n return vals[order], vecs[:, order]\n\n\ndef spe_dmodx(X, n_std=2, param=None, calpha=0.3, color='green', showfig=False, verbose=3):\n \"\"\"Compute SPE/distance to model (DmodX).\n\n Description\n -----------\n Outlier can be detected using SPE/DmodX (distance to model) based on the mean and covariance of the first 2 dimensions of X.\n On the model plane (SPE ≈ 0). Note that the SPE or Hotelling’s T2 are complementary to each other.\n\n Parameters\n ----------\n X : Array-like\n Input data, in this case the Principal components.\n n_std : int, (default: 2)\n Standard deviation. The default is 2.\n param : 2-element tuple (default: None)\n Pre-computed g_ell_center and cov in the past run. None to compute from scratch with X. \n calpha : float, (default: 0.3)\n transperancy color.\n color : String, (default: 'green')\n Color of the ellipse.\n showfig : bool, (default: False)\n Scatter the points with the ellipse and mark the outliers.\n\n Returns\n -------\n outliers : pd.DataFrame()\n column with boolean outliers and euclidean distance of each sample to the center of the ellipse.\n ax : object\n Figure axis.\n param : 2-element tuple\n computed g_ell_center and cov from X.\n \"\"\"\n if verbose>=3: print('[pca] >Outlier detection using SPE/DmodX with n_std=[%d]' %(n_std))\n g_ellipse = None\n # The 2x2 covariance matrix to base the ellipse on the location of the center of the ellipse. Expects a 2-element sequence of [x0, y0].\n n_components = np.minimum(2, X.shape[1])\n X = X[:, 0:n_components]\n\n if X.shape[1]>=2:\n # Compute mean and covariance\n if (param is not None):\n g_ell_center, cov = param\n else:\n g_ell_center = X.mean(axis=0)\n cov = np.cov(X, rowvar=False)\n param = g_ell_center, cov\n\n # Width and height are \"full\" widths, not radius\n vals, vecs = _eigsorted(cov, n_std)\n angle = np.degrees(np.arctan2(*vecs[:, 0][::-1]))\n width, height = 2 * n_std * np.sqrt(vals)\n # Compute angles of ellipse\n cos_angle = np.cos(np.radians(180. - angle))\n sin_angle = np.sin(np.radians(180. 
- angle))\n # Determine the elipse range\n xc = X[:, 0] - g_ell_center[0]\n yc = X[:, 1] - g_ell_center[1]\n xct = xc * cos_angle - yc * sin_angle\n yct = xc * sin_angle + yc * cos_angle\n rad_cc = (xct**2 / (width / 2.)**2) + (yct**2 / (height / 2.)**2)\n\n # Mark the samples outside the ellipse\n outliers = rad_cc>1\n\n # Plot the raw points.\n g_ellipse = Ellipse(xy=g_ell_center, width=width, height=height, angle=angle, color=color, alpha=calpha)\n y_score = list(map(lambda x: euclidean_distances([g_ell_center], x.reshape(1, -1))[0][0], X))\n\n if showfig:\n ax = plt.gca()\n ax.add_artist(g_ellipse)\n ax.scatter(X[~outliers, 0], X[~outliers, 1], c='black', linewidths=0.3, label='normal')\n ax.scatter(X[outliers, 0], X[outliers, 1], c='red', linewidths=0.3, label='outlier')\n ax.legend()\n else:\n outliers = np.repeat(False, X.shape[1])\n y_score = np.repeat(None, X.shape[1])\n\n # Store in dataframe\n out = pd.DataFrame(data={'y_bool_spe': outliers, 'y_score_spe': y_score})\n return out, g_ellipse, param\n\n\n# %% Outlier detection\ndef hotellingsT2(X, alpha=0.05, df=1, n_components=5, param=None, verbose=3):\n \"\"\"Test for outlier using hotelling T2 test.\n\n Description\n -----------\n Test for outliers using chi-square tests for each of the n_components.\n The resulting P-value matrix is then combined using fishers method per sample.\n The results can be used to priortize outliers as those samples that are an outlier\n across multiple dimensions will be more significant then others.\n\n Parameters\n ----------\n X : numpy-array.\n Principal Components.\n alpha : float, (default: 0.05)\n Alpha level threshold to determine outliers.\n df : int, (default: 1)\n Degrees of freedom.\n n_components : int, (default: 5)\n Number of PC components to be used to compute the Pvalue.\n param : 2-element tuple (default: None)\n Pre-computed mean and variance in the past run. None to compute from scratch with X. \n Verbose : int (default : 3)\n Print to screen. 0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace\n\n Returns\n -------\n outliers : pd.DataFrame\n dataframe containing probability, test-statistics and boolean value.\n y_bools : array-like\n boolean value when significant per PC.\n param : 2-element tuple\n computed mean and variance from X.\n \"\"\"\n n_components = np.minimum(n_components, X.shape[1])\n X = X[:, 0:n_components]\n y = X\n\n if (param is not None):\n mean, var = param\n else:\n mean, var = np.mean(X), np.var(X)\n param = (mean, var)\n if verbose>=3: print('[pca] >Outlier detection using Hotelling T2 test with alpha=[%.2f] and n_components=[%d]' %(alpha, n_components))\n y_score = (y - mean) ** 2 / var\n # Compute probability per PC whether datapoints are outside the boundary\n y_proba = 1 - stats.chi2.cdf(y_score, df=df)\n # Set probabilities at a very small value when 0. This is required for the Fishers method. 
Otherwise inf values will occur.\n y_proba[y_proba==0]=1e-300\n\n # Compute the anomaly threshold\n anomaly_score_threshold = stats.chi2.ppf(q=(1 - alpha), df=df)\n # Determine for each samples and per principal component the outliers\n y_bools = y_score >= anomaly_score_threshold\n\n # Combine Pvalues across the components\n Pcomb = []\n # weights = np.arange(0, 1, (1/n_components) )[::-1] + (1/n_components)\n for i in range(0, y_proba.shape[0]):\n # Pcomb.append(stats.combine_pvalues(y_proba[i, :], method='stouffer', weights=weights))\n Pcomb.append(stats.combine_pvalues(y_proba[i, :], method='fisher'))\n\n Pcomb = np.array(Pcomb)\n outliers = pd.DataFrame(data={'y_proba':Pcomb[:, 1], 'y_score': Pcomb[:, 0], 'y_bool': Pcomb[:, 1] <= alpha})\n # Return\n return outliers, y_bools, param\n\n\n# %% Explained variance\ndef _explainedvar(X, n_components=None, onehot=False, random_state=None, n_jobs=-1, verbose=3):\n # Create the model\n if sp.issparse(X):\n if verbose>=3: print('[pca] >Fitting using Truncated SVD..')\n model = TruncatedSVD(n_components=n_components, random_state=random_state)\n elif onehot:\n if verbose>=3: print('[pca] >Fitting using Sparse PCA..')\n model = SparsePCA(n_components=n_components, random_state=random_state, n_jobs=n_jobs)\n else:\n if verbose>=3: print('[pca] >Fitting using PCA..')\n model = PCA(n_components=n_components, random_state=random_state)\n\n # Fit model\n model.fit(X)\n # Do the reduction\n if verbose>=3: print('[pca] >Computing loadings and PCs..')\n loadings = model.components_ # Ook wel de coeeficienten genoemd: coefs!\n PC = model.transform(X)\n if not onehot:\n # Compute explained variance, top 95% variance\n if verbose>=3: print('[pca] >Computing explained variance..')\n percentExplVar = model.explained_variance_ratio_.cumsum()\n else:\n percentExplVar = None\n # Return\n return(model, PC, loadings, percentExplVar)\n\n\n# %% Store results\ndef _store(PC, loadings, percentExplVar, model_pca, n_components, pcp, col_labels, row_labels, topfeat, outliers, scaler, outliers_params):\n\n if not outliers.empty: outliers.index = row_labels\n out = {}\n out['loadings'] = loadings\n out['PC'] = pd.DataFrame(data=PC[:, 0:n_components], index=row_labels, columns=loadings.index.values[0:n_components])\n out['explained_var'] = percentExplVar\n out['model'] = model_pca\n out['scaler'] = scaler\n out['pcp'] = pcp\n out['topfeat'] = topfeat\n out['outliers'] = outliers\n out['outliers_params'] = outliers_params\n return out\n\n\n# %% Import example dataset from github.\ndef import_example(data='titanic', verbose=3):\n \"\"\"Import example dataset from github source.\n\n Parameters\n ----------\n data : str, optional\n Name of the dataset 'sprinkler' or 'titanic' or 'student'.\n verbose : int, optional\n Print message to screen. 
The default is 3.\n\n Returns\n -------\n pd.DataFrame()\n Dataset containing mixed features.\n\n \"\"\"\n if data=='sprinkler':\n url='https://erdogant.github.io/datasets/sprinkler.zip'\n elif data=='titanic':\n url='https://erdogant.github.io/datasets/titanic_train.zip'\n elif data=='student':\n url='https://erdogant.github.io/datasets/student_train.zip'\n\n curpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\n PATH_TO_DATA = os.path.join(curpath, wget.filename_from_url(url))\n if not os.path.isdir(curpath):\n os.mkdir(curpath)\n\n # Check file exists.\n if not os.path.isfile(PATH_TO_DATA):\n if verbose>=3: print('[pca] >Downloading example dataset from github source..')\n wget.download(url, curpath)\n\n # Import local dataset\n if verbose>=3: print('[pca] >Import dataset [%s]' %(data))\n df = pd.read_csv(PATH_TO_DATA)\n # Return\n return df\n"
] | [
[
"numpy.ones",
"numpy.logical_or",
"numpy.var",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.plot",
"numpy.cov",
"numpy.logical_and",
"numpy.linalg.eigh",
"matplotlib.pyplot.gca",
"numpy.abs",
"sklearn.decomposition.TruncatedSVD",
"matplotlib.pyplot.title",
"numpy.isin",
"numpy.where",
"numpy.round",
"numpy.unique",
"numpy.mean",
"numpy.minimum",
"matplotlib.pyplot.bar",
"numpy.sqrt",
"matplotlib.pyplot.draw",
"scipy.stats.chi2.ppf",
"pandas.read_csv",
"scipy.stats.combine_pvalues",
"numpy.setdiff1d",
"numpy.repeat",
"matplotlib.pyplot.subplots",
"numpy.arange",
"pandas.concat",
"numpy.min",
"matplotlib.pyplot.ylim",
"sklearn.preprocessing.StandardScaler",
"sklearn.decomposition.PCA",
"numpy.arctan2",
"numpy.radians",
"sklearn.decomposition.SparsePCA",
"scipy.sparse.issparse",
"matplotlib.pyplot.grid",
"matplotlib.patches.Ellipse",
"pandas.DataFrame",
"scipy.stats.chi2.cdf",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.xlabel"
]
] |
fukatani/Chainer_training | [
"166acb8379cb1fb0c0eef760979d8da4c3a62840"
] | [
"py/Example.py"
] | [
"#-------------------------------------------------------------------------------\n# Name: Example\n# Purpose:\n#\n# Author: rf\n#\n# Created: 11/08/2015\n# Copyright: (c) rf 2015\n# Licence: Apache Licence 2.0\n#-------------------------------------------------------------------------------\n\nimport numpy as np\nimport PreTrainingChain.AbstractChain\nimport chainer.functions as F\n\nclass PreTrainingDNN(PreTrainingChain.AbstractChain.AbstractChain):\n \"\"\"\n [Classes]\n Sample of DNN for classification.\n If you use this class for minst.\n n_units = [784, n, m, ..., 10]\n 784 is dimension of sample.data and 10 is dimension of sample.target.\n \"\"\"\n def add_last_layer(self):\n self.add_link(F.Linear(self.n_units[-1], self.last_unit))\n\n def loss_function(self, x, y):\n return F.softmax_cross_entropy(x, y)\n\ndef make_sample(size):\n from sklearn.datasets import fetch_mldata\n print('fetch MNIST dataset')\n sample = fetch_mldata('MNIST original')\n perm = np.random.permutation(len(sample.data))\n sample.data = sample.data[perm[0: size]]\n sample.target = sample.target[perm[0: size]]\n print('Successed data fetching')\n sample.data = sample.data.astype(np.float32)\n sample.data /= 255\n sample.target = sample.target.astype(np.int32)\n return sample\n\nif __name__ == '__main__':\n pre_train_size = 5000\n pre_test_size = 200\n train_size = 2000\n test_size = 2000\n\n sample = make_sample(pre_train_size+pre_test_size+train_size+test_size)\n x_pre_train, x_pre_test, x_train, x_test, _ = np.split(sample.data,\n [pre_train_size,\n pre_train_size + pre_test_size,\n pre_train_size + pre_test_size + train_size,\n pre_train_size + pre_test_size + train_size + test_size])\n\n _, _, y_train, y_test, _ = np.split(sample.target,\n [pre_train_size,\n pre_train_size + pre_test_size,\n pre_train_size + pre_test_size + train_size,\n pre_train_size + pre_test_size + train_size + test_size])\n\n #input layer=784, hidden_layer 1st = 400, hidden_layer 2nd = 300,\n #hidden_layer 3rd = 150, hidden_layer 4th = 100, output layer = 10\n pc = PreTrainingDNN([784,400,300,150,100,10])\n\n #x_pre_train: sample data for pre-training\n #if x_pre_train == numpy.array([]), pre-training is skkiped.\n #x_pre_test: sample data for calculate loss after pre-training (optional)\n pc.pre_training(x_pre_train, x_pre_test)\n\n #x_train: sample data for learn as deep network\n #y_train: sample target for learn as deep network (e.g. 0-9 for MNIST)\n #x_train: sample data for test as deep network\n #y_train: sample target for test as deep network (e.g. 0-9 for MNIST)\n #isClassification: Classification problem or not\n pc.learn(x_train, y_train, x_test, y_test, isClassification=True)\n\n"
] | [
[
"sklearn.datasets.fetch_mldata",
"numpy.split"
]
] |
lakshara98/SDGP-Project | [
"3054db54d2ab9a61728836ba3e4fa0b74c1a7c36"
] | [
"src/generate.py"
] | [
"\"\"\"Data generator generates batches of inputs and outputs/labels for training.\n\nThe inputs are each made from two parts. The first maxlend words are the original description, followed by `eos` followed by the headline which we want to predict, except for the last word in the headline which is always `eos` and then `empty` padding until `maxlen` words.\n\nFor each, input, the output is the headline words (without the start `eos` but with the ending `eos`) padded with `empty` words up to `maxlenh` words. The output is also expanded to be y-hot encoding of each word.\n\nTo be more realistic, the second part of the input should be the result of generation and not the original headline.\nInstead we will flip just `nflips` words to be from the generator, but even this is too hard and instead\nimplement flipping in a naive way (which consumes less time.) Using the full input (description + eos + headline) generate predictions for outputs. For nflips random words from the output, replace the original word with the word with highest probability from the prediction.\n\"\"\"\nimport random\n\nimport numpy as np\nfrom keras.preprocessing import sequence\nfrom keras.utils import np_utils\n\nfrom constants import empty, eos, maxlend, maxlenh, maxlen, seed\nfrom sample_gen import vocab_fold, lpadd\n\n\ndef flip_headline(x, nflips, model, debug, oov0, idx2word):\n \"\"\"Flip some of the words in the second half (headline) with words predicted by the model.\"\"\"\n if nflips is None or model is None or nflips <= 0:\n return x\n\n batch_size = len(x)\n assert np.all(x[:, maxlend] == eos)\n probs = model.predict(x, verbose=0, batch_size=batch_size)\n x_out = x.copy()\n for b in range(batch_size):\n # pick locations we want to flip\n # 0...maxlend-1 are descriptions and should be fixed\n # maxlend is eos and should be fixed\n flips = sorted(random.sample(range(maxlend + 1, maxlen), nflips))\n if debug and b < debug:\n print(b)\n for input_idx in flips:\n if x[b, input_idx] == empty or x[b, input_idx] == eos:\n continue\n # convert from input location to label location\n # the output at maxlend (when input is eos) is feed as input at maxlend+1\n label_idx = input_idx - (maxlend + 1)\n prob = probs[b, label_idx]\n w = prob.argmax()\n if w == empty: # replace accidental empty with oov\n w = oov0\n if debug and b < debug:\n print('{} => {}'.format(idx2word[x_out[b, input_idx]], idx2word[w]),)\n x_out[b, input_idx] = w\n if debug and b < debug:\n print()\n return x_out\n\n\ndef conv_seq_labels(xds, xhs, nflips, model, debug, oov0, glove_idx2idx, vocab_size, nb_unknown_words, idx2word):\n \"\"\"Convert description and hedlines to padded input vectors; headlines are one-hot to label.\"\"\"\n batch_size = len(xhs)\n assert len(xds) == batch_size\n x = [\n vocab_fold(lpadd(xd) + xh, oov0, glove_idx2idx, vocab_size, nb_unknown_words)\n for xd, xh in zip(xds, xhs)] # the input does not have 2nd eos\n x = sequence.pad_sequences(x, maxlen=maxlen, value=empty, padding='post', truncating='post')\n x = flip_headline(x, nflips=nflips, model=model, debug=debug, oov0=oov0, idx2word=idx2word)\n\n y = np.zeros((batch_size, maxlenh, vocab_size))\n for i, xh in enumerate(xhs):\n xh = vocab_fold(xh, oov0, glove_idx2idx, vocab_size, nb_unknown_words) + [eos] + [empty] * maxlenh # output does have a eos at end\n xh = xh[:maxlenh]\n y[i, :, :] = np_utils.to_categorical(xh, vocab_size)\n\n return x, y\n\n\ndef gen(Xd, Xh, batch_size, nb_batches, nflips, model, debug, oov0, glove_idx2idx, vocab_size, nb_unknown_words, idx2word):\n 
\"\"\"Yield batches.\n\n for training use nb_batches=None\n for validation generate deterministic results repeating every nb_batches\n \"\"\"\n # while training it is good idea to flip once in a while the values of the headlines from the\n # value taken from Xh to value generated by the model.\n c = nb_batches if nb_batches else 0\n while True:\n xds = []\n xhs = []\n if nb_batches and c >= nb_batches:\n c = 0\n new_seed = random.randint(0, 2e10)\n random.seed(c + 123456789 + seed)\n for b in range(batch_size):\n t = random.randint(0, len(Xd) - 1)\n\n xd = Xd[t]\n s = random.randint(min(maxlend, len(xd)), max(maxlend, len(xd)))\n xds.append(xd[:s])\n\n xh = Xh[t]\n s = random.randint(min(maxlenh, len(xh)), max(maxlenh, len(xh)))\n xhs.append(xh[:s])\n\n # undo the seeding before we yield inorder not to affect the caller\n c += 1\n random.seed(new_seed)\n\n yield conv_seq_labels(\n xds,\n xhs,\n nflips=nflips,\n model=model,\n debug=debug,\n oov0=oov0,\n glove_idx2idx=glove_idx2idx,\n vocab_size=vocab_size,\n nb_unknown_words=nb_unknown_words,\n idx2word=idx2word,\n )\n"
] | [
[
"numpy.all",
"numpy.zeros"
]
] |
magelead/BiAAE | [
"1caa063ee9650e98b20e4e0e68b24374acc6e435"
] | [
"models/aaes/uniaae.py"
] | [
"# proposed model implementation\n\nimport numpy as np\n\nimport torch\nfrom torch import nn\nimport pytorch_lightning as pl\n\nfrom torch import autograd\n\nimport sys\nsys.path.append('..')\n\nfrom networks import MnistCNNDecoder, ExprDiffDecoder, ConditionedDecoder, RNNEncoder, FinetunedEncoder\nfrom networks import MnistCNNEncoder, ExprDiffEncoder, JointEncoder, RNNDecoder, FinetunedDecoder\nfrom networks import FCDiscriminator\n\nfrom torch.optim.lr_scheduler import StepLR\n\nclass UniAAE(pl.LightningModule):\n def __init__(self, dataset='paired_mnist'):\n super(UniAAE, self).__init__()\n self.dataset = dataset\n\n if self.dataset == 'paired_mnist':\n self.z_dim = 16\n self.joint_dim = 4\n\n self.loss_rec_lambda_x = 10\n self.loss_rec_lambda_y = 10\n \n self.loss_normal_lambda = 0.3\n self.loss_indep_lambda = 1\n \n self.discr_steps = 1\n self.gen_steps = 1\n\n self.enc_x = MnistCNNEncoder(out_dim=self.z_dim - self.joint_dim)\n self.enc_y = MnistCNNEncoder(out_dim=self.z_dim)\n\n self.dec_x = MnistCNNDecoder(in_dim=self.z_dim)\n self.dec_y = MnistCNNDecoder(in_dim=self.z_dim)\n\n self.discr = FCDiscriminator(in_dim=2 * self.z_dim -\n self.joint_dim,\n use_sigmoid=False)\n \n self.discr_indep = FCDiscriminator(in_dim=2 * self.z_dim -\n self.joint_dim,\n use_sigmoid=False)\n \n elif self.dataset == 'lincs_rnn':\n self.z_dim = 20\n self.joint_dim = 10\n\n self.loss_rec_lambda_x = 5\n self.loss_rec_lambda_y = 1\n \n self.loss_normal_lambda = 0.5\n self.loss_indep_lambda = 0.5\n\n self.discr_steps = 1\n self.gen_steps = 3\n\n rnn_1 = RNNEncoder(out_dim=88)\n rnn_1.load_state_dict(torch.load('../saved_models/rnn_enc.ckpt', map_location='cuda:0'))\n self.enc_x = FinetunedEncoder(rnn_1, out_dim=self.z_dim - self.joint_dim)\n self.enc_y = ExprDiffEncoder(out_dim=self.z_dim)\n\n rnn_2 = RNNDecoder(in_dim=44)\n rnn_2.load_state_dict(torch.load('../saved_models/rnn_dec.ckpt', map_location='cuda:0'))\n self.dec_x = FinetunedDecoder(rnn_2, in_dim=self.z_dim)\n\n self.dec_y = ExprDiffDecoder(in_dim=self.z_dim)\n \n self.discr = FCDiscriminator(in_dim=2 * self.z_dim -\n self.joint_dim,\n use_sigmoid=False)\n \n self.discr_indep = FCDiscriminator(in_dim=2 * self.z_dim -\n self.joint_dim,\n use_sigmoid=False)\n elif self.dataset == 'lincs_rnn_reverse':\n self.z_dim = 20\n self.joint_dim = 10\n\n self.loss_rec_lambda_x = 1\n self.loss_rec_lambda_y = 0.2\n \n self.loss_normal_lambda = 0.5\n self.loss_indep_lambda = 0.5\n\n self.discr_steps = 3\n self.gen_steps = 1\n\n rnn_1 = RNNEncoder(out_dim=88)\n rnn_1.load_state_dict(torch.load('../saved_models/rnn_enc.ckpt', map_location='cuda:0'))\n self.enc_y = FinetunedEncoder(rnn_1, out_dim=self.z_dim)\n\n self.enc_x = ExprDiffEncoder(out_dim=self.z_dim - self.joint_dim)\n\n rnn_2 = RNNDecoder(in_dim=44)\n rnn_2.load_state_dict(torch.load('../saved_models/rnn_dec.ckpt', map_location='cuda:0'))\n self.dec_y = FinetunedDecoder(rnn_2, in_dim=self.z_dim)\n\n self.dec_x = ExprDiffDecoder(in_dim=self.z_dim)\n \n self.discr = FCDiscriminator(in_dim=2 * self.z_dim -\n self.joint_dim,\n use_sigmoid=False)\n \n self.discr_indep = FCDiscriminator(in_dim=2 * self.z_dim -\n self.joint_dim,\n use_sigmoid=False)\n\n\n # ------------------------------------------------------------------------\n # TRAINING\n def get_latents(self, batch):\n # pair of objects\n x, y = batch\n \n z_y, s_y = torch.split(self.enc_y(y), self.z_dim - self.joint_dim, -1)\n z_x = torch.randn_like(z_y)\n\n return torch.cat((z_x, s_y), 1)\n\n def get_log_p_x_by_y(self, batch):\n return 
self.dec_x.get_log_prob(batch[0], self.get_latents(batch))\n \n def restore(self, batch):\n # pair of objects\n x, y = batch\n\n # compute encoder outputs and split them into joint and exclusive parts\n z_x = self.enc_x(x)\n z_y, s_y = torch.split(self.enc_y(y), self.z_dim - self.joint_dim, -1)\n \n x_rest = self.dec_x.sample(torch.cat((z_x, s_y), 1))\n y_rest = self.dec_y.sample(torch.cat((z_y, s_y), 1))\n \n return (x_rest, y_rest)\n \n def sample(self, y):\n # sample z\n z_y, s_y = torch.split(self.enc_y(y), self.z_dim - self.joint_dim, -1)\n z_x = torch.randn_like(z_y)\n\n sampled_x = self.dec_x.sample(z=torch.cat((z_x, s_y), 1))\n return sampled_x\n\n def training_step(self, batch, batch_nb, optimizer_i):\n # pair of objects\n x, y = batch\n\n # compute encoder outputs and split them into joint and exclusive parts\n z_x = self.enc_x(x)\n z_y, s_y = torch.split(self.enc_y(y), self.z_dim - self.joint_dim, -1)\n \n if optimizer_i == 0:# GENERATOR LOSS\n # Reconstruction losses\n lat_xy = torch.cat((z_x, s_y), dim=-1)\n lat_y = torch.cat((z_y, s_y), 1)\n \n loss_x_rec = -self.dec_x.get_log_prob(x, lat_xy).mean()\n loss_y_rec = -self.dec_y.get_log_prob(y, lat_y).mean()\n\n # run discriminators\n joint_lat = torch.cat((z_x, s_y, z_y), dim=-1)\n discr_outputs = self.discr(joint_lat)\n \n loss_norm = nn.BCEWithLogitsLoss()(discr_outputs, torch.ones_like(discr_outputs))\n \n discr_outputs = self.discr_indep(torch.cat((z_x, s_y.detach(), z_y), dim=-1))\n loss_indep = nn.BCEWithLogitsLoss()(discr_outputs, torch.ones_like(discr_outputs))\n\n g_loss = (loss_x_rec * self.loss_rec_lambda_x +\n loss_y_rec * self.loss_rec_lambda_y +\n loss_norm * self.loss_normal_lambda + \n loss_indep * self.loss_indep_lambda)\n\n return {'loss': g_loss,\n 'log': {\n 'loss_g': g_loss,\n 'x_rec': loss_x_rec,\n 'y_rec': loss_y_rec,\n 'loss_norm': loss_norm,\n 'loss_indep': loss_indep\n }\n }\n \n elif optimizer_i == 1:# DISCRIMINATOR LOSS\n z_x = z_x.detach()\n s_y = s_y.detach()\n z_y = z_y.detach()\n\n # normal noise discr\n real_inputs = torch.cat((z_x, s_y, z_y), dim=-1)\n real_dec_out = self.discr(real_inputs)\n\n fake_inputs = torch.randn_like(real_inputs)\n fake_dec_out = self.discr(fake_inputs)\n\n probs = torch.cat((real_dec_out, fake_dec_out), 0)\n targets = torch.cat((torch.zeros_like(real_dec_out), \n torch.ones_like(fake_dec_out)), 0)\n\n d_loss_normal = nn.BCEWithLogitsLoss()(probs, targets)\n \n # indep loss \n real_inputs = torch.cat((z_x, s_y, z_y), dim=-1)\n real_dec_out = self.discr_indep(real_inputs)\n \n real_input_shuffled = torch.cat((z_x[np.random.permutation(z_x.shape[0])],\n s_y,\n z_y[np.random.permutation(z_x.shape[0])]),\n dim=-1)\n \n fake_dec_out = self.discr_indep(real_input_shuffled)\n \n probs = torch.cat((real_dec_out, fake_dec_out), 0)\n targets = torch.cat((torch.zeros_like(real_dec_out), \n torch.ones_like(fake_dec_out)), 0)\n\n d_loss_indep = nn.BCEWithLogitsLoss()(probs, targets)\n \n return {'loss': d_loss_normal + d_loss_indep,\n 'log': {'loss_d_normal': d_loss_normal,\n 'loss_d_indep': d_loss_indep}\n }\n\n def configure_optimizers(self):\n gen_params = torch.nn.ModuleList([self.enc_x, self.dec_x, self.enc_y, self.dec_y])\n discr_params = torch.nn.ModuleList([self.discr_indep, self.discr])\n \n \n gen_optim = torch.optim.Adam(gen_params.parameters(), lr=3e-4, betas=(0.5, 0.9))\n discr_optim = torch.optim.Adam(discr_params.parameters(), lr=3e-4, betas=(0.5, 0.9))\n \n discriminator_sched = StepLR(discr_optim, step_size=5000, gamma=0.5)\n\n return [gen_optim, discr_optim], 
[discriminator_sched]\n \n def zero_grad(self):\n self.enc_x.zero_grad()\n self.dec_x.zero_grad()\n self.enc_y.zero_grad()\n self.dec_y.zero_grad()\n self.discr.zero_grad()\n self.discr_indep.zero_grad()\n \n def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_i, optimizer_closure):\n discr_step = (batch_nb % (self.discr_steps + self.gen_steps)) < \\\n self.discr_steps\n\n gen_step = (not discr_step)\n\n if optimizer_i == 0:\n if gen_step:\n optimizer.step()\n optimizer.zero_grad()\n self.zero_grad()\n\n if optimizer_i == 1:\n if discr_step:\n optimizer.step()\n optimizer.zero_grad()\n self.zero_grad()\n\n if optimizer_i > 1:\n optimizer.step()\n optimizer.zero_grad()\n self.zero_grad()\n"
] | [
[
"torch.ones_like",
"torch.randn_like",
"torch.load",
"numpy.random.permutation",
"torch.zeros_like",
"torch.nn.ModuleList",
"torch.nn.BCEWithLogitsLoss",
"torch.optim.lr_scheduler.StepLR",
"torch.cat"
]
] |
mlmaster1995/Detection-of-AD-Using-Graph-Regularized-CNN-Based-on-MRIs | [
"0780be8e7bf68cf6651abb4c597dd87460deff01"
] | [
"python_files/Kmeans.py"
] | [
"\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nfrom graph_data_processing import GraphDataProcess\n\n\nclass KMeans(object):\n def __init__(self, data, K):\n self.__data = data\n self.__K = K\n self.__k_list = tf.range(0, K, 1)\n self.__k_centroids = self.__initialize_k_centorids()\n self.new_centroids = tf.convert_to_tensor([])\n self.centroids_mean = None\n\n def get_K(self):\n return self.__K\n\n def searching_K(self, K_limit, epoch_per_K, verbose=1):\n c_means_clc = []\n for k in range(K_limit):\n tf.keras.backend.clear_session()\n kmeans = KMeans(data=self.__data, K=k + 1)\n kmeans.set_centroids(optimize=True)\n kmeans.fit(epoch=epoch_per_K, verbose=verbose)\n cluster_data = kmeans.get_clusters()\n c_means_clc.append(kmeans.get_centroids_mean(kmeans.new_centroids, cluster_data))\n return c_means_clc, K_limit\n\n def get_centroids_mean(self, centroids, cluster_data):\n def calc_mean_dist_center_data(center, data):\n if np.array(data).size != 0:\n return np.mean(np.sqrt(np.sum(np.power((center - data), 2), axis=1)))\n else:\n return 0.0\n\n return np.mean([calc_mean_dist_center_data(center=centroids[data_pair[1]],\n data=data_pair[0]) for data_pair in cluster_data])\n\n def set_centroids(self, centroids=None, optimize=False):\n \"\"\"\n set up centroids with custom center values or optimized center values\n\n args:\n centroids: the custom centroid value, if optimize is true, the value will be overrided\n optimize: pick the best initial centroid from the dataset\n return:\n N/A, but initialize k center values internally\n \"\"\"\n self.__k_centroids = centroids\n if optimize:\n self.__k_centroids = self.__initialize_k_centorids_optimize()\n\n def get_clusters(self):\n min_distance_cluster_index = self.__get_min_distance_cluster_index()\n cluster_data = []\n for k in range(self.__K):\n k_index = tf.where(min_distance_cluster_index == k)\n if tf.size(k_index).numpy():\n data_cluster = tf.gather_nd(self.__data, k_index)\n cluster_data.append((data_cluster.numpy(), k, tf.shape(data_cluster).numpy()))\n else:\n cluster_data.append(([], k))\n return cluster_data\n\n def __neg_euc_dist_mat(self, mat):\n \"\"\"\n Calculate euclidean distance matrix for furture conditoinal prob computation\n\n Args:\n mat: input matrix or tensors\n Returns:\n negative distance matrix of input tensor\n \"\"\"\n mat_norm = tf.reduce_sum(mat * mat, 1)\n mat_norm = tf.reshape(mat_norm, shape=[-1, 1])\n dist_mat = mat_norm - 2 * tf.matmul(mat, tf.transpose(mat)) + tf.transpose(mat_norm)\n return dist_mat\n\n def __initialize_k_centorids_optimize(self):\n rows, cols = tf.shape(self.__data)\n if self.__K <= rows:\n distance_matrix = self.__neg_euc_dist_mat(self.__data)\n distance_mean = tf.reduce_mean(distance_matrix, axis=1)\n value, indices = 
tf.math.top_k(input=distance_mean, k=self.__K, sorted=True)\n indices = tf.expand_dims(indices, axis=1)\n k_centroids = tf.gather_nd(self.__data, indices)\n return k_centroids\n else:\n raise Exception('cluster number is over the data size!')\n\n def __initialize_k_centorids(self):\n rows, cols = tf.shape(self.__data)\n if self.__K <= rows:\n k_index = tf.random.uniform(shape=(self.__K,), minval=0, maxval=rows - 1, dtype=tf.int32)\n k_index = tf.expand_dims(k_index, axis=1)\n return tf.gather_nd(params=self.__data, indices=k_index)\n else:\n raise Exception('cluster number is over the data size!')\n\n def __get_min_distance_cluster_index(self, ):\n distance_matrix = self.__calc_distance()\n min_distance_cluster_index = tf.math.argmin(input=distance_matrix, axis=0)\n min_distance_cluster_index = tf.cast(min_distance_cluster_index, dtype=tf.int32)\n return min_distance_cluster_index\n\n def __calc_distance(self):\n square_diff = tf.math.squared_difference(x=tf.expand_dims(self.__k_centroids, axis=1),\n y=tf.expand_dims(self.__data, axis=0))\n square_diff_sum = tf.reduce_sum(square_diff, axis=2)\n distance_matrix = tf.sqrt(square_diff_sum)\n return distance_matrix\n\n def __update_centroids(self, min_distance_cluster_index, epoch):\n miss_k = tf.where(tf.convert_to_tensor([k in min_distance_cluster_index for k in self.__k_list]) == False)\n miss_k = tf.cast(miss_k, dtype=tf.int32)\n self.new_centroids = tf.convert_to_tensor([])\n\n def update_centroid(i):\n if i in miss_k:\n miss_centroid_index = tf.gather_nd(miss_k, tf.where(i == miss_k))\n miss_centroid = tf.gather_nd(self.__k_centroids, miss_centroid_index)\n miss_centroid = tf.squeeze(miss_centroid)\n self.new_centroids = tf.concat([self.new_centroids, miss_centroid], axis=0)\n print('epoch: ', epoch, ',miss_centroid: ', miss_centroid)\n else:\n data_index = tf.where(min_distance_cluster_index == i)\n data = tf.gather_nd(self.__data, data_index)\n new_center = tf.reduce_mean(data, axis=0)\n self.new_centroids = tf.concat([self.new_centroids, new_center], axis=0)\n return (tf.add(i, 1),)\n\n condition = lambda i: tf.less(i, self.__K)\n index = tf.constant(0)\n _ = tf.while_loop(condition, update_centroid, [index])\n cols = tf.size(self.new_centroids) / self.__K\n self.new_centroids = tf.reshape(self.new_centroids,\n shape=(self.__K, cols))\n self.__k_centroids = self.new_centroids\n\n def fit(self, epoch, verbose=1):\n \"\"\"fit k-means model\"\"\"\n\n def training_step(i):\n min_distance_cluster_index = self.__get_min_distance_cluster_index()\n self.__update_centroids(min_distance_cluster_index, epoch=i.numpy() + 1)\n # print('epoch: ', i.numpy() + 1,\n # ',centroid mean: ', tf.reduce_mean(self.new_centroids).numpy(),\n # '\\ncentroids:\\n', self.new_centroids.numpy())\n if verbose == 1:\n print('epoch: ', i.numpy() + 1,\n ',centroid mean: ', tf.math.abs(tf.reduce_mean(self.new_centroids)).numpy())\n return (tf.add(i, 1),)\n\n index = tf.constant(0)\n condition = lambda i: tf.less(i, epoch)\n _ = tf.while_loop(condition, training_step, [index])\n self.centroids_mean = tf.reduce_mean(self.new_centroids)\n\n\nclass KMeansModels(object):\n @staticmethod\n def generate_model_list(path_list, rep_dim, K_list, epoch, batch=1000):\n model_list = []\n for path_index, path in enumerate(path_list):\n tf.keras.backend.clear_session()\n print(f'\\n====================== Model {path_index} training ======================\\n')\n image_rep_data = GraphDataProcess.parse_tfr_to_image_rep(path_list=[path],\n rep_dim=rep_dim,\n batch_size=batch)\n iteration 
= iter(image_rep_data)\n data = iteration.get_next()\n data_rep = data['representation']\n kmeans = KMeans(data=data_rep, K=K_list[path_index])\n kmeans.set_centroids(optimize=True)\n kmeans.fit(epoch=epoch)\n model_list.append(kmeans)\n return model_list\n"
] | [
[
"tensorflow.reshape",
"tensorflow.gather_nd",
"tensorflow.squeeze",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.math.argmin",
"tensorflow.less",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.cast",
"tensorflow.while_loop",
"numpy.power",
"tensorflow.size",
"tensorflow.range",
"tensorflow.sqrt",
"tensorflow.reduce_mean",
"tensorflow.keras.backend.clear_session",
"tensorflow.add",
"tensorflow.where",
"tensorflow.random.uniform",
"numpy.array",
"tensorflow.math.top_k"
]
] |
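Editor's note: the KMeans record above drives its training loop with `tf.while_loop` rather than a plain Python loop. Below is a minimal, hypothetical sketch of that pattern only; the counter body and the bound of 5 iterations are placeholders, not code from the record.

```python
import tensorflow as tf

# Minimal sketch of the tf.while_loop pattern used by the KMeans fit loop above:
# a single counter tensor is the loop variable and the body returns it
# incremented, wrapped in the same structure as loop_vars.
def body(i):
    # a real body would recompute cluster assignments and centroids here
    return [tf.add(i, 1)]

cond = lambda i: tf.less(i, 5)                      # run 5 iterations
result = tf.while_loop(cond, body, [tf.constant(0)])
print(result)                                       # final loop variable(s); the counter ends at 5
```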
jungr-ait/spatialmath-python | [
"140d499e733ed9775762df90d36e4b2c4c2fc6eb"
] | [
"tests/base/test_graphics.py"
] | [
"import unittest\nimport numpy as np\nfrom spatialmath.base import *\n\n# test graphics primitives\n# TODO check they actually create artists\n\n\nclass TestGraphics(unittest.TestCase):\n def test_plotvol2(self):\n plotvol2(5)\n\n def test_plotvol3(self):\n plotvol3(5)\n\n def test_plot_box(self):\n plot_box(\"r--\", centre=(-2, -3), wh=(1, 1))\n plot_box(lt=(1, 1), rb=(2, 0), filled=True, color=\"b\")\n\n def test_plot_circle(self):\n plot_circle(1, (0, 0), \"r\") # red circle\n plot_circle(2, (0, 0), \"b--\") # blue dashed circle\n plot_circle(0.5, (0, 0), filled=True, color=\"y\") # yellow filled circle\n\n def test_ellipse(self):\n plot_ellipse(np.diag((1, 2)), \"r\") # red ellipse\n plot_ellipse(np.diag((1, 2)), \"b--\") # blue dashed ellipse\n plot_ellipse(\n np.diag((1, 2)), centre=(1, 1), filled=True, color=\"y\"\n ) # yellow filled ellipse\n\n def test_plot_homline(self):\n plot_homline((1, 2, 3))\n plot_homline((1, -2, 3), \"k--\")\n\n def test_cuboid(self):\n plot_cuboid((1, 2, 3), color=\"g\")\n plot_cuboid((1, 2, 3), centre=(2, 3, 4), color=\"g\")\n plot_cuboid((1, 2, 3), filled=True, color=\"y\")\n\n def test_sphere(self):\n plot_sphere(0.3, color=\"r\")\n plot_sphere(1, centre=(1, 1, 1), filled=True, color=\"b\")\n\n def test_ellipsoid(self):\n plot_ellipsoid(np.diag((1, 2, 3)), color=\"r\") # red ellipsoid\n plot_ellipsoid(\n np.diag((1, 2, 3)), centre=(1, 2, 3), filled=True, color=\"y\"\n ) # yellow filled ellipsoid\n\n def test_cylinder(self):\n plot_cylinder(radius=0.2, centre=(0.5, 0.5, 0), height=[-0.2, 0.2])\n plot_cylinder(\n radius=0.2,\n centre=(0.5, 0.5, 0),\n height=[-0.2, 0.2],\n filled=True,\n resolution=5,\n color=\"red\",\n )\n\n\n# ---------------------------------------------------------------------------------------#\nif __name__ == \"__main__\":\n\n unittest.main(buffer=True)\n"
] | [
[
"numpy.diag"
]
] |
williecostello/BetterReads | [
"318e438f342a08fe2db8f8dd4bce17cd85ef8795"
] | [
"src/04_modelling/embed_sentences.py"
] | [
"import numpy as np\nimport pandas as pd\nimport tensorflow_hub as hub\nimport os\nimport joblib\n\n# Loads Universal Sentence Encoder locally, from downloaded module\nembed = hub.load('data/04_models/universal_sentence_encoder/')\n# Loads Universal Sentence Encoder remotely, from Tensorflow Hub\n# embed = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder/4\")\n\n# Set directories\nread_dir = 'data/03_processed/'\nwrite_dir = 'data/05_model_output/'\n\n# List all CSV files in read directory\nfile_list = [f for f in os.listdir(read_dir) if f.endswith('.csv')]\nctr = 0\n\n# Loop through file in file list\nfor file in file_list:\n\n # Read in processed file as dataframe\n df = pd.read_csv(f'{read_dir}{file}')\n file_stem = file.replace('.csv', '')\n\n # Sample dataframe down to 10000 rows if greater\n if len(df) > 10000:\n df = df.sample(n=10000, random_state=24).reset_index(drop=True)\n\n # Vectorize sentences\n sentence_vectors = embed(df['sentence'])\n\n # Transform Tensor object to Numpy array\n sentence_array = np.array(sentence_vectors)\n\n # Pickle array\n joblib.dump(sentence_array, f'{write_dir}{file_stem}.pkl')\n\n # Create new dataframe with just sentence and rating columns\n export_df = df[['sentence', 'rating']].copy()\n\n # Export dataframe\n export_df.to_csv(f'{write_dir}{file_stem}.csv', index=False)\n\n ctr += 1\n\n print(f'Finished {ctr} of {len(file_list)} ({file_stem})')"
] | [
[
"pandas.read_csv",
"numpy.array"
]
] |
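The embed_sentences.py record above vectorizes review sentences with the Universal Sentence Encoder and pickles the resulting arrays with joblib. A hedged, stripped-down sketch of that flow follows, reusing the TF Hub URL already given in the script's own comment; the sentences and output filename are placeholders.

```python
import numpy as np
import tensorflow_hub as hub
import joblib

# Loads the encoder from TF Hub (the script above can alternatively load a local copy).
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder/4")

sentences = ["a short example sentence", "another example sentence"]  # placeholder inputs
vectors = np.array(embed(sentences))           # (2, 512) array for this encoder
joblib.dump(vectors, "sentence_vectors.pkl")   # hypothetical output path
```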
poulamisganguly/foam_ct_phantom | [
"51820d5b570e2b9b3b0100500edb8666d4bc6909"
] | [
"foam_ct_phantom/generate.py"
] | [
"#-----------------------------------------------------------------------\n#Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam\n#\n#Author: Daniel M. Pelt\n#Contact: [email protected]\n#Website: http://dmpelt.github.io/foam_ct_phantom/\n#License: MIT\n#\n#This file is part of foam_ct_phantom, a Python package for generating\n#foam-like phantoms for CT.\n#-----------------------------------------------------------------------\n\nimport numpy as np\nimport random\nimport sortedcollections\nimport tqdm\nimport h5py\nimport inspect\n\nfrom . import ccode, project, geometry\nfrom .utils import FILE_VERSION\n\n\n\ndef genphantom(outfile, seed, nspheres_per_unit=100000, ntrials_per_unit=1000000, maxsize=0.2, zrange=1.5):\n n = int(ntrials_per_unit*zrange)\n nsph = int(nspheres_per_unit*zrange)\n \n pos3 = np.zeros(n*3,dtype=np.float32) \n random.seed(seed)\n ccode.setseed(random.randint(0,4294967295))\n \n ds = np.zeros(n,dtype=np.float32)\n \n ccode.drawnewpositions(pos3, ds, zrange)\n\n if callable(maxsize):\n maxsizes = maxsize(pos3[::3],pos3[1::3], pos3[2::3])\n msk = ds<-maxsizes\n ds[msk] = -maxsizes[msk]\n\n upd = np.zeros(n, dtype=np.uint32)\n spheres = np.zeros(nsph*5, dtype=np.float32)\n\n sd = sortedcollections.ValueSortedDict(zip(range(ds.size), ds))\n\n for i in tqdm.trange(nsph):\n itms = sd.items()\n if callable(maxsize)==False and itms[0][1]<-maxsize:\n allchoices = []\n for itm in itms:\n if itm[1] >= -maxsize:\n break\n allchoices.append(itm[0])\n ch = random.choice(allchoices)\n spheres[5*i+3] = maxsize\n else:\n allchoices = [itms[0][0],]\n curmax = itms[0][1]\n for itm in range(1,len(itms)):\n if itms[itm][1] == curmax:\n allchoices.append(itms[itm][0])\n else:\n break\n ch = random.choice(allchoices)\n spheres[5*i+3] = -sd[ch]\n spheres[5*i] = pos3[3*ch]\n spheres[5*i+1] = pos3[3*ch+1]\n spheres[5*i+2] = pos3[3*ch+2]\n nupd = ccode.newsphere(pos3, ds, spheres[:5*(i+1)], zrange, upd)\n if callable(maxsize):\n maxsizes = maxsize(pos3[3*upd[:nupd]],pos3[3*upd[:nupd]+1],pos3[3*upd[:nupd]+2])\n msk = ds[upd[:nupd]] < -maxsizes\n ds[upd[:nupd][msk]] = -maxsizes[msk]\n for ky in upd[:nupd]:\n sd[ky] = ds[ky]\n \n with h5py.File(outfile,'w') as f:\n f['spheres'] = spheres\n f.attrs['FILE_VERSION'] = FILE_VERSION\n att = f['spheres'].attrs\n att['seed'] = seed\n att['nspheres_per_unit'] = nspheres_per_unit\n att['ntrials_per_unit'] = ntrials_per_unit\n if callable(maxsize):\n att['maxsize'] = inspect.getsource(maxsize)\n else:\n att['maxsize'] = maxsize\n att['zrange'] = zrange\n\n\ndef genvol(outfile, phantom, geom, zoomfactor=1):\n nx = geom.nx\n ny = geom.ny\n nz = geom.nz\n voxsize = geom.voxsize*zoomfactor\n supersampling = geom.supersampling\n if isinstance(phantom, str):\n with h5py.File(phantom, 'r') as f:\n spheres = f['spheres'][:]\n else:\n spheres = phantom\n mi = np.argmin([nx,ny,nz])\n vol = np.zeros((nz, ny, nx), dtype=np.float32)\n for i in tqdm.trange(nz):\n ccode.genvol(spheres, vol, nx, ny, nz, voxsize, i, cx=geom.cx*zoomfactor, cy=geom.cy*zoomfactor, cz=geom.cz*zoomfactor, supersampling=supersampling)\n\n with h5py.File(outfile, 'w') as f:\n f.attrs['FILE_VERSION'] = FILE_VERSION\n f['volume'] = vol\n att = f['volume'].attrs\n for key, val in geom.to_dict().items():\n att[key] = val\n if isinstance(phantom, str):\n att['phantom'] = phantom\n\ndef gen3d(phantom, nx, ny, pixsize, angle, tilt1, tilt2, maxz=1.5, cutout=0, cutoff=-np.inf):\n if isinstance(phantom, str):\n with h5py.File(phantom, 'r') as f:\n spheres = f['spheres'][:]\n else:\n spheres = phantom\n 
return ccode.gen3dproj(spheres, nx, ny, pixsize, angle, tilt1, tilt2, maxz=maxz, cutout=cutout, cutoff=cutoff)\n\ndef gen_dataset(outfile, phantom, geom):\n angles = geom.angles\n nx = geom.nx\n ny = geom.ny\n pixsize = geom.pixsize\n supersampling = geom.supersampling\n with h5py.File(outfile, 'w') as f:\n f.attrs['FILE_VERSION'] = FILE_VERSION\n dset = f.create_dataset('projs', (len(angles),ny,nx), dtype='f4')\n for i in tqdm.trange(len(angles)):\n if type(geom) == geometry.ParallelGeometry:\n dset[i] = project.single_par_projection(phantom,nx,ny,pixsize,angles[i],cx=geom.cx,cy=geom.cy,rotcx=geom.rotcx,rotcy=geom.rotcy, supersampling=supersampling)\n elif type(geom) == geometry.ConeGeometry:\n dset[i] = project.single_cone_projection(phantom, nx, ny, pixsize, angles[i], geom.sod, geom.sod + geom.odd, zoff=geom.zoff, supersampling=supersampling, usecuda=geom.usecuda)\n att = dset.attrs\n for key, val in geom.to_dict().items():\n att[key] = val\n att['phantom'] = phantom\n\n"
] | [
[
"numpy.argmin",
"numpy.zeros"
]
] |
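genphantom() in the record above keeps candidate sphere sizes in a sortedcollections.ValueSortedDict so that the entry with the smallest (most negative) value is always first. A small sketch of that behaviour, with made-up keys and values:

```python
import sortedcollections

# Keys stand for sphere indices, values for negative allowed radii; items()
# iterates in ascending value order, so the most negative entry comes first.
sd = sortedcollections.ValueSortedDict(zip([0, 1, 2], [-0.3, -0.1, -0.5]))
print(list(sd.items())[0])   # (2, -0.5)

sd[1] = -0.9                 # assigning a new value re-sorts the dict
print(list(sd.items())[0])   # (1, -0.9)
```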
Hiwyl/mmdetection-obj | [
"6d9c6064503ee87d490b20d30f9ae0dda7d60d27"
] | [
"tools/test.py"
] | [
"import argparse\nimport os\nimport os.path as osp\nimport pickle\nimport shutil\nimport tempfile\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import get_dist_info, init_dist, load_checkpoint\n\nfrom mmdet.core import wrap_fp16_model\nfrom mmdet.datasets import build_dataloader, build_dataset\nfrom mmdet.models import build_detector\n\n\ndef single_gpu_test(model, data_loader, show=False):\n model.eval()\n results = []\n dataset = data_loader.dataset\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=not show, **data)\n results.append(result)\n\n if show:\n model.module.show_result(data, result)\n\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size):\n prog_bar.update()\n return results\n\n\ndef multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):\n \"\"\"Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n it encodes results to gpu tensors and use gpu communication for results\n collection. On cpu mode it saves the results on different gpus to 'tmpdir'\n and collects them by the rank 0 worker.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n tmpdir (str): Path of directory to save the temporary results from\n different gpus under cpu mode.\n gpu_collect (bool): Option to use either gpu or cpu to collect results.\n\n Returns:\n list: The prediction results.\n \"\"\"\n model.eval()\n results = []\n dataset = data_loader.dataset\n rank, world_size = get_dist_info()\n if rank == 0:\n prog_bar = mmcv.ProgressBar(len(dataset))\n for i, data in enumerate(data_loader):\n with torch.no_grad():\n result = model(return_loss=False, rescale=True, **data)\n results.append(result)\n\n if rank == 0:\n batch_size = data['img'][0].size(0)\n for _ in range(batch_size * world_size):\n prog_bar.update()\n\n # collect results from all ranks\n if gpu_collect:\n results = collect_results_gpu(results, len(dataset))\n else:\n results = collect_results_cpu(results, len(dataset), tmpdir)\n return results\n\n\ndef collect_results_cpu(result_part, size, tmpdir=None):\n rank, world_size = get_dist_info()\n # create a tmp dir if it is not specified\n if tmpdir is None:\n MAX_LEN = 512\n # 32 is whitespace\n dir_tensor = torch.full((MAX_LEN, ),\n 32,\n dtype=torch.uint8,\n device='cuda')\n if rank == 0:\n tmpdir = tempfile.mkdtemp()\n tmpdir = torch.tensor(\n bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n dir_tensor[:len(tmpdir)] = tmpdir\n dist.broadcast(dir_tensor, 0)\n tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n else:\n mmcv.mkdir_or_exist(tmpdir)\n # dump the part result to the dir\n mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))\n dist.barrier()\n # collect all parts\n if rank != 0:\n return None\n else:\n # load results of all parts from tmp dir\n part_list = []\n for i in range(world_size):\n part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))\n part_list.append(mmcv.load(part_file))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n # remove tmp dir\n shutil.rmtree(tmpdir)\n return 
ordered_results\n\n\ndef collect_results_gpu(result_part, size):\n rank, world_size = get_dist_info()\n # dump result part to tensor with pickle\n part_tensor = torch.tensor(\n bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')\n # gather all result part tensor shape\n shape_tensor = torch.tensor(part_tensor.shape, device='cuda')\n shape_list = [shape_tensor.clone() for _ in range(world_size)]\n dist.all_gather(shape_list, shape_tensor)\n # padding result part tensor to max length\n shape_max = torch.tensor(shape_list).max()\n part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')\n part_send[:shape_tensor[0]] = part_tensor\n part_recv_list = [\n part_tensor.new_zeros(shape_max) for _ in range(world_size)\n ]\n # gather all result part\n dist.all_gather(part_recv_list, part_send)\n\n if rank == 0:\n part_list = []\n for recv, shape in zip(part_recv_list, shape_list):\n part_list.append(\n pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))\n # sort the results\n ordered_results = []\n for res in zip(*part_list):\n ordered_results.extend(list(res))\n # the dataloader may pad some samples\n ordered_results = ordered_results[:size]\n return ordered_results\n\n\nclass MultipleKVAction(argparse.Action):\n \"\"\"\n argparse action to split an argument into KEY=VALUE form\n on the first = and append to a dictionary.\n \"\"\"\n\n def _is_int(self, val):\n try:\n _ = int(val)\n return True\n except Exception:\n return False\n\n def _is_float(self, val):\n try:\n _ = float(val)\n return True\n except Exception:\n return False\n\n def _is_bool(self, val):\n return val.lower() in ['true', 'false']\n\n def __call__(self, parser, namespace, values, option_string=None):\n options = {}\n for val in values:\n parts = val.split('=')\n key = parts[0].strip()\n if len(parts) > 2:\n val = '='.join(parts[1:])\n else:\n val = parts[1].strip()\n # try parsing val to bool/int/float first\n if self._is_bool(val):\n import json\n val = json.loads(val.lower())\n elif self._is_int(val):\n val = int(val)\n elif self._is_float(val):\n val = float(val)\n options[key] = val\n setattr(namespace, self.dest, options)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='MMDet test (and eval) a model')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('--out', help='output result file in pickle format')\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n help='evaluation metrics, which depends on the dataset, e.g., \"bbox\",'\n ' \"segm\", \"proposal\" for COCO, and \"mAP\", \"recall\" for PASCAL VOC')\n parser.add_argument('--show', action='store_true', help='show results')\n parser.add_argument(\n '--gpu_collect',\n action='store_true',\n help='whether to use gpu to collect results.')\n parser.add_argument(\n '--tmpdir',\n help='tmp directory used for collecting results from multiple '\n 'workers, available when gpu_collect is not specified')\n parser.add_argument(\n '--options', nargs='+', action=MultipleKVAction, help='custom options')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n args = parse_args()\n\n assert args.out or args.eval or args.show, \\\n ('Please specify at least 
one operation (save or eval or show the '\n 'results) with the argument \"--out\", \"--eval\" or \"--show\"')\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n cfg = mmcv.Config.fromfile(args.config)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = build_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n imgs_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n # old versions did not save class info in checkpoints, this walkaround is\n # for backward compatibility\n if 'CLASSES' in checkpoint['meta']:\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n model.CLASSES = dataset.CLASSES\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs = single_gpu_test(model, data_loader, args.show)\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False)\n outputs = multi_gpu_test(model, data_loader, args.tmpdir,\n args.gpu_collect)\n\n rank, _ = get_dist_info()\n if rank == 0:\n if args.out:\n print('\\nwriting results to {}'.format(args.out))\n mmcv.dump(outputs, args.out)\n if args.eval:\n kwargs = {} if args.options is None else args.options\n dataset.evaluate(outputs, args.eval, **kwargs)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.distributed.all_gather",
"torch.distributed.broadcast",
"torch.no_grad",
"torch.tensor",
"torch.full",
"torch.cuda.current_device",
"torch.distributed.barrier",
"torch.zeros"
]
] |
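The MultipleKVAction class in the record above turns repeated KEY=VALUE command-line arguments into a dictionary. Below is a simplified, standalone demo of the same argparse idea; unlike the original it keeps all values as strings and omits the bool/int/float coercion.

```python
import argparse

class KVAction(argparse.Action):
    """Collect repeated KEY=VALUE arguments into a dict (simplified sketch)."""
    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for val in values:
            key, _, value = val.partition("=")
            options[key.strip()] = value.strip()
        setattr(namespace, self.dest, options)

parser = argparse.ArgumentParser()
parser.add_argument("--options", nargs="+", action=KVAction)
args = parser.parse_args(["--options", "iou_thr=0.5", "classwise=true"])
print(args.options)  # {'iou_thr': '0.5', 'classwise': 'true'}
```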
kokbent/covid-chicago | [
"a1530d25508453f84db9d61437c61ffd573901af"
] | [
"plotters/Ki_plotter.py"
] | [
"import argparse\nimport os\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys\nsys.path.append('../')\nfrom load_paths import load_box_paths\nimport matplotlib.dates as mdates\nimport seaborn as sns\nfrom processing_helpers import *\n\nmpl.rcParams['pdf.fonttype'] = 42\n\ndef parse_args():\n description = \"Simulation run for modeling Covid-19\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"-stem\",\n \"--stem\",\n type=str,\n help=\"Name of simulation experiment\",\n default=\"20201202_IL_mr_v0_testrun\"\n )\n parser.add_argument(\n \"-loc\",\n \"--Location\",\n type=str,\n help=\"Local or NUCLUSTER\",\n default=\"Local\"\n )\n parser.add_argument(\n \"-l\",\n \"--labels\",\n type=str,\n nargs='+',\n help=\"Experiment labels, if not specified will be extracted from exp_names\",\n default=None\n )\n return parser.parse_args()\n\n\ndef plot_Ki(exp_name,first_day,last_day):\n\n base_list = ['time', 'startdate', 'scen_num', 'sample_num', 'run_num']\n \"\"\"Get group names\"\"\"\n grp_list, grp_suffix, grp_numbers = get_group_names(exp_path=sim_output_path)\n grp_list = [grp for grp in grp_list if grp !=\"All\"]\n\n fig = plt.figure(figsize=(16, 8))\n fig.subplots_adjust(right=0.97, left=0.05, hspace=0.4, wspace=0.2, top=0.95, bottom=0.05)\n palette = sns.color_palette('Set1', 12)\n\n for c, grp in enumerate(grp_list):\n column_list = base_list + [f'Ki_t_{grp}']\n df = load_sim_data(exp_name, region_suffix=f'_{grp}', column_list=column_list)\n df = df[df['date'].between(pd.Timestamp(first_day), pd.Timestamp(last_day))]\n\n\n ax = fig.add_subplot(3, 4, c + 1)\n mdf = df.groupby('date')[f'Ki_t'].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index()\n\n ax.set_title(grp.replace('_EMS-', 'COVID-19 Region '))\n ax.plot(mdf['date'], mdf['CI_50'], color=palette[0])\n ax.fill_between(mdf['date'].values, mdf['CI_2pt5'], mdf['CI_97pt5'], color=palette[0], linewidth=0, alpha=0.2)\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\\n%Y'))\n\n plotname = f'Ki_by_covidregion'\n plt.suptitle('Time varying transmission rate (Ki_t)', x=0.5, y=0.999, fontsize=14)\n plt.tight_layout()\n\n plt.savefig(os.path.join(plot_path, plotname + '.png'))\n plt.savefig(os.path.join(plot_path, 'pdf', plotname + '.pdf'), format='PDF')\n\n\ndef plot_Ki_compare(exp_names, labels, first_day, last_day):\n sim_output_path = os.path.join(wdir, 'simulation_output', exp_names[-1])\n plot_path = os.path.join(sim_output_path, '_plots')\n\n base_list = ['time', 'startdate', 'scen_num', 'sample_num', 'run_num']\n \"\"\"Get group names\"\"\"\n grp_list, grp_suffix, grp_numbers = get_group_names(exp_path=sim_output_path)\n grp_list = [grp for grp in grp_list if grp != \"All\"]\n\n fig = plt.figure(figsize=(16, 8))\n fig.subplots_adjust(right=0.97, left=0.05, hspace=0.4, wspace=0.2, top=0.95, bottom=0.05)\n axes = [fig.add_subplot(3, 4, x + 1) for x in range(len(grp_list))]\n\n palette = sns.color_palette('Set1', 12)\n\n\n for e, exp_name in enumerate(exp_names):\n\n for c, grp in enumerate(grp_list):\n column_list = base_list +[ f'Ki_t_{grp}']\n df = load_sim_data(exp_name, region_suffix=f'_{grp}', column_list=column_list)\n df = df[df['date'].between(pd.Timestamp(first_day), pd.Timestamp(last_day))]\n\n ax = axes[c]\n mdf = df.groupby('date')[f'Ki_t_{grp}'].agg([CI_50, CI_2pt5, CI_97pt5, CI_25, CI_75]).reset_index()\n\n ax.set_title(grp.replace('_EMS-', 'COVID-19 Region '))\n ax.plot(mdf['date'], mdf['CI_50'], color=palette[e], 
label=exp_name)\n ax.fill_between(mdf['date'].values, mdf['CI_2pt5'], mdf['CI_97pt5'], color=palette[e], linewidth=0,\n alpha=0.2)\n ax.xaxis.set_major_formatter(mdates.DateFormatter('%b\\n%Y'))\n\n axes[-1].legend()\n plotname = f'Ki_by_covidregion_compare'\n plt.suptitle('Time varying transmission rate (Ki_t)', x=0.5, y=0.999, fontsize=14)\n plt.tight_layout()\n\n plt.savefig(os.path.join(plot_path, plotname + '.png'))\n plt.savefig(os.path.join(plot_path, 'pdf', plotname + '.pdf'), format='PDF')\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n stem = args.stem\n Location = args.Location\n\n first_plot_day = pd.Timestamp('2020-12-01') #pd.Timestamp.today()- pd.Timedelta(60,'days')\n last_plot_day = pd.Timestamp.today()+ pd.Timedelta(90,'days')\n\n datapath, projectpath, wdir, exe_dir, git_dir = load_box_paths(Location=Location)\n\n\n exp_names = [x for x in os.listdir(os.path.join(wdir, 'simulation_output')) if stem in x]\n for exp_name in exp_names:\n sim_output_path = os.path.join(wdir, 'simulation_output', exp_name)\n plot_path = os.path.join(sim_output_path, '_plots')\n\n plot_Ki(exp_name,first_day=first_plot_day, last_day=last_plot_day)\n if len(exp_names) > 1:\n labels = args.labels\n if labels == None:\n labels = [''.join(exp.split(\"_\")[-3:]) for exp in exp_names]\n plot_Ki_compare(exp_names, labels, first_day=first_plot_day, last_day=last_plot_day)\n"
] | [
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.dates.DateFormatter",
"pandas.Timestamp.today",
"pandas.Timedelta",
"matplotlib.pyplot.suptitle",
"matplotlib.use",
"pandas.Timestamp"
]
] |
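plot_Ki() above summarises simulation trajectories with a groupby/agg over custom CI_* helpers imported from processing_helpers. Those helpers are not shown in the record, so the sketch below substitutes hypothetical percentile functions and toy data purely to illustrate the aggregation pattern.

```python
import numpy as np
import pandas as pd

# Stand-ins for the CI_* helpers used in the record above (assumed to be
# percentile aggregators; the real definitions live in processing_helpers).
def CI_50(x): return np.percentile(x, 50)
def CI_2pt5(x): return np.percentile(x, 2.5)
def CI_97pt5(x): return np.percentile(x, 97.5)

df = pd.DataFrame({
    "date": pd.to_datetime(["2020-12-01"] * 3 + ["2020-12-02"] * 3),
    "Ki_t": [0.5, 0.6, 0.7, 0.4, 0.5, 0.6],   # made-up values
})
mdf = df.groupby("date")["Ki_t"].agg([CI_50, CI_2pt5, CI_97pt5]).reset_index()
print(mdf.columns.tolist())  # ['date', 'CI_50', 'CI_2pt5', 'CI_97pt5']
```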
arokem/nipy | [
"d6b2e862c65558bb5747c36140fd6261a7e1ecfe"
] | [
"nipy/modalities/fmri/tests/test_hrf.py"
] | [
"\"\"\" Testing hrf module\n\"\"\"\n\nfrom os.path import dirname, join as pjoin\n\nimport numpy as np\n\nfrom scipy.stats import gamma\nimport scipy.io as sio\n\nfrom ..hrf import (\n gamma_params,\n gamma_expr,\n lambdify_t,\n spm_hrf_compat,\n spmt,\n dspmt,\n ddspmt,\n )\n\nfrom nose.tools import assert_raises\nfrom numpy.testing import assert_almost_equal\n\n\ndef test_gamma():\n t = np.linspace(0, 30, 5000)\n # make up some numbers\n pk_t = 5.0\n fwhm = 6.0\n # get the estimated parameters\n shape, scale, coef = gamma_params(pk_t, fwhm)\n # get distribution function\n g_exp = gamma_expr(pk_t, fwhm)\n # make matching standard distribution\n gf = gamma(shape, scale=scale).pdf\n # get values\n L1t = gf(t)\n L2t = lambdify_t(g_exp)(t)\n # they are the same bar a scaling factor\n nz = np.abs(L1t) > 1e-15\n sf = np.mean(L1t[nz] / L2t[nz])\n assert_almost_equal(L1t , L2t*sf)\n\n\ndef test_spm_hrf():\n # Regression tests for spm hrf, time derivative and dispersion derivative\n # Check that absolute values don't change (much) with different dt, and that\n # max values are roughly the same and in the same place in time\n for dt in 0.1, 0.01, 0.001:\n t_vec = np.arange(0, 32, dt)\n hrf = spmt(t_vec)\n assert_almost_equal(np.max(hrf), 0.21053, 5)\n assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2)\n dhrf = dspmt(t_vec)\n assert_almost_equal(np.max(dhrf), 0.08, 3)\n assert_almost_equal(t_vec[np.argmax(dhrf)], 3.3, 1)\n dhrf = ddspmt(t_vec)\n assert_almost_equal(np.max(dhrf), 0.10, 2)\n assert_almost_equal(t_vec[np.argmax(dhrf)], 5.7, 1)\n # Test reversed time vector to check that order of time values does not\n # affect result\n rt_vec = np.arange(0, 32, 0.01)\n rhrf = spmt(rt_vec)\n assert_almost_equal(np.max(rhrf), 0.21053, 5)\n assert_almost_equal(t_vec[np.argmax(hrf)], 5, 2)\n\n\ndef test_spm_hrf_octave():\n # Test SPM hrf against output from SPM code running in Octave\n my_path = dirname(__file__)\n hrfs_path = pjoin(my_path, 'spm_hrfs.mat')\n # mat file resulting from make_hrfs.m\n hrfs_mat = sio.loadmat(hrfs_path, squeeze_me=True)\n params = hrfs_mat['params']\n hrfs = hrfs_mat['hrfs']\n for i, pvec in enumerate(params):\n dt, ppk, upk, pdsp, udsp, rat = pvec\n t_vec = np.arange(0, 32.1, dt)\n our_hrf = spm_hrf_compat(t_vec,\n peak_delay=ppk,\n peak_disp=pdsp,\n under_delay=upk,\n under_disp=udsp,\n p_u_ratio=rat)\n # Normalize integral to match SPM\n assert_almost_equal(our_hrf, hrfs[i])\n # Test basis functions\n # mat file resulting from get_td_dd.m\n bases_path = pjoin(my_path, 'spm_bases.mat')\n bases_mat = sio.loadmat(bases_path, squeeze_me=True)\n dt = bases_mat['dt']\n t_vec = np.arange(0, 32 + dt, dt)\n # SPM function divides by sum of values - revert with dt\n assert_almost_equal(spmt(t_vec), bases_mat['hrf'] / dt, 4)\n assert_almost_equal(dspmt(t_vec), bases_mat['dhrf'] / dt, 4)\n assert_almost_equal(ddspmt(t_vec), bases_mat['ddhrf'] / dt, 4)\n\n\ndef test_spm_hrf_errors():\n t_vec = np.arange(0, 32)\n # All 1s is fine\n res = spm_hrf_compat(t_vec, 1, 1, 1, 1)\n # 0 or negative raise error for other args\n args = [0]\n for i in range(4):\n assert_raises(ValueError, spm_hrf_compat, t_vec, *args)\n args[-1] = -1\n assert_raises(ValueError, spm_hrf_compat, t_vec, *args)\n args[-1] = 1\n args.append(0)\n"
] | [
[
"numpy.testing.assert_almost_equal",
"scipy.io.loadmat",
"numpy.abs",
"numpy.argmax",
"numpy.arange",
"numpy.max",
"scipy.stats.gamma",
"numpy.linspace",
"numpy.mean"
]
] |
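test_gamma() in the record above compares a symbolic HRF expression against scipy.stats.gamma up to a scale factor. A tiny sketch of the scipy side only, with illustrative shape/scale values rather than the ones gamma_params would return:

```python
import numpy as np
from scipy.stats import gamma

t = np.linspace(0, 30, 5000)
shape, scale = 4.0, 1.25                     # illustrative parameters
pdf_vals = gamma(shape, scale=scale).pdf(t)  # frozen gamma distribution pdf
# the mode of a gamma pdf is (shape - 1) * scale, i.e. 3.75 here
print(t[np.argmax(pdf_vals)])                # approximately 3.75
```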
hirune924/ml-tools | [
"1a4e3d205b6eeef7bef7ef2f205ac8087ffa7f69"
] | [
"src/model/train.py"
] | [
"# import comet_ml in the top of your file\nfrom comet_ml import Experiment\n \n# Add the following code anywhere in your machine learning file\nexperiment = Experiment(api_key=\"QCxbRVX2qhQj1t0ajIZl2nk2c\",\n project_name=\"ml-tools\", workspace=\"hirune924\",\n auto_param_logging=False)\n\nfrom omegaconf import DictConfig, OmegaConf\nimport hydra\nfrom hydra import utils\nimport sys\nimport os\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom speeder.model.trainer import Trainer\nfrom speeder.feature.feature_manager import load_features, load_feature \nfrom speeder.utils import flatten_dict_cfg\[email protected](config_path=\"../config/modeling_lgbm.yaml\", strict=False)\ndef main(cfg: DictConfig) -> None:\n print(cfg.pretty())\n experiment_name = '/'.join(os.getcwd().split('/')[-2:])\n experiment.set_name(experiment_name)\n experiment.log_parameters(flatten_dict_cfg(cfg, sep='/'))\n # Define CV\n cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=2020)\n\n # Load Features\n feature_list = cfg['feature']\n feature_names = [f + '_train.ftr' for f in feature_list]\n train_df = load_features(feature_names, dir=utils.to_absolute_path('features'), ignore_columns = None)\n feature_names = [f + '_test.ftr' for f in feature_list]\n test_df = load_features(feature_names, dir=utils.to_absolute_path('features'), ignore_columns = None)\n\n target_df = load_feature('_train.ftr', dir=utils.to_absolute_path('features'), ignore_columns = None)[['Survived']]\n sub_df = load_feature('_test.ftr', dir=utils.to_absolute_path('features'), ignore_columns = None)[['PassengerId']]\n\n #print(train_df.head())\n #print(test_df.head())\n #print(target_df.head())\n\n trainer = Trainer(configs=cfg, X_train=train_df, y_train=target_df, X_test=test_df, cv=cv, experiment=experiment)\n trainer.run_train_cv()\n trainer.run_predict_cv()\n trainer.submission(sub_df)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"sklearn.model_selection.StratifiedKFold"
]
] |
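train.py above builds a StratifiedKFold splitter and hands it to a project-specific Trainer that is not shown here. The sketch below only demonstrates the splitter itself on a toy balanced target, using the same constructor arguments as the record.

```python
import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.arange(20).reshape(10, 2)          # toy features
y = np.array([0, 1] * 5)                  # toy balanced binary target
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=2020)
for fold, (train_idx, valid_idx) in enumerate(cv.split(X, y)):
    # each validation fold keeps one sample of each class
    print(fold, train_idx.tolist(), valid_idx.tolist())
```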
enternityFan/FakeNewsProject | [
"b3bd18c2ae715ea2ce5e8cfdacb04e813b0b095b"
] | [
"test_ch.py"
] | [
"# @Time : 2022-02-22 18:56\n# @Author : Phalange\n# @File : test_ch.py.py\n# @Software: PyCharm\n# C'est la vie,enjoy it! :D\n\nimport os\nimport pandas as pd\nimport torch\nfrom d2l import torch as d2l\nimport DataProcess\nimport Module.AttentionModel\nimport Module.evalScript\nfrom torch import nn\nimport numpy as np\nimport jieba_fast as jieba\nfrom tqdm import *\nimport pickle\n\nweight_path = \"./Cache/epoch_20_ch.pth\"\nglove_file =\"./Data/wiki.zh.glove.Mode\"\ntrain_vocab_path = \"./Data/train_vocab.pkl\"\ntest_vocab_path = \"./Data/test_vocab.pkl\"\n\nlabel_set = {'disagreed': 0, 'agreed': 1, 'unrelated': 2}\nlabel_list = ['disagreed','agreed','unrelated']\n\nif not os.path.exists(weight_path):\n print(\"请检查权重路径是否正确!\")\n raise FileNotFoundError\n\n\ntest_data = pd.read_csv(\"./Data/test.csv\")\ntest_data = test_data.iloc[:,[0,3,4]]# id 前提 假设\ntest_data = list(test_data.values)\n\ndef preprocess(features):\n \"\"\"\n 传入的应该是一个[data.iloc[:,0] , data.iloc[:,1],data.iloc[:,2]]列表\n 返回一个三个列表组成的元组:(id,premises,hypotheses)\n \"\"\"\n # 去掉字符串\n premises,hypotheses = Replace_ch(features, ' ')\n id = [int(line.tolist()[0]) for line in features]\n\n\n return id,premises, hypotheses\n\ndef Replace_ch(text,new): #替换列表的字符串\n premises,hypotheses = [],[]\n sign = \"\\xa0!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏.\"\n for line in text:\n line1,line2 = line[1],line[2]\n for ch in sign:\n line1 = str(line1).replace(ch,new)\n line2 = str(line2).replace(ch,new)\n premises.append(line1)\n hypotheses.append(line2)\n return premises,hypotheses\n\ntest_data = list(preprocess(test_data))\npredict_label = []\n\n# 读取事先保存的vocab\nvocab = DataProcess.Vocab()\nwith open(train_vocab_path,'rb') as f:\n vocab = pickle.loads(f.read())\nprint(\"读取vocab成功\")\n\n#vocab = DataProcess.Vocab(DataProcess.tokenize(test_data[1])+DataProcess.tokenize(test_data[2]),min_freq=5, reserved_tokens=['<pad>'])\n#print(\"vocab makes success!\")\n\nembed_size, num_hiddens, devices = 300, 200, d2l.try_all_gpus()\nnet = Module.AttentionModel.DecomposableAttention(vocab, embed_size, num_hiddens)\n\n\n\nnet.load_state_dict(torch.load(weight_path))\n#下面这个glove层应该就不用加载了,因为保存的时候就是有的。\nglove_embedding =DataProcess.Embedding(glove_file)\nembeds = glove_embedding[vocab.idx_to_token]\nnet.embedding.weight.data.copy_(embeds)\nnet.to(device=devices[0])\nprint(\"模型加载成功!!准备预测。。。\")\nnet.eval()\nsave_data = []\nfor i in tqdm(range(len(test_data[0]))):\n label = Module.evalScript.predict_fake_news(net, vocab, jieba.lcut(test_data[1][i]), jieba.lcut(test_data[2][i]))\n\n save_data.append([test_data[0][i],label])\n\nprint(predict_label[:5])\n\n# 保存submission.csv\nprint(\"saving data....\")\ndf = pd.DataFrame(save_data,columns=[\"Id\",\"Category\"])\ndf.to_csv(\"./Data/submission_ch.csv\",index=False)\nprint(\"data saving success!!\")\n\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"torch.load"
]
] |
s4f-leipzig/Droughtstripes | [
"938cff9fa454dbf84c122ffcc38e07127f70778f"
] | [
"droughtstripes_monthly_python3.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 4 13:03:43 2020\n\n@author: Scientists4Future Leipzig\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport urllib \nfrom matplotlib.colors import LinearSegmentedColormap\n\n\n## Manual entries required: \n# Enter number of month to plot - add '0' before months with one digit\nmonth = \"04\"\n# Enter your local path and filename where to save the raw data\nlocal =\"regional_averages_rr_\"+month+\".txt\"\n\n\n## Definition of manual colormap:\n# create colormap\ndef custom_div_cmap(numcolors=256, name='custom_div_cmap',colors=['saddlebrown','chocolate','white','darkturquoise','darkcyan']):\n \"\"\" Create a custom colormap\n \tColors can be specified in any way understandable by matplotlib.colors.ColorConverter.to_rgb() \n \t-> https://matplotlib.org/3.1.0/gallery/color/named_colors.html\n \"\"\"\n cmap = LinearSegmentedColormap.from_list(name=name, colors=colors, N=numcolors)\n return cmap\n\n\n### RETRIEVE DATA FROM DWD \n#link to DWD server\nlink = \"https://opendata.dwd.de/climate_environment/CDC/regional_averages_DE/monthly/precipitation/regional_averages_rr_\"+month+\".txt\"\n#retrieve data and cleanup !for Python2.7 comment out lines 39, 40 and uncomment lines 41, 42\nprecip_raw = urllib.request.urlretrieve(link, local)\nurllib.request.urlcleanup()\n#precip_raw = urllib.urlretrieve(link, local)\n#urllib.urlcleanup()\n#read in the data as pandas table\ndata = pd.read_table(precip_raw[0],skiprows=1, sep=';')\n\n\n#### SOME SPECIFICATIONS BEFORE PLOTTING\n# reference period for mean calculation 1971 - 2000 according to warming stripes by Ed Hawkins (see https://showyourstripes.info/faq)\nref_min = 1971\nref_max = 2000\n\n#select the data during the refence period\nref_data = data[(data['Jahr']>=ref_min) & (data['Jahr']<=ref_max)]\n\n#reference period for the standard deviation, also ccording to the original warming stripes\nref_min_std = 1901\nref_max_std = 2000\n\n#select the data during the std ref period\nref_data_std = data[(data['Jahr']>=ref_min_std) & (data['Jahr']<=ref_max_std)]\n\n# a dictionary for the quick selection of a federal state or whole Germany by number\nregio = {1:'Sachsen',2:'Deutschland',3:'Brandenburg/Berlin', 4:'Brandenburg',\n 5:'Baden-Wuerttemberg', 6:'Bayern', 7:'Hessen', 8:'Mecklenburg-Vorpommern',\n 9:'Niedersachsen', 10:'Niedersachsen/Hamburg/Bremen',\n 11:'Nordrhein-Westfalen', 12:'Rheinland-Pfalz', 13:'Schleswig-Holstein',\n 14:'Saarland', 15:'Sachsen-Anhalt',\n 16:'Thueringen/Sachsen-Anhalt', 17:'Thueringen'}\n\n### PLOTTING OF DROUGHTSTRIPES\n#select the federal state you want to plot, numbers according to dictionary above, here: Sachsen, Deutschland\nregio_lst=[1,2]\n#loop through selected states and create a plot for each\nfor reg in regio_lst: \n region = regio[reg] \n # calculate the standard deviation for the period definded above\n std = ref_data_std[region].std()\n #select temperature in the region\n temps_region = data[region]\n \n # calculate the precipitation anomaly i.e. 
deviation from defined mean of ref period\n temps = temps_region - ref_data[region].mean()\n ## stack data to be able to plot them with imshow\n stacked_temps = np.stack((temps, temps))\n \n #min and max values for the colormap !this value deviates from the warming stripes where a standard deviation of +/-2.6 was chosen\n vmin = -1.7*std \n vmax = 1.7*std \n \n ## plotting\n fig = plt.figure(figsize=(16,9)) #adjust figsize, for example for cubic figure\n #plot the image, with manual color bar defined above in custom_div_cmap function\n cmap = custom_div_cmap()\n img = plt.imshow(stacked_temps, cmap=cmap, aspect='auto', vmin=vmin, vmax=vmax, interpolation='none')\n #this just turns all labels, axis etc off so that there are only the stripes\n plt.gca().set_axis_off()\n plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)\n plt.margins(0,0)\n plt.gca().xaxis.set_major_locator(plt.NullLocator())\n plt.gca().yaxis.set_major_locator(plt.NullLocator())\n #save in your desired directory\n plt.savefig(\"stripes_\"+temps.name+'_'+month+'_'+str(data['Jahr'].min())+'-'+str(data['Jahr'].max())+\".jpg\", bbox_inches = 'tight', pad_inches = 0, dpi=300)\n"
] | [
[
"pandas.read_table",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.margins",
"matplotlib.pyplot.NullLocator",
"numpy.stack"
]
] |
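The drought-stripes script above stacks precipitation anomalies into a two-row array and renders them with imshow under a custom diverging colormap. A hedged, self-contained sketch with made-up anomaly values and limits; the colormap colors are the ones defined in the script.

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

anomalies = np.array([-30.0, -10.0, 5.0, 20.0, -15.0, 25.0])   # made-up anomalies
cmap = LinearSegmentedColormap.from_list(
    "custom_div_cmap",
    ["saddlebrown", "chocolate", "white", "darkturquoise", "darkcyan"],
    N=256,
)
stacked = np.stack((anomalies, anomalies))                      # 2 x N "image"
plt.imshow(stacked, cmap=cmap, aspect="auto", vmin=-30, vmax=30, interpolation="none")
plt.gca().set_axis_off()
plt.savefig("stripes_demo.png", bbox_inches="tight", pad_inches=0)  # hypothetical output file
```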
Anhmike/NVTabular | [
"e1f0d6edc9e99fea087944a06891564086e26ecd"
] | [
"nvtabular/inference/triton/ensemble.py"
] | [
"# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nimport os\nimport warnings\nfrom shutil import copyfile, copytree\n\nimport numpy as np\n\nfrom nvtabular import ColumnSelector\n\n# this needs to be before any modules that import protobuf\nos.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\n\nfrom google.protobuf import text_format # noqa\n\nimport nvtabular.inference.triton.model_config_pb2 as model_config # noqa\nfrom merlin.schema import Tags # noqa\nfrom nvtabular.dispatch import is_string_dtype # noqa\n\n\ndef export_tensorflow_ensemble(\n model,\n workflow,\n name,\n model_path,\n label_columns=None,\n sparse_max=None,\n version=1,\n nvtabular_backend=\"nvtabular\",\n cats=None,\n conts=None,\n):\n \"\"\"Creates an ensemble triton server model, with the first model being a nvtabular\n preprocessing, and the second by a tensorflow savedmodel\n\n Parameters\n ----------\n model:\n The tensorflow model that should be served\n workflow:\n The nvtabular workflow used in preprocessing\n name:\n The base name of the various triton models\n model_path:\n The root path to write out files to\n cats:\n Names of the categorical columns\n conts:\n Names of the continuous columns\n label_columns:\n Labels in the dataset (will be removed from the dataset)\n sparse_max:\n Max length of the each row when the sparse data is converted to dense\n version:\n Version of the model\n nvtabular_backend: \"python\" or \"nvtabular\"\n The backend that will be used for inference in Triton.\n \"\"\"\n labels = (\n label_columns\n or workflow.output_schema.apply(ColumnSelector(tags=[Tags.TARGET])).column_names\n )\n workflow = workflow.remove_inputs(labels)\n\n # generate the TF saved model\n tf_path = os.path.join(model_path, name + \"_tf\")\n tf_config = export_tensorflow_model(model, name + \"_tf\", tf_path, version=version)\n\n # override the output dtype of the nvtabular model if necessary (fixes mismatches\n # in dtypes between tf inputs and nvt outputs)\n for column in tf_config.input:\n tf_dtype = _triton_datatype_to_dtype(column.data_type)\n nvt_col_name = column.name.replace(\"__values\", \"\").replace(\"__nnzs\", \"\")\n col_schema = workflow.output_schema[nvt_col_name]\n if col_schema.dtype and col_schema.dtype != tf_dtype:\n warnings.warn(\n f\"TF model expects {tf_dtype} for column {col_schema.name}, but workflow \"\n f\" is producing type {col_schema.dtype}. 
Overriding dtype in NVTabular workflow.\"\n )\n workflow.output_schema.column_schemas[col_schema.name] = col_schema.with_dtype(tf_dtype)\n\n # generate the nvtabular triton model\n preprocessing_path = os.path.join(model_path, name + \"_nvt\")\n nvt_config = generate_nvtabular_model(\n workflow,\n name + \"_nvt\",\n preprocessing_path,\n sparse_max=sparse_max,\n backend=nvtabular_backend,\n cats=cats,\n conts=conts,\n )\n\n # generate the triton ensemble\n ensemble_path = os.path.join(model_path, name)\n os.makedirs(ensemble_path, exist_ok=True)\n os.makedirs(os.path.join(ensemble_path, str(version)), exist_ok=True)\n _generate_ensemble_config(name, ensemble_path, nvt_config, tf_config)\n\n\ndef export_pytorch_ensemble(\n model,\n workflow,\n sparse_max,\n name,\n model_path,\n label_columns=None,\n use_fix_dtypes=True,\n version=1,\n nvtabular_backend=\"python\",\n cats=None,\n conts=None,\n):\n \"\"\"Creates an ensemble triton server model, with the first model being a nvtabular\n preprocessing, and the second by a pytorch savedmodel\n\n Parameters\n ----------\n model:\n The pytorch model that should be served\n workflow:\n The nvtabular workflow used in preprocessing\n sparse_max:\n Max length of the each row when the sparse data is converted to dense\n name:\n The base name of the various triton models\n model_path:\n The root path to write out files to\n cats:\n Names of the categorical columns\n conts:\n Names of the continuous columns\n label_columns:\n Labels in the dataset (will be removed from the dataset)\n use_fix_dtypes:\n Transformers4Rec is using fixed dtypes and this option is\n whether to use fixed dtypes in inference or not\n version:\n Version of the model\n nvtabular_backend: \"python\" or \"nvtabular\"\n The backend that will be used for inference in Triton.\n \"\"\"\n labels = (\n label_columns\n or workflow.output_schema.apply(ColumnSelector(tags=[Tags.TARGET])).column_names\n )\n workflow = workflow.remove_inputs(labels)\n\n # generate the TF saved model\n pt_path = os.path.join(model_path, name + \"_pt\")\n pt_config = export_pytorch_model(\n model, workflow, sparse_max, name + \"_pt\", pt_path, use_fix_dtypes, version=version\n )\n\n # override the output dtype of the nvtabular model if necessary (fixes mismatches\n # in dtypes between tf inputs and nvt outputs)\n for column in pt_config.input:\n pt_dtype = _triton_datatype_to_dtype(column.data_type)\n nvt_dtype = workflow.output_dtypes.get(column.name)\n if nvt_dtype and nvt_dtype != pt_dtype:\n warnings.warn(\n f\"PyTorch model expects {pt_dtype} for column {column.name}, but workflow \"\n f\" is producing type {nvt_dtype}. 
Overriding dtype in NVTabular workflow.\"\n )\n workflow.output_dtypes[column.name] = pt_dtype\n\n # generate the nvtabular triton model\n preprocessing_path = os.path.join(model_path, name + \"_nvt\")\n nvt_config = generate_nvtabular_model(\n workflow,\n name + \"_nvt\",\n preprocessing_path,\n backend=nvtabular_backend,\n cats=cats,\n conts=conts,\n )\n\n # generate the triton ensemble\n ensemble_path = os.path.join(model_path, name)\n os.makedirs(ensemble_path, exist_ok=True)\n os.makedirs(os.path.join(ensemble_path, str(version)), exist_ok=True)\n _generate_ensemble_config(name, ensemble_path, nvt_config, pt_config)\n\n\ndef export_hugectr_ensemble(\n workflow,\n hugectr_model_path,\n hugectr_params,\n name,\n output_path,\n version=1,\n max_batch_size=None,\n nvtabular_backend=\"python\",\n cats=None,\n conts=None,\n label_columns=None,\n):\n \"\"\"Creates an ensemble hugectr server model, with the first model being a nvtabular\n preprocessing, and the second by a hugectr savedmodel\n\n Parameters\n ----------\n workflow:\n The nvtabular workflow used in preprocessing\n hugectr_model_path:\n The path of the trained model files\n hugectr_params:\n HugeCTR specific parameters\n name:\n The base name of the various triton models\n output_path:\n The path where the models will be served\n version:\n The version of the model\n max_batch_size:\n Max batch size that Triton can receive\n nvtabular_backend: \"python\" or \"nvtabular\"\n The backend that will be used for inference in Triton.\n cats:\n Names of the categorical columns\n conts:\n Names of the continuous columns\n label_columns:\n Labels in the dataset (will be removed from the dataset)\n \"\"\"\n cats = cats or workflow.output_schema.apply(ColumnSelector(tags=[Tags.CATEGORICAL]))\n conts = conts or workflow.output_schema.apply(ColumnSelector(tags=[Tags.CONTINUOUS]))\n labels = label_columns or workflow.output_schema.apply(ColumnSelector(tags=[Tags.TARGET]))\n\n if not cats and not conts:\n raise ValueError(\"Either cats or conts has to have a value.\")\n\n workflow = workflow.remove_inputs(labels)\n\n # generate the nvtabular triton model\n preprocessing_path = os.path.join(output_path, name + \"_nvt\")\n nvt_config = generate_nvtabular_model(\n workflow=workflow,\n name=name + \"_nvt\",\n output_path=preprocessing_path,\n version=version,\n output_model=\"hugectr\",\n max_batch_size=max_batch_size,\n backend=nvtabular_backend,\n cats=cats,\n conts=conts,\n )\n\n hugectr_params[\"label_dim\"] = len(labels)\n if conts is None:\n hugectr_params[\"des_feature_num\"] = 0\n else:\n hugectr_params[\"des_feature_num\"] = len(conts)\n\n if cats is None:\n hugectr_params[\"cat_feature_num\"] = 0\n else:\n hugectr_params[\"cat_feature_num\"] = len(cats)\n\n # generate the HugeCTR saved model\n hugectr_config = generate_hugectr_model(\n trained_model_path=hugectr_model_path,\n hugectr_params=hugectr_params,\n name=name,\n output_path=output_path,\n version=version,\n max_batch_size=max_batch_size,\n )\n\n # generate the triton ensemble\n ensemble_path = os.path.join(output_path, name + \"_ens\")\n os.makedirs(ensemble_path, exist_ok=True)\n os.makedirs(os.path.join(ensemble_path, str(version)), exist_ok=True)\n _generate_ensemble_config(name, ensemble_path, nvt_config, hugectr_config, \"_ens\")\n\n\ndef _generate_ensemble_config(name, output_path, nvt_config, nn_config, name_ext=\"\"):\n config = model_config.ModelConfig(\n name=name + name_ext, platform=\"ensemble\", max_batch_size=nvt_config.max_batch_size\n )\n 
config.input.extend(nvt_config.input)\n config.output.extend(nn_config.output)\n\n nn_input_cols = set(col.name for col in nn_config.input)\n\n nvt_step = model_config.ModelEnsembling.Step(model_name=nvt_config.name, model_version=-1)\n for input_col in nvt_config.input:\n nvt_step.input_map[input_col.name] = input_col.name\n for output_col in nvt_config.output:\n if output_col.name not in nn_input_cols:\n warnings.warn(\n f\"Column {output_col.name} is being generated by NVTabular workflow \"\n f\" but is unused in {nn_config.name} model\"\n )\n continue\n nvt_step.output_map[output_col.name] = output_col.name + \"_nvt\"\n\n tf_step = model_config.ModelEnsembling.Step(model_name=nn_config.name, model_version=-1)\n for input_col in nn_config.input:\n tf_step.input_map[input_col.name] = input_col.name + \"_nvt\"\n for output_col in nn_config.output:\n tf_step.output_map[output_col.name] = output_col.name\n\n config.ensemble_scheduling.step.append(nvt_step)\n config.ensemble_scheduling.step.append(tf_step)\n\n with open(os.path.join(output_path, \"config.pbtxt\"), \"w\") as o:\n text_format.PrintMessage(config, o)\n return config\n\n\ndef generate_nvtabular_model(\n workflow,\n name,\n output_path,\n version=1,\n output_model=None,\n max_batch_size=None,\n sparse_max=None,\n backend=\"python\",\n cats=None,\n conts=None,\n):\n \"\"\"converts a workflow to a triton mode\n Parameters\n ----------\n sparse_max:\n Max length of the each row when the sparse data is converted to dense\n cats:\n Names of the categorical columns\n conts:\n Names of the continuous columns\n \"\"\"\n workflow.save(os.path.join(output_path, str(version), \"workflow\"))\n config = _generate_nvtabular_config(\n workflow,\n name,\n output_path,\n output_model,\n max_batch_size,\n sparse_max=sparse_max,\n backend=backend,\n cats=cats,\n conts=conts,\n )\n\n # copy the model file over. 
note that this isn't necessary with the c++ backend, but\n # does provide us to use the python backend with just changing the 'backend' parameter\n copyfile(\n os.path.join(os.path.dirname(__file__), \"workflow_model.py\"),\n os.path.join(output_path, str(version), \"model.py\"),\n )\n\n return config\n\n\ndef generate_hugectr_model(\n trained_model_path,\n hugectr_params,\n name,\n output_path,\n version=1,\n max_batch_size=None,\n):\n \"\"\"converts a trained HugeCTR model to a triton mode\"\"\"\n\n out_path = os.path.join(output_path, name)\n os.makedirs(os.path.join(output_path, name), exist_ok=True)\n out_path_version = os.path.join(out_path, str(version))\n os.makedirs(out_path_version, exist_ok=True)\n\n config = _generate_hugectr_config(name, out_path, hugectr_params, max_batch_size=max_batch_size)\n copytree(trained_model_path, out_path_version, dirs_exist_ok=True)\n\n return config\n\n\ndef _generate_nvtabular_config(\n workflow,\n name,\n output_path,\n output_model=None,\n max_batch_size=None,\n sparse_max=None,\n backend=\"python\",\n cats=None,\n conts=None,\n):\n \"\"\"given a workflow generates the trton modelconfig proto object describing the inputs\n and outputs to that workflow\"\"\"\n config = model_config.ModelConfig(name=name, backend=backend, max_batch_size=max_batch_size)\n\n config.parameters[\"python_module\"].string_value = \"nvtabular.inference.triton.workflow_model\"\n config.parameters[\"output_model\"].string_value = output_model if output_model else \"\"\n\n config.parameters[\"cats\"].string_value = json.dumps(cats) if cats else \"\"\n config.parameters[\"conts\"].string_value = json.dumps(conts) if conts else \"\"\n\n if sparse_max:\n # this assumes seq_length is same for each list column\n config.parameters[\"sparse_max\"].string_value = json.dumps(sparse_max)\n\n if output_model == \"hugectr\":\n config.instance_group.append(model_config.ModelInstanceGroup(kind=2))\n\n for column in workflow.output_node.input_columns.names:\n dtype = workflow.input_dtypes[column]\n config.input.append(\n model_config.ModelInput(name=column, data_type=_convert_dtype(dtype), dims=[-1])\n )\n\n config.output.append(\n model_config.ModelOutput(name=\"DES\", data_type=model_config.TYPE_FP32, dims=[-1])\n )\n\n config.output.append(\n model_config.ModelOutput(name=\"CATCOLUMN\", data_type=model_config.TYPE_INT64, dims=[-1])\n )\n\n config.output.append(\n model_config.ModelOutput(name=\"ROWINDEX\", data_type=model_config.TYPE_INT32, dims=[-1])\n )\n elif output_model == \"pytorch\":\n for col_name, col_schema in workflow.input_schema.column_schemas.items():\n _add_model_param(col_schema, model_config.ModelInput, config.input)\n\n for col_name, col_schema in workflow.output_schema.column_schemas.items():\n _add_model_param(\n col_schema,\n model_config.ModelOutput,\n config.output,\n [-1, 1],\n )\n else:\n for col_name, col_schema in workflow.input_schema.column_schemas.items():\n _add_model_param(col_schema, model_config.ModelInput, config.input)\n\n for col_name, col_schema in workflow.output_schema.column_schemas.items():\n if sparse_max and col_name in sparse_max.keys():\n # this assumes max_sequence_length is equal for all output columns\n dim = sparse_max[col_name]\n _add_model_param(col_schema, model_config.ModelOutput, config.output, [-1, dim])\n else:\n _add_model_param(col_schema, model_config.ModelOutput, config.output)\n\n with open(os.path.join(output_path, \"config.pbtxt\"), \"w\") as o:\n text_format.PrintMessage(config, o)\n return config\n\n\ndef 
export_tensorflow_model(model, name, output_path, version=1):\n \"\"\"Exports a TensorFlow model for serving with Triton\n\n Parameters\n ----------\n model:\n The tensorflow model that should be served\n name:\n The name of the triton model to export\n output_path:\n The path to write the exported model to\n \"\"\"\n tf_model_path = os.path.join(output_path, str(version), \"model.savedmodel\")\n model.save(tf_model_path, include_optimizer=False)\n config = model_config.ModelConfig(\n name=name, backend=\"tensorflow\", platform=\"tensorflow_savedmodel\"\n )\n\n inputs, outputs = model.inputs, model.outputs\n\n if not inputs or not outputs:\n signatures = getattr(model, \"signatures\", {}) or {}\n default_signature = signatures.get(\"serving_default\")\n if not default_signature:\n # roundtrip saved model to disk to generate signature if it doesn't exist\n import tensorflow as tf\n\n reloaded = tf.keras.models.load_model(tf_model_path)\n default_signature = reloaded.signatures[\"serving_default\"]\n\n inputs = list(default_signature.structured_input_signature[1].values())\n outputs = list(default_signature.structured_outputs.values())\n\n config.parameters[\"TF_GRAPH_TAG\"].string_value = \"serve\"\n config.parameters[\"TF_SIGNATURE_DEF\"].string_value = \"serving_default\"\n\n for col in inputs:\n config.input.append(\n model_config.ModelInput(\n name=col.name, data_type=_convert_dtype(col.dtype), dims=[-1, col.shape[1]]\n )\n )\n\n for col in outputs:\n # this assumes the list columns are 1D tensors both for cats and conts\n config.output.append(\n model_config.ModelOutput(\n name=col.name.split(\"/\")[0],\n data_type=_convert_dtype(col.dtype),\n dims=[-1, col.shape[1]],\n )\n )\n\n with open(os.path.join(output_path, \"config.pbtxt\"), \"w\") as o:\n text_format.PrintMessage(config, o)\n return config\n\n\ndef export_pytorch_model(\n model, workflow, sparse_max, name, output_path, use_fix_dtypes=True, version=1, backend=\"python\"\n):\n \"\"\"Exports a PyTorch model for serving with Triton\n\n Parameters\n ----------\n model:\n The PyTorch model that should be served\n workflow:\n The nvtabular workflow used in preprocessing\n sparse_max:\n Max length of the each row when the sparse data is converted to dense\n name:\n The name of the triton model to export\n output_path:\n The path to write the exported model to\n use_fix_dtypes:\n Transformers4Rec is using fixed dtypes and this option is\n whether to use fixed dtypes in inference or not\n version:\n Version of the model\n backend: \"python\" or \"nvtabular\"\n The backend that will be used for inference in Triton.\n \"\"\"\n import cloudpickle\n import torch\n\n os.makedirs(os.path.join(output_path, str(version)), exist_ok=True)\n\n pt_model_path = os.path.join(output_path, str(version), \"model.pth\")\n torch.save(model.state_dict(), pt_model_path)\n\n pt_model_path = os.path.join(output_path, str(version), \"model.pkl\")\n with open(pt_model_path, \"wb\") as o:\n cloudpickle.dump(model, o)\n\n copyfile(\n os.path.join(os.path.dirname(__file__), \"model\", \"model_pt.py\"),\n os.path.join(output_path, str(version), \"model.py\"),\n )\n\n config = model_config.ModelConfig(name=name, backend=backend)\n\n for col_name, col_schema in workflow.output_schema.column_schemas.items():\n _add_model_param(col_schema, model_config.ModelInput, config.input)\n\n *_, last_layer = model.parameters()\n dims = last_layer.shape[0]\n dtype = last_layer.dtype\n config.output.append(\n model_config.ModelOutput(\n name=\"output\", 
data_type=_convert_pytorch_dtype(dtype), dims=[-1, dims]\n )\n )\n\n if sparse_max:\n with open(os.path.join(output_path, str(version), \"model_info.json\"), \"w\") as o:\n model_info = dict()\n model_info[\"sparse_max\"] = sparse_max\n model_info[\"use_fix_dtypes\"] = use_fix_dtypes\n json.dump(model_info, o)\n\n with open(os.path.join(output_path, \"config.pbtxt\"), \"w\") as o:\n text_format.PrintMessage(config, o)\n return config\n\n\ndef _generate_pytorch_config(model, name, output_path, max_batch_size=None):\n \"\"\"given a workflow generates the trton modelconfig proto object describing the inputs\n and outputs to that workflow\"\"\"\n config = model_config.ModelConfig(name=name, backend=\"python\", max_batch_size=max_batch_size)\n\n for col in model.inputs:\n config.input.append(\n model_config.ModelInput(name=col.name, data_type=_convert_dtype(col.dtype), dims=[-1])\n )\n\n for col in model.outputs:\n config.output.append(\n model_config.ModelOutput(\n name=col.name.split(\"/\")[0], data_type=_convert_dtype(col.dtype), dims=[-1]\n )\n )\n\n with open(os.path.join(output_path, \"config.pbtxt\"), \"w\") as o:\n text_format.PrintMessage(config, o)\n return config\n\n\ndef _generate_hugectr_config(name, output_path, hugectr_params, max_batch_size=None):\n config = model_config.ModelConfig(name=name, backend=\"hugectr\", max_batch_size=max_batch_size)\n\n config.input.append(\n model_config.ModelInput(name=\"DES\", data_type=model_config.TYPE_FP32, dims=[-1])\n )\n\n config.input.append(\n model_config.ModelInput(name=\"CATCOLUMN\", data_type=model_config.TYPE_INT64, dims=[-1])\n )\n\n config.input.append(\n model_config.ModelInput(name=\"ROWINDEX\", data_type=model_config.TYPE_INT32, dims=[-1])\n )\n\n for i in range(hugectr_params[\"n_outputs\"]):\n config.output.append(\n model_config.ModelOutput(\n name=\"OUTPUT\" + str(i), data_type=model_config.TYPE_FP32, dims=[-1]\n )\n )\n\n config.instance_group.append(model_config.ModelInstanceGroup(gpus=[0], count=1, kind=1))\n\n config_hugectr = model_config.ModelParameter(string_value=hugectr_params[\"config\"])\n config.parameters[\"config\"].CopyFrom(config_hugectr)\n\n gpucache_val = hugectr_params.get(\"gpucache\", \"true\")\n\n gpucache = model_config.ModelParameter(string_value=gpucache_val)\n config.parameters[\"gpucache\"].CopyFrom(gpucache)\n\n gpucacheper_val = str(hugectr_params.get(\"gpucacheper_val\", \"0.5\"))\n\n gpucacheper = model_config.ModelParameter(string_value=gpucacheper_val)\n config.parameters[\"gpucacheper\"].CopyFrom(gpucacheper)\n\n label_dim = model_config.ModelParameter(string_value=str(hugectr_params[\"label_dim\"]))\n config.parameters[\"label_dim\"].CopyFrom(label_dim)\n\n slots = model_config.ModelParameter(string_value=str(hugectr_params[\"slots\"]))\n config.parameters[\"slots\"].CopyFrom(slots)\n\n des_feature_num = model_config.ModelParameter(\n string_value=str(hugectr_params[\"des_feature_num\"])\n )\n config.parameters[\"des_feature_num\"].CopyFrom(des_feature_num)\n\n cat_feature_num = model_config.ModelParameter(\n string_value=str(hugectr_params[\"cat_feature_num\"])\n )\n config.parameters[\"cat_feature_num\"].CopyFrom(cat_feature_num)\n\n max_nnz = model_config.ModelParameter(string_value=str(hugectr_params[\"max_nnz\"]))\n config.parameters[\"max_nnz\"].CopyFrom(max_nnz)\n\n embedding_vector_size = model_config.ModelParameter(\n string_value=str(hugectr_params[\"embedding_vector_size\"])\n )\n config.parameters[\"embedding_vector_size\"].CopyFrom(embedding_vector_size)\n\n 
embeddingkey_long_type_val = hugectr_params.get(\"embeddingkey_long_type\", \"true\")\n\n embeddingkey_long_type = model_config.ModelParameter(string_value=embeddingkey_long_type_val)\n config.parameters[\"embeddingkey_long_type\"].CopyFrom(embeddingkey_long_type)\n\n with open(os.path.join(output_path, \"config.pbtxt\"), \"w\") as o:\n text_format.PrintMessage(config, o)\n return config\n\n\ndef _add_model_param(col_schema, paramclass, params, dims=None):\n dims = dims if dims is not None else [-1, 1]\n if col_schema.is_list and col_schema.is_ragged:\n params.append(\n paramclass(\n name=col_schema.name + \"__values\",\n data_type=_convert_dtype(col_schema.dtype),\n dims=dims,\n )\n )\n params.append(\n paramclass(\n name=col_schema.name + \"__nnzs\", data_type=model_config.TYPE_INT64, dims=dims\n )\n )\n else:\n params.append(\n paramclass(name=col_schema.name, data_type=_convert_dtype(col_schema.dtype), dims=dims)\n )\n\n\ndef _convert_dtype(dtype):\n \"\"\"converts a dtype to the appropriate triton proto type\"\"\"\n\n if dtype and not isinstance(dtype, str):\n dtype_name = dtype.name if hasattr(dtype, \"name\") else dtype.__name__\n else:\n dtype_name = dtype\n\n dtypes = {\n \"float64\": model_config.TYPE_FP64,\n \"float32\": model_config.TYPE_FP32,\n \"float16\": model_config.TYPE_FP16,\n \"int64\": model_config.TYPE_INT64,\n \"int32\": model_config.TYPE_INT32,\n \"int16\": model_config.TYPE_INT16,\n \"int8\": model_config.TYPE_INT8,\n \"uint64\": model_config.TYPE_UINT64,\n \"uint32\": model_config.TYPE_UINT32,\n \"uint16\": model_config.TYPE_UINT16,\n \"uint8\": model_config.TYPE_UINT8,\n \"bool\": model_config.TYPE_BOOL,\n }\n\n if is_string_dtype(dtype):\n return model_config.TYPE_STRING\n elif dtype_name in dtypes:\n return dtypes[dtype_name]\n else:\n raise ValueError(f\"Can't convert {dtype} to a Triton dtype\")\n\n\ndef _convert_pytorch_dtype(dtype):\n \"\"\"converts a dtype to the appropriate triton proto type\"\"\"\n\n import torch\n\n dtypes = {\n torch.float64: model_config.TYPE_FP64,\n torch.float32: model_config.TYPE_FP32,\n torch.float16: model_config.TYPE_FP16,\n torch.int64: model_config.TYPE_INT64,\n torch.int32: model_config.TYPE_INT32,\n torch.int16: model_config.TYPE_INT16,\n torch.int8: model_config.TYPE_INT8,\n torch.uint8: model_config.TYPE_UINT8,\n torch.bool: model_config.TYPE_BOOL,\n }\n\n if is_string_dtype(dtype):\n return model_config.TYPE_STRING\n elif dtype in dtypes:\n return dtypes[dtype]\n else:\n raise ValueError(f\"Can't convert dtype {dtype})\")\n\n\ndef _convert_string2pytorch_dtype(dtype):\n \"\"\"converts a dtype to the appropriate torch type\"\"\"\n\n import torch\n\n if not isinstance(dtype, str):\n dtype_name = dtype.name\n else:\n dtype_name = dtype\n\n dtypes = {\n \"TYPE_FP64\": torch.float64,\n \"TYPE_FP32\": torch.float32,\n \"TYPE_FP16\": torch.float16,\n \"TYPE_INT64\": torch.int64,\n \"TYPE_INT32\": torch.int32,\n \"TYPE_INT16\": torch.int16,\n \"TYPE_INT8\": torch.int8,\n \"TYPE_UINT8\": torch.uint8,\n \"TYPE_BOOL\": torch.bool,\n }\n\n if is_string_dtype(dtype):\n return model_config.TYPE_STRING\n elif dtype_name in dtypes:\n return dtypes[dtype_name]\n else:\n raise ValueError(f\"Can't convert dtype {dtype})\")\n\n\ndef _triton_datatype_to_dtype(data_type):\n \"\"\"the reverse of _convert_dtype: converts a triton proto data_type to a numpy dtype\"\"\"\n name = model_config._DATATYPE.values[data_type].name[5:].lower()\n if name == \"string\":\n return np.dtype(\"str\")\n return np.dtype(name.replace(\"fp\", \"float\"))\n"
] | [
[
"numpy.dtype",
"tensorflow.keras.models.load_model"
]
] |
sgarg18/arshadowgan | [
"8183f8c06f93c249e48193cdfa41a5e78bcc3d5e"
] | [
"shadow_class/networks.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport segmentation_models_pytorch as smp\n\n\nclass Generator_with_Refin(nn.Module):\n def __init__(self, encoder):\n \"\"\"Generator initialization\n\n Args:\n encoder: an encoder for Unet generator\n \"\"\"\n super(Generator_with_Refin, self).__init__()\n\n # declare Unet generator\n self.generator = smp.Unet(\n encoder_name=encoder,\n classes=1,\n activation='identity',\n encoder_depth=4,\n decoder_channels=[128, 64, 32, 16],\n )\n # replace the first conv block in generator (6 channels tensor as input)\n self.generator.encoder.conv1 = nn.Conv2d(4, 64, kernel_size=(6, 6), stride=(2, 2), padding=(2, 2), bias=False)\n self.generator.segmentation_head = nn.Identity()\n\n # RGB-shadow mask as output before refinement module\n self.SG_head = nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, stride=1, padding=1)\n\n # refinement module\n self.refinement = torch.nn.Sequential()\n for i in range(4):\n self.refinement.add_module(f'refinement{3*i+1}', nn.BatchNorm2d(16))\n self.refinement.add_module(f'refinement{3*i+2}', nn.ReLU())\n self.refinement.add_module(f'refinement{3*i+3}', nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=1, padding=1))\n\n # RGB-shadow mask as output after refinement module\n self.output1 = nn.Conv2d(in_channels=16, out_channels=3, kernel_size=3, stride=1, padding=1)\n\n def forward(self, x):\n \"\"\"Forward for generator\n\n Args:\n x: torch.FloatTensor or torch.cuda.FloatTensor - input tensor with images and masks\n \"\"\"\n x = self.generator(x)\n out1 = self.SG_head(x)\n\n x = self.refinement(x)\n x = self.output1(x)\n return out1, x\n\n\nclass Discriminator(nn.Module):\n def __init__(self, input_shape):\n \"\"\"Discriminator initialization\n\n Args:\n input_shape (tuple): shape of input image\n \"\"\"\n super(Discriminator, self).__init__()\n\n self.input_shape = input_shape\n in_channels, in_height, in_width = self.input_shape\n patch_h, patch_w = int(in_height / 2 ** 4), int(in_width / 2 ** 4)\n self.output_shape = (1, patch_h, patch_w)\n\n def discriminator_block(in_filters, out_filters, first_block=False):\n layers = []\n layers.append(nn.Conv2d(in_filters, out_filters, kernel_size=3, stride=1, padding=1))\n if not first_block:\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n layers.append(nn.Conv2d(out_filters, out_filters, kernel_size=4, stride=2, padding=1)) #k=3,p=1\n layers.append(nn.BatchNorm2d(out_filters))\n layers.append(nn.LeakyReLU(0.2, inplace=True))\n return layers\n\n layers = []\n in_filters = in_channels\n for i, out_filters in enumerate([64, 128, 256, 512]):\n layers.extend(discriminator_block(in_filters, out_filters, first_block=(i == 0)))\n in_filters = out_filters\n\n layers.append(nn.Conv2d(out_filters, 1, kernel_size=3, stride=1, padding=1))\n\n self.model = nn.Sequential(*layers)\n\n def forward(self, img):\n \"\"\"Discriminator forward\n \"\"\"\n return self.model(img)\n\n\n"
] | [
[
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.Sequential",
"torch.nn.Identity",
"torch.nn.ReLU",
"torch.nn.LeakyReLU"
]
] |
DDQXZcp/FYP_ProjectFile_TANG_Zhiheng | [
"b0e3b9d1c5cee61e1d09a32e405244bda09b6f0d"
] | [
"CNN/training_result_test.py"
] | [
"from tensorflow import keras\r\nfrom skimage import io\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport os\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '-1'\r\n'''\r\nmymodel4_30: 30:0.5\r\nmymodel4_50epoch:50epoch 0.98:0.2\r\n'''\r\n\r\n\r\npath='D:\\pycharm project'\r\n#ori_files= os.listdir('D:\\FYP\\original') #D:\\FYP\\original D:\\FYP\\\\test_o\r\nori_files= os.listdir('D:\\珩珩工作室\\polyu study\\EIE4433\\戴宪洁大佬\\CNN训练资源\\\\test_o') #D:\\FYP\\original D:\\FYP\\\\test_o\r\n#gt_path = os.listdir('D:\\FYP\\\\truth')\r\ngt_path = os.listdir('D:\\珩珩工作室\\polyu study\\EIE4433\\戴宪洁大佬\\CNN训练资源\\\\test_t')\r\na = np.zeros((1, 600, 600, 1))\r\nb = np.zeros((1, 600, 600))\r\n#c =np.array(io.imread('D:\\FYP\\original\\\\'+ori_files[28]))\r\nc =np.array(io.imread('D:\\珩珩工作室\\polyu study\\EIE4433\\戴宪洁大佬\\CNN训练资源\\\\test_o\\\\'+ori_files[28]))\r\nprint(ori_files[28])\r\nc = cv2.cvtColor(c, cv2.COLOR_RGB2GRAY)\r\n\r\ne=np.zeros((600,600,1))\r\ne[:,:,0]=c\r\nplt.imshow(c)\r\nplt.show()\r\n#d = np.array(io.imread('D:\\FYP\\\\truth\\\\'+gt_path[28], as_gray=True))\r\nd = np.array(io.imread('D:\\珩珩工作室\\polyu study\\EIE4433\\戴宪洁大佬\\CNN训练资源\\\\test_t\\\\'+gt_path[28], as_gray=True))\r\nplt.imshow(d)\r\nplt.show()\r\na[0]=e/255\r\nb[0]=d\r\nx_test=a\r\ny_test=b\r\n\r\nmodel = keras.models.load_model(\"mymodel4_0.985\")\r\n\r\n#test_scores = model.evaluate(x_test, y_test)\r\npredict_scores = model.predict(x_test)\r\nprint()\r\nprint(\"Output shape:\",predict_scores.shape)\r\nprint(\"Output max:\", predict_scores.max())\r\nprint(\"Output min:\", predict_scores.min())\r\nm=np.zeros((600,600))\r\nm=predict_scores[0]*255\r\n\r\nplt.imshow(m)\r\nplt.show()\r\n\r\nn=np.zeros((600,600))\r\n'''\r\nre1=map(m.index,heapq.nlargest(4,m))\r\nprint(list(re1))\r\n'''\r\nfor i in range(600):\r\n for j in range(600):\r\n if m[i][j][0]<2:\r\n n[i][j]=255\r\n else:\r\n n[i][j] = 0\r\nn1=plt.imshow(n)\r\nn1.set_cmap('gray')\r\nplt.show()\r\nprint(\"After threshold max:\",m.max())\r\n\r\nhist1=np.histogram(m[:,:,0],bins=256)\r\nprint(hist1)\r\n\r\n#plt.hist(m[:,:,0],bins=256)\r\n#plt.show()\r\n"
] | [
[
"tensorflow.keras.models.load_model",
"numpy.zeros",
"numpy.histogram",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] |
rtagirov/python_scr_pc_imperial | [
"423204964ddbc9c117bd2b3bb4397ee98b89a56d"
] | [
"nat/nesatl_clv.py"
] | [
"import numpy as np\nimport pylab as pl\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.ticker import AutoMinorLocator\nfrom matplotlib.ticker import MultipleLocator\n\nfrom scipy import interpolate\n\nimport importlib\nimport sys\nimport os\n\nimport paths; importlib.reload(paths)\nimport pltaux; importlib.reload(pltaux)\nimport sysaux; importlib.reload(sysaux)\nimport phys; importlib.reload(phys)\n\ndef clv_band(wvl, clv, band):\n\n idx_b = np.where((wvl >= band[0]) & (wvl <= band[1]))\n\n frq_b = phys.c / (wvl[idx_b] * 1.0e-7)\n\n its = np.zeros(len(clv[0, :]))\n\n for i in range(len(clv[0, :])):\n\n its[i] = np.trapz(clv[idx_b[0], i], frq_b)\n\n return its / its[0]\n\ndef ip(mu):\n\n p = np.sqrt(1.0 - mu**2.0)\n\n return ['%.3f' % z for z in p]\n \nmu = np.array([1.00, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.30, 0.20, 0.10, 0.05])\n\nclv_n = np.loadtxt(paths.it0f + 'murmean_r_atl_abd/CLV')\nclv_a = np.loadtxt(paths.out + 'atl_clv_86.dat')\n\nwvl_n = clv_n[:, 0] / 10.0\nwvl_a = clv_a[:, 0]\n\nbands = [[117.00000, 242.00000], \\\n [242.00000, 360.00000], \\\n [410.00000, 750.00000], \\\n [1500.0000, 3000.0000], \\\n [3000.0000, 3500.0000], \\\n [3500.0000, 4000.0000], \\\n [10020.000, 160001.02]]\n\n#col = ['k', 'r', 'b', 'g', 'o', 'c', 'm']\ncol = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6']\n\n#sysaux.clean_dir(paths.figdir)\n\npltaux.figpar(fontsize = 20)\n\nfig, ax1 = plt.subplots(nrows = 1, ncols = 1, figsize = (12, 10))\n\nfig.tight_layout()\n\nidx_b = 0\n\nfor band in bands:\n\n its_n = clv_band(wvl_n, clv_n[:, 1 : 12], band)\n its_a = clv_band(wvl_a, clv_a[:, 1 : 12], band)\n\n if idx_b != 6: l = str(int(band[0])) + r' nm $\\rightarrow$ ' + str(int(band[1])) + ' nm'\n if idx_b == 6: l = str(int(band[0] / 1e3)) + r' $\\mu$m $\\rightarrow$ ' + str(int(band[1] / 1e3)) + ' $\\mu$m'\n\n ax1.plot(mu, its_n, color = col[idx_b], label = l)\n ax1.plot(mu, its_a, '--', color = col[idx_b])\n\n idx_b = idx_b + 1\n\nax1.xaxis.set_major_locator(MultipleLocator(0.1))\nax1.yaxis.set_major_locator(MultipleLocator(0.1))\n\nax1.set_xlabel(r'$\\mu = \\cos\\theta$')\nax1.set_ylabel('$I(\\mu) / I(\\mu = 1)$')\n\nax1.grid(True)\n\nax1.set_xlim(1.0, 0.05)\nax1.set_ylim(0.0, 1.00)\n\nax2 = ax1.twiny()\n\nax2.set_xlim(ax1.get_xlim())\n\nax2.set_xticks(mu[0 : len(mu) - 1])\n\nax2.set_xticklabels(ip(mu[0 : len(mu) - 1]))\n\nax2.set_xlabel(r'$p = R / R_\\odot$', labelpad = 10.5)\n\nleg = ax1.legend(frameon = True, framealpha = 1, loc = 'best', prop={'size': 23})\n\nfor handle in leg.legendHandles: handle.set_linewidth(5.0)\n\npltaux.savepdf('nesatl_clv')\n"
] | [
[
"numpy.sqrt",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.subplots",
"numpy.array",
"numpy.trapz",
"numpy.where",
"numpy.loadtxt"
]
] |
prokia/deepHops | [
"bd42d72c7c6e3b7813342a1dac0befc46bc66838"
] | [
"deephop/onmt/train_single.py"
] | [
"#!/usr/bin/env python\n\"\"\"\n Training on a single process\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport os\nimport random\nimport torch\n\nimport onmt.opts as opts\nfrom graph_embedding import init_condition_transformer\n\nfrom onmt.inputters.inputter import build_dataset_iter, lazily_load_dataset, \\\n _load_fields, _collect_report_features\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import build_optim\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n n_params = sum([p.nelement() for p in model.parameters()])\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n elif 'decoder' or 'generator' in name:\n dec += param.nelement()\n return n_params, enc, dec\n\n\ndef training_opt_postprocessing(opt, device_id):\n if opt.word_vec_size != -1:\n opt.src_word_vec_size = opt.word_vec_size\n opt.tgt_word_vec_size = opt.word_vec_size\n\n if opt.layers != -1:\n opt.enc_layers = opt.layers\n opt.dec_layers = opt.layers\n\n if opt.rnn_size != -1:\n opt.enc_rnn_size = opt.rnn_size\n\n if opt.arch in ['transformer', 'after_encoding']:\n opt.dec_rnn_size = opt.rnn_size + opt.condition_dim\n else:\n opt.dec_rnn_size = opt.rnn_size\n if opt.model_type == 'text' and opt.enc_rnn_size != opt.dec_rnn_size:\n raise AssertionError(\"\"\"We do not support different encoder and\n decoder rnn sizes for translation now.\"\"\")\n\n opt.brnn = (opt.encoder_type == \"brnn\")\n\n if opt.rnn_type == \"SRU\" and not opt.gpu_ranks:\n raise AssertionError(\"Using SRU requires -gpu_ranks set.\")\n\n if torch.cuda.is_available() and not opt.gpu_ranks:\n logger.info(\"WARNING: You have a CUDA device, \\\n should run with -gpu_ranks\")\n\n if opt.seed > 0:\n torch.manual_seed(opt.seed)\n # this one is needed for torchtext random call (shuffled iterator)\n # in multi gpu it ensures datasets are read in the same order\n random.seed(opt.seed)\n # some cudnn methods can be random even after fixing the seed\n # unless you tell it to be deterministic\n torch.backends.cudnn.deterministic = True\n\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n if opt.seed > 0:\n # These ensure same initialization in multi gpu mode\n torch.cuda.manual_seed(opt.seed)\n\n return opt\n\n\ndef main(opt, device_id):\n opt = training_opt_postprocessing(opt, device_id)\n init_logger(opt.log_file)\n # 初始化口袋特征编码器\n init_condition_transformer(opt.use_graph_embedding, opt.condition_dim)\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n model_opt = checkpoint['opt']\n else:\n checkpoint = None\n model_opt = opt\n\n # Peek the first dataset to determine the data_type.\n # (All datasets have the same data_type).\n first_dataset = next(lazily_load_dataset(\"train\", opt))\n data_type = first_dataset.data_type\n\n # Load fields generated from preprocess phase.\n fields = _load_fields(first_dataset, data_type, opt, checkpoint)\n # Report src/tgt features.\n\n src_features, tgt_features = _collect_report_features(fields)\n for j, feat in 
enumerate(src_features):\n logger.info(' * src feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n for j, feat in enumerate(tgt_features):\n logger.info(' * tgt feature %d size = %d'\n % (j, len(fields[feat].vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = build_optim(model, opt, checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(opt, device_id, model, fields,\n optim, data_type, model_saver=model_saver)\n\n def train_iter_fct():\n return build_dataset_iter(\n lazily_load_dataset(\"train\", opt), fields, opt)\n\n def valid_iter_fct():\n return build_dataset_iter(\n lazily_load_dataset(\"valid\", opt), fields, opt, is_train=False)\n\n # Do training.\n trainer.train(train_iter_fct, valid_iter_fct, opt.train_steps,\n opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='train.py',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n"
] | [
[
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.cuda.set_device"
]
] |
potterhsu/easy-fpn.pytorch | [
"cac901f2570bd8dba7bb456128c7c7985c255ea4"
] | [
"evaluator.py"
] | [
"from typing import Tuple\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom dataset.base import Base as DatasetBase\nfrom model import Model\n\n\nclass Evaluator(object):\n def __init__(self, dataset: DatasetBase, path_to_data_dir: str, path_to_results_dir: str):\n super().__init__()\n self._dataset = dataset\n self._dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=8, pin_memory=True)\n self._path_to_data_dir = path_to_data_dir\n self._path_to_results_dir = path_to_results_dir\n\n def evaluate(self, model: Model) -> Tuple[float, str]:\n all_image_ids, all_detection_bboxes, all_detection_classes, all_detection_probs = [], [], [], []\n\n with torch.no_grad():\n for batch_index, (image_id_batch, image_batch, scale_batch, _, _) in enumerate(tqdm(self._dataloader)):\n image_id = image_id_batch[0]\n image = image_batch[0].cuda()\n scale = scale_batch[0].item()\n\n forward_input = Model.ForwardInput.Eval(image)\n forward_output: Model.ForwardOutput.Eval = model.eval().forward(forward_input)\n\n detection_bboxes, detection_classes, detection_probs = forward_output\n detection_bboxes /= scale\n\n selected_indices = (detection_probs > 0.05).nonzero().view(-1)\n detection_bboxes = detection_bboxes[selected_indices]\n detection_classes = detection_classes[selected_indices]\n detection_probs = detection_probs[selected_indices]\n\n all_detection_bboxes.extend(detection_bboxes.tolist())\n all_detection_classes.extend(detection_classes.tolist())\n all_detection_probs.extend(detection_probs.tolist())\n all_image_ids.extend([image_id] * len(detection_bboxes))\n\n mean_ap, detail = self._dataset.evaluate(self._path_to_results_dir, all_image_ids, all_detection_bboxes, all_detection_classes, all_detection_probs)\n return mean_ap, detail\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.no_grad"
]
] |
julesy89/pysurrogate | [
"6fe58f157920ef7819bcd4756342b2ca778f43b8"
] | [
"pysurrogate/archive/eim/meta_eim.py"
] | [
"import numpy as np\n\nfrom pysao.eim.evolutionary_interpolation import EvolutionaryInterpolationModel\nfrom pysao.metamodels.metamodel import Metamodel\n\n\nclass EIModel(Metamodel):\n def __init__(self):\n Metamodel.__init__(self)\n self.model = None\n\n def _predict(self, X):\n return self.model.predict(X), np.zeros(X.shape[0])\n\n def _fit(self, X, F, data):\n self.model = EvolutionaryInterpolationModel(xl=0 * np.ones(X.shape[1]), xu=1 * np.ones(X.shape[1]))\n self.model.fit(X, F)\n self.model.optimize()\n return self\n\n @staticmethod\n def get_params():\n val = [{}]\n return val\n"
] | [
[
"numpy.ones",
"numpy.zeros"
]
] |
WikiMegrez/wikisearch | [
"89dcd07962bacf0dc3cce55bf529b8af44e8150e"
] | [
"wiki_search/core/bert_ranking.py"
] | [
"import torch\nfrom transformers import BertTokenizer, BertModel\nfrom wiki_search.dataset import Document\n\n\ntorch.set_grad_enabled(False)\nMODEL_NAME = 'bert-base-cased'\n\n\nclass BertRanking(object):\n def __init__(self, device: str = 'cuda:0'):\n self.device = torch.device(device)\n self.model = BertModel.from_pretrained(MODEL_NAME).to(self.device)\n self.tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)\n\n def _embed(self, text: str) -> torch.FloatTensor:\n tokens_pt = self.tokenizer(text, return_tensors='pt', max_length=512)\n tokens_pt = {k: v.to(self.device) for k, v in tokens_pt.items()}\n outputs = self.model(**tokens_pt)\n return outputs.pooler_output\n\n def score(self, query: str, doc: Document):\n query_z = self._embed(query)\n doc_z = self._embed(doc.raw_main_desc)\n score = (query_z * doc_z).sum()\n\n return score\n"
] | [
[
"torch.set_grad_enabled",
"torch.device"
]
] |
kkmumu/DeepRobust | [
"0cc1950177ed6604e55274e1a7cd578d54fba5c4"
] | [
"deeprobust/image/netmodels/feed_dataset.py"
] | [
"\"\"\"\r\nThis function help to feed in train and test datasets.\r\nSelect model archtecture and seed then output corresponding model.\r\n\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport os\r\nimport argparse\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F #233\r\nimport torch.optim as optim\r\nfrom torchvision import datasets, transforms\r\nimport numpy as np\r\nfrom PIL import Image\r\n \r\n\r\ndef feed_dataset(data, data_dict, seedin = 100, random_train = False):\r\n \r\n torch.manual_seed(seedin)\r\n \r\n if random_train == True:\r\n if(data == 'MNIST'):\r\n train_set = datasets.MNIST('./', train=True, download = True,transform=transforms.Compose([transforms.ToTensor()]))\r\n test_set = datasets.MNIST('../data', train=False, download = True,transform=transforms.Compose([transforms.ToTensor()]))\r\n full_set = torch.utils.data.ConcatDataset([train_set,test_set])\r\n \r\n trans = transforms.Compose(transforms = [\r\n transforms.ToTensor(),transforms.Normalize((0.1307,), (0.3081,))])\r\n \r\n train_len = 60000\r\n test_len = 10000\r\n trainset_new, testset_new = torch.utils.data.random_split(full_set,[train_len, test_len])\r\n trainset_new.transform = trans\r\n testset_new.transform = trans\r\n train_loader = torch.utils.data.DataLoader(trainset_new, batch_size = 64, shuffle = True)\r\n test_loader = torch.utils.data.DataLoader(testset_new, batch_size = 1000, shuffle = True)\r\n \r\n else:\r\n pass\r\n \r\n return train_loader, test_loader\r\n \r\n else:\r\n if(data == 'CIFAR10'):\r\n transform_train = transforms.Compose([\r\n transforms.RandomCrop(32, padding=5),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\r\n ])\r\n \r\n transform_val = transforms.Compose([\r\n transforms.ToTensor(),\r\n #transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\r\n ])\r\n \r\n train_loader = torch.utils.data.DataLoader(\r\n datasets.CIFAR10(data_dict, train=True, download = True,\r\n transform=transform_train),\r\n batch_size= 1000, shuffle=True) #, **kwargs)\r\n \r\n test_loader = torch.utils.data.DataLoader(\r\n datasets.CIFAR10(data_dict, train=False, download = True,\r\n transform=transform_val),\r\n batch_size= 1000, shuffle=True) #, **kwargs)\r\n \r\n elif(data == 'MNIST'):\r\n train_loader = torch.utils.data.DataLoader(\r\n datasets.MNIST(data_dict, train=True, download = True,\r\n transform=transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize((0.1307,), (0.3081,))])),\r\n batch_size=64,\r\n shuffle=True)\r\n \r\n test_loader = torch.utils.data.DataLoader(\r\n datasets.MNIST('../data', train=False, download = True,\r\n transform=transforms.Compose([transforms.ToTensor(),\r\n transforms.Normalize((0.1307,), (0.3081,))])),\r\n batch_size=1000,\r\n shuffle=True)\r\n \r\n elif(data == 'ImageNet'):\r\n pass\r\n \r\n return train_loader, test_loader\r\n"
] | [
[
"torch.utils.data.DataLoader",
"torch.manual_seed",
"torch.utils.data.random_split",
"torch.utils.data.ConcatDataset"
]
] |
GSEL9/data-science-tools | [
"950ea7b3c675dc5b1648e6289e796da7b8cafb95"
] | [
"dstools/model_comparison.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# model_comparison.py\n#\n\n\"\"\"\n\"\"\"\n\n__author__ = 'Severin Langberg'\n__email__ = '[email protected]'\n\n\nimport os\nimport utils\nimport ioutil\nimport shutil\nimport logging\nimport model_selection\nimport feature_selection\n\nimport numpy as np\nimport pandas as pd\n\nfrom datetime import datetime\nfrom multiprocessing import cpu_count\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import ParameterGrid\n\n\nTMP_RESULTS_DIR = 'tmp_model_comparison'\n\n\ndef _cleanup(results, path_to_results):\n\n ioutil.write_final_results(path_to_results, results)\n\n # Remove temporary directory if process completed succesfully.\n ioutil.teardown_tempdir(TMP_RESULTS_DIR)\n\n return results\n\n\ndef model_comparison(*args, verbose=1, score_func=None, n_jobs=None, **kwargs):\n \"\"\"Collecting repeated average performance measures of selected models.\n\n \"\"\"\n (\n comparison_scheme, X, y, estimators, estimator_params, selectors,\n fs_params, random_states, n_splits, path_to_results\n ) = args\n\n global TMP_RESULTS_DIR\n\n # Setup temporary directory.\n path_tempdir = ioutil.setup_tempdir(TMP_RESULTS_DIR, root='.')\n\n # Set number of CPUs.\n if n_jobs is None:\n n_jobs = cpu_count() - 1 if cpu_count() > 1 else cpu_count()\n\n results = []\n for estimator_name, estimator in estimators.items():\n\n print('Running estimator: {}\\n{}'.format(estimator.__name__, '-' * 30))\n\n # Setup hyperparameter grid.\n hparam_grid = ParameterGrid(estimator_params[estimator_name])\n\n for fs_name, fs_func in selectors.items():\n\n print('Running selector: {}\\n{}'.format(fs_name, '-' * 30))\n\n selector = {\n 'name': fs_name, 'func': fs_func, 'params': fs_params[fs_name]\n }\n # Repeating experiments.\n results.extend(\n joblib.Parallel(\n n_jobs=n_jobs, verbose=verbose\n )(\n joblib.delayed(comparison_scheme)(\n X, y, estimator, hparam_grid, selector, n_splits,\n random_state, path_tempdir, verbose=verbose,\n score_func=score_func, n_jobs=n_jobs\n ) for random_state in random_states\n )\n )\n results = _cleanup(results, path_to_results)\n\n return results\n\n\nif __name__ == '__main__':\n\n pass\n"
] | [
[
"sklearn.model_selection.ParameterGrid",
"sklearn.externals.joblib.Parallel",
"sklearn.externals.joblib.delayed"
]
] |
JarnoRFB/xarray | [
"1ab7569561db50eaccbae977b0ef69993e0c0d0c"
] | [
"xarray/core/duck_array_ops.py"
] | [
"\"\"\"Compatibility module defining operations on duck numpy-arrays.\n\nCurrently, this means Dask or NumPy arrays. None of these functions should\naccept or return xarray objects.\n\"\"\"\nimport contextlib\nimport inspect\nimport warnings\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dask_array_ops, dtypes, npcompat, nputils\nfrom .nputils import nanfirst, nanlast\nfrom .pycompat import dask_array_type, sparse_array_type\n\ntry:\n import dask.array as dask_array\n from . import dask_array_compat\nexcept ImportError:\n dask_array = None # type: ignore\n dask_array_compat = None # type: ignore\n\n\ndef _dask_or_eager_func(name, eager_module=np, dask_module=dask_array,\n list_of_args=False, array_args=slice(1),\n requires_dask=None):\n \"\"\"Create a function that dispatches to dask for dask array inputs.\"\"\"\n if dask_module is not None:\n def f(*args, **kwargs):\n if list_of_args:\n dispatch_args = args[0]\n else:\n dispatch_args = args[array_args]\n if any(isinstance(a, dask_array.Array) for a in dispatch_args):\n try:\n wrapped = getattr(dask_module, name)\n except AttributeError as e:\n raise AttributeError(\"%s: requires dask >=%s\" %\n (e, requires_dask))\n else:\n wrapped = getattr(eager_module, name)\n return wrapped(*args, **kwargs)\n else:\n def f(*args, **kwargs):\n return getattr(eager_module, name)(*args, **kwargs)\n return f\n\n\ndef fail_on_dask_array_input(values, msg=None, func_name=None):\n if isinstance(values, dask_array_type):\n if msg is None:\n msg = '%r is not yet a valid method on dask arrays'\n if func_name is None:\n func_name = inspect.stack()[1][3]\n raise NotImplementedError(msg % func_name)\n\n\n# switch to use dask.array / __array_function__ version when dask supports it:\n# https://github.com/dask/dask/pull/4822\nmoveaxis = npcompat.moveaxis\n\naround = _dask_or_eager_func('around')\nisclose = _dask_or_eager_func('isclose')\n\n\nif hasattr(np, 'isnat') and (\n dask_array is None or hasattr(dask_array_type, '__array_ufunc__')):\n # np.isnat is available since NumPy 1.13, so __array_ufunc__ is always\n # supported.\n isnat = np.isnat\nelse:\n isnat = _dask_or_eager_func('isnull', eager_module=pd)\nisnan = _dask_or_eager_func('isnan')\nzeros_like = _dask_or_eager_func('zeros_like')\n\n\npandas_isnull = _dask_or_eager_func('isnull', eager_module=pd)\n\n\ndef isnull(data):\n data = asarray(data)\n scalar_type = data.dtype.type\n if issubclass(scalar_type, (np.datetime64, np.timedelta64)):\n # datetime types use NaT for null\n # note: must check timedelta64 before integers, because currently\n # timedelta64 inherits from np.integer\n return isnat(data)\n elif issubclass(scalar_type, np.inexact):\n # float types use NaN for null\n return isnan(data)\n elif issubclass(\n scalar_type, (np.bool_, np.integer, np.character, np.void)\n ):\n # these types cannot represent missing values\n return zeros_like(data, dtype=bool)\n else:\n # at this point, array should have dtype=object\n if isinstance(data, (np.ndarray, dask_array_type)):\n return pandas_isnull(data)\n else:\n # Not reachable yet, but intended for use with other duck array\n # types. 
For full consistency with pandas, we should accept None as\n # a null value as well as NaN, but it isn't clear how to do this\n # with duck typing.\n return data != data\n\n\ndef notnull(data):\n return ~isnull(data)\n\n\ntranspose = _dask_or_eager_func('transpose')\n_where = _dask_or_eager_func('where', array_args=slice(3))\nisin = _dask_or_eager_func('isin', eager_module=npcompat,\n dask_module=dask_array_compat, array_args=slice(2))\ntake = _dask_or_eager_func('take')\nbroadcast_to = _dask_or_eager_func('broadcast_to')\n\n_concatenate = _dask_or_eager_func('concatenate', list_of_args=True)\n_stack = _dask_or_eager_func('stack', list_of_args=True)\n\narray_all = _dask_or_eager_func('all')\narray_any = _dask_or_eager_func('any')\n\ntensordot = _dask_or_eager_func('tensordot', array_args=slice(2))\neinsum = _dask_or_eager_func('einsum', array_args=slice(1, None),\n requires_dask='0.17.3')\n\n\ndef gradient(x, coord, axis, edge_order):\n if isinstance(x, dask_array_type):\n return dask_array_compat.gradient(\n x, coord, axis=axis, edge_order=edge_order)\n return npcompat.gradient(x, coord, axis=axis, edge_order=edge_order)\n\n\ndef trapz(y, x, axis):\n if axis < 0:\n axis = y.ndim + axis\n x_sl1 = (slice(1, None), ) + (None, ) * (y.ndim - axis - 1)\n x_sl2 = (slice(None, -1), ) + (None, ) * (y.ndim - axis - 1)\n slice1 = (slice(None),) * axis + (slice(1, None), )\n slice2 = (slice(None),) * axis + (slice(None, -1), )\n dx = (x[x_sl1] - x[x_sl2])\n integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)])\n return sum(integrand, axis=axis, skipna=False)\n\n\nmasked_invalid = _dask_or_eager_func(\n 'masked_invalid', eager_module=np.ma,\n dask_module=getattr(dask_array, 'ma', None))\n\n\ndef asarray(data):\n return (\n data if (isinstance(data, dask_array_type)\n or hasattr(data, '__array_function__'))\n else np.asarray(data)\n )\n\n\ndef as_shared_dtype(scalars_or_arrays):\n \"\"\"Cast a arrays to a shared dtype using xarray's type promotion rules.\"\"\"\n arrays = [asarray(x) for x in scalars_or_arrays]\n # Pass arrays directly instead of dtypes to result_type so scalars\n # get handled properly.\n # Note that result_type() safely gets the dtype from dask arrays without\n # evaluating them.\n out_type = dtypes.result_type(*arrays)\n return [x.astype(out_type, copy=False) for x in arrays]\n\n\ndef as_like_arrays(*data):\n if all(isinstance(d, dask_array_type) for d in data):\n return data\n elif any(isinstance(d, sparse_array_type) for d in data):\n from sparse import COO\n return tuple(COO(d) for d in data)\n else:\n return tuple(np.asarray(d) for d in data)\n\n\ndef allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8):\n \"\"\"Like np.allclose, but also allows values to be NaN in both arrays\n \"\"\"\n arr1, arr2 = as_like_arrays(arr1, arr2)\n if arr1.shape != arr2.shape:\n return False\n return bool(\n isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True).all())\n\n\ndef array_equiv(arr1, arr2):\n \"\"\"Like np.array_equal, but also allows values to be NaN in both arrays\n \"\"\"\n arr1, arr2 = as_like_arrays(arr1, arr2)\n if arr1.shape != arr2.shape:\n return False\n\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', \"In the future, 'NAT == x'\")\n\n flag_array = (arr1 == arr2)\n flag_array |= (isnull(arr1) & isnull(arr2))\n\n return bool(flag_array.all())\n\n\ndef array_notnull_equiv(arr1, arr2):\n \"\"\"Like np.array_equal, but also allows values to be NaN in either or both\n arrays\n \"\"\"\n arr1, arr2 = as_like_arrays(arr1, arr2)\n if arr1.shape != 
arr2.shape:\n return False\n\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', \"In the future, 'NAT == x'\")\n\n flag_array = (arr1 == arr2)\n flag_array |= isnull(arr1)\n flag_array |= isnull(arr2)\n\n return bool(flag_array.all())\n\n\ndef count(data, axis=None):\n \"\"\"Count the number of non-NA in this array along the given axis or axes\n \"\"\"\n return np.sum(np.logical_not(isnull(data)), axis=axis)\n\n\ndef where(condition, x, y):\n \"\"\"Three argument where() with better dtype promotion rules.\"\"\"\n return _where(condition, *as_shared_dtype([x, y]))\n\n\ndef where_method(data, cond, other=dtypes.NA):\n if other is dtypes.NA:\n other = dtypes.get_fill_value(data.dtype)\n return where(cond, data, other)\n\n\ndef fillna(data, other):\n return where(isnull(data), other, data)\n\n\ndef concatenate(arrays, axis=0):\n \"\"\"concatenate() with better dtype promotion rules.\"\"\"\n return _concatenate(as_shared_dtype(arrays), axis=axis)\n\n\ndef stack(arrays, axis=0):\n \"\"\"stack() with better dtype promotion rules.\"\"\"\n return _stack(as_shared_dtype(arrays), axis=axis)\n\n\[email protected]\ndef _ignore_warnings_if(condition):\n if condition:\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n yield\n else:\n yield\n\n\ndef _create_nan_agg_method(name, coerce_strings=False):\n from . import nanops\n\n def f(values, axis=None, skipna=None, **kwargs):\n if kwargs.pop('out', None) is not None:\n raise TypeError('`out` is not valid for {}'.format(name))\n\n values = asarray(values)\n\n if coerce_strings and values.dtype.kind in 'SU':\n values = values.astype(object)\n\n func = None\n if skipna or (skipna is None and values.dtype.kind in 'cfO'):\n nanname = 'nan' + name\n func = getattr(nanops, nanname)\n else:\n func = _dask_or_eager_func(name)\n\n try:\n return func(values, axis=axis, **kwargs)\n except AttributeError:\n if isinstance(values, dask_array_type):\n try: # dask/dask#3133 dask sometimes needs dtype argument\n # if func does not accept dtype, then raises TypeError\n return func(values, axis=axis, dtype=values.dtype,\n **kwargs)\n except (AttributeError, TypeError):\n msg = '%s is not yet implemented on dask arrays' % name\n else:\n msg = ('%s is not available with skipna=False with the '\n 'installed version of numpy; upgrade to numpy 1.12 '\n 'or newer to use skipna=True or skipna=None' % name)\n raise NotImplementedError(msg)\n\n f.__name__ = name\n return f\n\n\n# Attributes `numeric_only`, `available_min_count` is used for docs.\n# See ops.inject_reduce_methods\nargmax = _create_nan_agg_method('argmax', coerce_strings=True)\nargmin = _create_nan_agg_method('argmin', coerce_strings=True)\nmax = _create_nan_agg_method('max', coerce_strings=True)\nmin = _create_nan_agg_method('min', coerce_strings=True)\nsum = _create_nan_agg_method('sum')\nsum.numeric_only = True\nsum.available_min_count = True\nstd = _create_nan_agg_method('std')\nstd.numeric_only = True\nvar = _create_nan_agg_method('var')\nvar.numeric_only = True\nmedian = _create_nan_agg_method('median')\nmedian.numeric_only = True\nprod = _create_nan_agg_method('prod')\nprod.numeric_only = True\nsum.available_min_count = True\ncumprod_1d = _create_nan_agg_method('cumprod')\ncumprod_1d.numeric_only = True\ncumsum_1d = _create_nan_agg_method('cumsum')\ncumsum_1d.numeric_only = True\n\n\n_mean = _create_nan_agg_method('mean')\n\n\ndef datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):\n \"\"\"Convert an array containing datetime-like data to an array of 
floats.\n\n Parameters\n ----------\n da : np.array\n Input data\n offset: Scalar with the same type of array or None\n If None, subtract minimum values to reduce round off error\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n dtype: target dtype\n\n Returns\n -------\n array\n \"\"\"\n # TODO: make this function dask-compatible?\n if offset is None:\n offset = array.min()\n array = array - offset\n\n if not hasattr(array, 'dtype'): # scalar is converted to 0d-array\n array = np.array(array)\n\n if array.dtype.kind in 'O':\n # possibly convert object array containing datetime.timedelta\n array = np.asarray(pd.Series(array.ravel())).reshape(array.shape)\n\n if datetime_unit:\n array = array / np.timedelta64(1, datetime_unit)\n\n # convert np.NaT to np.nan\n if array.dtype.kind in 'mM':\n return np.where(isnull(array), np.nan, array.astype(dtype))\n return array.astype(dtype)\n\n\ndef _to_pytimedelta(array, unit='us'):\n index = pd.TimedeltaIndex(array.ravel(), unit=unit)\n return index.to_pytimedelta().reshape(array.shape)\n\n\ndef mean(array, axis=None, skipna=None, **kwargs):\n \"\"\"inhouse mean that can handle np.datetime64 or cftime.datetime\n dtypes\"\"\"\n from .common import _contains_cftime_datetimes\n\n array = asarray(array)\n if array.dtype.kind in 'Mm':\n offset = min(array)\n # xarray always uses np.datetime64[ns] for np.datetime64 data\n dtype = 'timedelta64[ns]'\n return _mean(datetime_to_numeric(array, offset), axis=axis,\n skipna=skipna, **kwargs).astype(dtype) + offset\n elif _contains_cftime_datetimes(array):\n if isinstance(array, dask_array_type):\n raise NotImplementedError(\n 'Computing the mean of an array containing '\n 'cftime.datetime objects is not yet implemented on '\n 'dask arrays.')\n offset = min(array)\n timedeltas = datetime_to_numeric(array, offset, datetime_unit='us')\n mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna,\n **kwargs)\n return _to_pytimedelta(mean_timedeltas, unit='us') + offset\n else:\n return _mean(array, axis=axis, skipna=skipna, **kwargs)\n\n\nmean.numeric_only = True # type: ignore\n\n\ndef _nd_cum_func(cum_func, array, axis, **kwargs):\n array = asarray(array)\n if axis is None:\n axis = tuple(range(array.ndim))\n if isinstance(axis, int):\n axis = (axis,)\n\n out = array\n for ax in axis:\n out = cum_func(out, axis=ax, **kwargs)\n return out\n\n\ndef cumprod(array, axis=None, **kwargs):\n \"\"\"N-dimensional version of cumprod.\"\"\"\n return _nd_cum_func(cumprod_1d, array, axis, **kwargs)\n\n\ndef cumsum(array, axis=None, **kwargs):\n \"\"\"N-dimensional version of cumsum.\"\"\"\n return _nd_cum_func(cumsum_1d, array, axis, **kwargs)\n\n\n_fail_on_dask_array_input_skipna = partial(\n fail_on_dask_array_input,\n msg='%r with skipna=True is not yet implemented on dask arrays')\n\n\ndef first(values, axis, skipna=None):\n \"\"\"Return the first non-NA elements in this array along the given axis\n \"\"\"\n if (skipna or skipna is None) and values.dtype.kind not in 'iSU':\n # only bother for dtypes that can hold NaN\n _fail_on_dask_array_input_skipna(values)\n return nanfirst(values, axis)\n return take(values, 0, axis=axis)\n\n\ndef last(values, axis, skipna=None):\n \"\"\"Return the last non-NA elements in this array along the given axis\n \"\"\"\n if (skipna or skipna is None) and values.dtype.kind not in 'iSU':\n # only bother for dtypes that can hold NaN\n _fail_on_dask_array_input_skipna(values)\n return nanlast(values, axis)\n return take(values, -1, 
axis=axis)\n\n\ndef rolling_window(array, axis, window, center, fill_value):\n \"\"\"\n Make an ndarray with a rolling window of axis-th dimension.\n The rolling dimension will be placed at the last dimension.\n \"\"\"\n if isinstance(array, dask_array_type):\n return dask_array_ops.rolling_window(\n array, axis, window, center, fill_value)\n else: # np.ndarray\n return nputils.rolling_window(\n array, axis, window, center, fill_value)\n"
] | [
[
"numpy.array",
"numpy.asarray",
"numpy.timedelta64"
]
] |
QUVA-Lab/escnn | [
"59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882"
] | [
"escnn/nn/modules/pointconv/r2_point_convolution.py"
] | [
"\n\nfrom escnn.nn import FieldType, GeometricTensor\nfrom escnn.group import Representation\nfrom escnn.kernels import KernelBasis\n\nfrom torch_geometric.data import Data\n\nfrom .rd_point_convolution import _RdPointConv\n\nfrom typing import Callable, Tuple, Dict, Union, List\n\nimport torch\nimport numpy as np\n\n\nimport math\n\n\n__all__ = [\"R2PointConv\"]\n\n\nclass R2PointConv(_RdPointConv):\n \n def __init__(self,\n in_type: FieldType,\n out_type: FieldType,\n groups: int = 1,\n bias: bool = True,\n sigma: Union[List[float], float] = None,\n width: float = None,\n n_rings: int = None,\n frequencies_cutoff: Union[float, Callable[[float], int]] = None,\n rings: List[float] = None,\n basis_filter: Callable[[dict], bool] = None,\n recompute: bool = False,\n ):\n\n basis_filter, self._rings, self._sigma, self._maximum_frequency = compute_basis_params(\n frequencies_cutoff, rings, sigma, width, n_rings, basis_filter\n )\n\n super(R2PointConv, self).__init__(\n in_type, out_type,\n d=2,\n groups=groups,\n bias=bias,\n basis_filter=basis_filter,\n recompute=recompute\n )\n\n def _build_kernel_basis(self, in_repr: Representation, out_repr: Representation) -> KernelBasis:\n return self.space.build_kernel_basis(in_repr, out_repr,\n self._sigma, self._rings,\n maximum_frequency=self._maximum_frequency\n )\n\n\ndef bandlimiting_filter(frequency_cutoff: Union[float, Callable[[float], float]]) -> Callable[[dict], bool]:\n r\"\"\"\n\n Returns a method which takes as input the attributes (as a dictionary) of a basis element and returns a boolean\n value: whether to preserve that element (true) or not (false)\n\n If the parameter ``frequency_cutoff`` is a scalar value, the maximum frequency allowed at a certain radius is\n proportional to the radius itself. 
in thi case, the parameter ``frequency_cutoff`` is the factor controlling this\n proportionality relation.\n\n If the parameter ``frequency_cutoff`` is a callable, it needs to take as input a radius (a scalar value) and return\n the maximum frequency which can be sampled at that radius.\n\n args:\n frequency_cutoff (float): factor controlling the bandlimiting\n\n returns:\n a function which checks the attributes of individual basis elements and chooses whether to discard them or not\n\n \"\"\"\n\n if isinstance(frequency_cutoff, float):\n frequency_cutoff = lambda r, fco=frequency_cutoff: r * frequency_cutoff\n\n def bl_filter(attributes: dict) -> bool:\n return math.fabs(attributes[\"irrep:frequency\"]) <= frequency_cutoff(attributes[\"radius\"])\n\n return bl_filter\n\n\ndef compute_basis_params(\n frequencies_cutoff: Union[float, Callable[[float], float]] = None,\n rings: List[float] = None,\n sigma: List[float] = None,\n width: float = None,\n n_rings: int = None,\n custom_basis_filter: Callable[[dict], bool] = None,\n):\n\n assert (width is not None and n_rings is not None) != (rings is not None)\n\n # by default, the number of rings equals half of the filter size\n if rings is None:\n assert width > 0.\n assert n_rings > 0\n rings = torch.linspace(0, width, n_rings)\n rings = rings.tolist()\n\n if sigma is None:\n sigma = [0.6] * (len(rings) - 1) + [0.4]\n for i, r in enumerate(rings):\n if r == 0.:\n sigma[i] = 0.005\n elif isinstance(sigma, float):\n sigma = [sigma] * len(rings)\n\n if frequencies_cutoff is None:\n frequencies_cutoff = 3.\n\n if isinstance(frequencies_cutoff, float):\n frequencies_cutoff = lambda r, fco=frequencies_cutoff: fco * r\n\n # check if the object is a callable function\n assert callable(frequencies_cutoff)\n\n maximum_frequency = int(max(frequencies_cutoff(r) for r in rings))\n\n fco_filter = bandlimiting_filter(frequencies_cutoff)\n\n if custom_basis_filter is not None:\n basis_filter = lambda d, custom_basis_filter=custom_basis_filter, fco_filter=fco_filter: (\n custom_basis_filter(d) and fco_filter(d)\n )\n else:\n basis_filter = fco_filter\n\n return basis_filter, rings, sigma, maximum_frequency\n\n"
] | [
[
"torch.linspace"
]
] |
Johannes0Horn/deeplabmodel | [
"ee8618265d336b7f235bc7cbb6779b7dd06436a0"
] | [
"deeplab/utils/train_utils.py"
] | [
"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utility functions for training.\"\"\"\n\nimport six\n\nimport tensorflow as tf\nfrom deeplab.core import preprocess_utils\n\nslim = tf.contrib.slim\n\n\ndef add_softmax_cross_entropy_loss_for_each_scale(scales_to_logits,\n labels,\n num_classes,\n ignore_label,\n loss_weight=1.0,\n upsample_logits=True,\n scope=None):\n \"\"\"Adds softmax cross entropy loss for logits of each scale.\n\n Args:\n scales_to_logits: A map from logits names for different scales to logits.\n The logits have shape [batch, logits_height, logits_width, num_classes].\n labels: Groundtruth labels with shape [batch, image_height, image_width, 1].\n num_classes: Integer, number of target classes.\n ignore_label: Integer, label to ignore.\n loss_weight: Float, loss weight.\n upsample_logits: Boolean, upsample logits or not.\n scope: String, the scope for the loss.\n\n Raises:\n ValueError: Label or logits is None.\n \"\"\"\n if labels is None:\n raise ValueError('No label for softmax cross entropy loss.')\n\n for scale, logits in six.iteritems(scales_to_logits):\n loss_scope = None\n if scope:\n loss_scope = '%s_%s' % (scope, scale)\n\n if upsample_logits:\n # Label is not downsampled, and instead we upsample logits.\n logits = tf.image.resize_bilinear(\n logits,\n preprocess_utils.resolve_shape(labels, 4)[1:3],\n align_corners=True)\n scaled_labels = labels\n else:\n # Label is downsampled to the same size as logits.\n scaled_labels = tf.image.resize_nearest_neighbor(\n labels,\n preprocess_utils.resolve_shape(logits, 4)[1:3],\n align_corners=True)\n\n scaled_labels = tf.reshape(scaled_labels, shape=[-1])\n not_ignore_mask = tf.to_float(tf.not_equal(scaled_labels,\n ignore_label)) * loss_weight\n one_hot_labels = slim.one_hot_encoding(\n scaled_labels, num_classes, on_value=1.0, off_value=0.0)\n\n # LOSSWEIGTHS\n ignoore_weight = 0\n background_weight = 1\n trunk_weight = 5\n card_weight = 10\n not_ignore_mask = \\\n tf.to_float(tf.equal(scaled_labels, 0)) * background_weight + \\\n tf.to_float(tf.equal(scaled_labels, 1)) * trunk_weight + \\\n tf.to_float(tf.equal(scaled_labels, 2)) * card_weight + \\\n tf.to_float(tf.equal(scaled_labels, ignore_label)) * ignoore_weight\n\n ######\n\n tf.losses.softmax_cross_entropy(\n one_hot_labels,\n tf.reshape(logits, shape=[-1, num_classes]),\n weights=not_ignore_mask,\n scope=loss_scope)\n\n\ndef get_model_init_fn(train_logdir,\n tf_initial_checkpoint,\n initialize_last_layer,\n last_layers,\n ignore_missing_vars=False):\n \"\"\"Gets the function initializing model variables from a checkpoint.\n\n Args:\n train_logdir: Log directory for training.\n tf_initial_checkpoint: TensorFlow checkpoint for initialization.\n initialize_last_layer: Initialize last layer or not.\n last_layers: Last layers of the model.\n ignore_missing_vars: Ignore missing variables in the checkpoint.\n\n 
Returns:\n Initialization function.\n \"\"\"\n if tf_initial_checkpoint is None:\n tf.logging.info('Not initializing the model from a checkpoint.')\n return None\n\n if tf.train.latest_checkpoint(train_logdir):\n tf.logging.info('Ignoring initialization; other checkpoint exists')\n return None\n\n tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)\n\n # Variables that will not be restored.\n exclude_list = ['global_step', 'logits']\n if not initialize_last_layer:\n exclude_list.extend(last_layers)\n\n variables_to_restore = slim.get_variables_to_restore(exclude=exclude_list)\n\n if variables_to_restore:\n return slim.assign_from_checkpoint_fn(\n tf_initial_checkpoint,\n variables_to_restore,\n ignore_missing_vars=ignore_missing_vars)\n return None\n\n\ndef get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier):\n \"\"\"Gets the gradient multipliers.\n\n The gradient multipliers will adjust the learning rates for model\n variables. For the task of semantic segmentation, the models are\n usually fine-tuned from the models trained on the task of image\n classification. To fine-tune the models, we usually set larger (e.g.,\n 10 times larger) learning rate for the parameters of last layer.\n\n Args:\n last_layers: Scopes of last layers.\n last_layer_gradient_multiplier: The gradient multiplier for last layers.\n\n Returns:\n The gradient multiplier map with variables as key, and multipliers as value.\n \"\"\"\n gradient_multipliers = {}\n\n for var in slim.get_model_variables():\n # Double the learning rate for biases.\n if 'biases' in var.op.name:\n gradient_multipliers[var.op.name] = 2.\n\n # Use larger learning rate for last layer variables.\n for layer in last_layers:\n if layer in var.op.name and 'biases' in var.op.name:\n gradient_multipliers[var.op.name] = 2 * last_layer_gradient_multiplier\n break\n elif layer in var.op.name:\n gradient_multipliers[var.op.name] = last_layer_gradient_multiplier\n break\n\n return gradient_multipliers\n\n\ndef get_model_learning_rate(\n learning_policy, base_learning_rate, learning_rate_decay_step,\n learning_rate_decay_factor, training_number_of_steps, learning_power,\n slow_start_step, slow_start_learning_rate):\n \"\"\"Gets model's learning rate.\n\n Computes the model's learning rate for different learning policy.\n Right now, only \"step\" and \"poly\" are supported.\n (1) The learning policy for \"step\" is computed as follows:\n current_learning_rate = base_learning_rate *\n learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)\n See tf.train.exponential_decay for details.\n (2) The learning policy for \"poly\" is computed as follows:\n current_learning_rate = base_learning_rate *\n (1 - global_step / training_number_of_steps) ^ learning_power\n\n Args:\n learning_policy: Learning rate policy for training.\n base_learning_rate: The base learning rate for model training.\n learning_rate_decay_step: Decay the base learning rate at a fixed step.\n learning_rate_decay_factor: The rate to decay the base learning rate.\n training_number_of_steps: Number of steps for training.\n learning_power: Power used for 'poly' learning policy.\n slow_start_step: Training model with small learning rate for the first\n few steps.\n slow_start_learning_rate: The learning rate employed during slow start.\n\n Returns:\n Learning rate for the specified learning policy.\n\n Raises:\n ValueError: If learning policy is not recognized.\n \"\"\"\n global_step = tf.train.get_or_create_global_step()\n if 
learning_policy == 'step':\n learning_rate = tf.train.exponential_decay(\n base_learning_rate,\n global_step,\n learning_rate_decay_step,\n learning_rate_decay_factor,\n staircase=True)\n elif learning_policy == 'poly':\n learning_rate = tf.train.polynomial_decay(\n base_learning_rate,\n global_step,\n training_number_of_steps,\n end_learning_rate=0,\n power=learning_power)\n else:\n raise ValueError('Unknown learning policy.')\n\n # Employ small learning rate at the first few steps for warm start.\n return tf.where(global_step < slow_start_step, slow_start_learning_rate,\n learning_rate)\n"
] | [
[
"tensorflow.train.polynomial_decay",
"tensorflow.equal",
"tensorflow.reshape",
"tensorflow.logging.info",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.exponential_decay",
"tensorflow.where",
"tensorflow.train.get_or_create_global_step",
"tensorflow.not_equal"
]
] |
Muflhi01/TextWorld | [
"c419bb63a92c7f6960aa004a367fb18894043e7f"
] | [
"textworld/generator/game.py"
] | [
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT license.\n\n\nimport copy\nimport json\nimport textwrap\n\nfrom typing import List, Dict, Optional, Mapping, Any, Iterable, Union, Tuple\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport numpy as np\nfrom numpy.random import RandomState\n\nfrom textworld import g_rng\nfrom textworld.utils import encode_seeds\nfrom textworld.generator.data import KnowledgeBase\nfrom textworld.generator.text_grammar import Grammar, GrammarOptions\nfrom textworld.generator.world import World\nfrom textworld.logic import Action, Proposition, State\nfrom textworld.generator.graph_networks import DIRECTIONS\n\nfrom textworld.generator.chaining import ChainingOptions\n\nfrom textworld.generator.dependency_tree import DependencyTree\nfrom textworld.generator.dependency_tree import DependencyTreeElement\n\n\nclass UnderspecifiedEventError(NameError):\n def __init__(self):\n msg = \"Either the actions or the conditions is needed to create an event.\"\n super().__init__(msg)\n\n\nclass UnderspecifiedQuestError(NameError):\n def __init__(self):\n msg = \"At least one winning or failing event is needed to create a quest.\"\n super().__init__(msg)\n\n\ndef gen_commands_from_actions(actions: Iterable[Action], kb: Optional[KnowledgeBase] = None) -> List[str]:\n kb = kb or KnowledgeBase.default()\n\n def _get_name_mapping(action):\n mapping = kb.rules[action.name].match(action)\n return {ph.name: var.name for ph, var in mapping.items()}\n\n commands = []\n for action in actions:\n command = \"None\"\n if action is not None:\n command = kb.inform7_commands[action.name]\n command = command.format(**_get_name_mapping(action))\n\n commands.append(command)\n\n return commands\n\n\nclass Event:\n \"\"\"\n Event happening in TextWorld.\n\n An event gets triggered when its set of conditions become all statisfied.\n\n Attributes:\n actions: Actions to be performed to trigger this event\n commands: Human readable version of the actions.\n condition: :py:class:`textworld.logic.Action` that can only be applied\n when all conditions are statisfied.\n \"\"\"\n\n def __init__(self, actions: Iterable[Action] = (),\n conditions: Iterable[Proposition] = (),\n commands: Iterable[str] = ()) -> None:\n \"\"\"\n Args:\n actions: The actions to be performed to trigger this event.\n If an empty list, then `conditions` must be provided.\n conditions: Set of propositions which need to\n be all true in order for this event\n to get triggered.\n commands: Human readable version of the actions.\n \"\"\"\n self.actions = actions\n self.commands = commands\n self.condition = self.set_conditions(conditions)\n\n @property\n def actions(self) -> Iterable[Action]:\n return self._actions\n\n @actions.setter\n def actions(self, actions: Iterable[Action]) -> None:\n self._actions = tuple(actions)\n\n @property\n def commands(self) -> Iterable[str]:\n return self._commands\n\n @commands.setter\n def commands(self, commands: Iterable[str]) -> None:\n self._commands = tuple(commands)\n\n def is_triggering(self, state: State) -> bool:\n \"\"\" Check if this event would be triggered in a given state. 
\"\"\"\n return state.is_applicable(self.condition)\n\n def set_conditions(self, conditions: Iterable[Proposition]) -> Action:\n \"\"\"\n Set the triggering conditions for this event.\n\n Args:\n conditions: Set of propositions which need to\n be all true in order for this event\n to get triggered.\n Returns:\n Action that can only be applied when all conditions are statisfied.\n \"\"\"\n if not conditions:\n if len(self.actions) == 0:\n raise UnderspecifiedEventError()\n\n # The default winning conditions are the postconditions of the\n # last action in the quest.\n conditions = self.actions[-1].postconditions\n\n variables = sorted(set([v for c in conditions for v in c.arguments]))\n event = Proposition(\"event\", arguments=variables)\n self.condition = Action(\"trigger\", preconditions=conditions,\n postconditions=list(conditions) + [event])\n return self.condition\n\n def __hash__(self) -> int:\n return hash((self.actions, self.commands, self.condition))\n\n def __eq__(self, other: Any) -> bool:\n return (isinstance(other, Event)\n and self.actions == other.actions\n and self.commands == other.commands\n and self.condition == other.condition)\n\n @classmethod\n def deserialize(cls, data: Mapping) -> \"Event\":\n \"\"\" Creates an `Event` from serialized data.\n\n Args:\n data: Serialized data with the needed information to build a\n `Event` object.\n \"\"\"\n actions = [Action.deserialize(d) for d in data[\"actions\"]]\n condition = Action.deserialize(data[\"condition\"])\n event = cls(actions, condition.preconditions, data[\"commands\"])\n return event\n\n def serialize(self) -> Mapping:\n \"\"\" Serialize this event.\n\n Results:\n `Event`'s data serialized to be JSON compatible.\n \"\"\"\n data = {}\n data[\"commands\"] = self.commands\n data[\"actions\"] = [action.serialize() for action in self.actions]\n data[\"condition\"] = self.condition.serialize()\n return data\n\n def copy(self) -> \"Event\":\n \"\"\" Copy this event. \"\"\"\n return self.deserialize(self.serialize())\n\n\nclass Quest:\n \"\"\" Quest representation in TextWorld.\n\n A quest is defined by a mutually exclusive set of winning events and\n a mutually exclusive set of failing events.\n\n Attributes:\n win_events: Mutually exclusive set of winning events. That is,\n only one such event needs to be triggered in order\n to complete this quest.\n fail_events: Mutually exclusive set of failing events. That is,\n only one such event needs to be triggered in order\n to fail this quest.\n reward: Reward given for completing this quest.\n desc: A text description of the quest.\n commands: List of text commands leading to this quest completion.\n optional: Whether this quest is optional or not to finish the game.\n repeatable: Whether this quest can be completed more than once.\n \"\"\"\n\n def __init__(self,\n win_events: Iterable[Event] = (),\n fail_events: Iterable[Event] = (),\n reward: Optional[int] = None,\n desc: Optional[str] = None,\n commands: Iterable[str] = (),\n optional: bool = False,\n repeatable: bool = False) -> None:\n r\"\"\"\n Args:\n win_events: Mutually exclusive set of winning events. That is,\n only one such event needs to be triggered in order\n to complete this quest.\n fail_events: Mutually exclusive set of failing events. That is,\n only one such event needs to be triggered in order\n to fail this quest.\n reward: Reward given for completing this quest. 
By default,\n reward is set to 1 if there is at least one winning events\n otherwise it is set to 0.\n desc: A text description of the quest.\n commands: List of text commands leading to this quest completion.\n optional: If True, this quest is optional to finish the game.\n repeatable: If True, this quest can be completed more than once.\n \"\"\"\n self.win_events = tuple(win_events)\n self.fail_events = tuple(fail_events)\n self.desc = desc\n self.commands = tuple(commands)\n self.optional = optional\n self.repeatable = repeatable\n if self.repeatable:\n assert self.optional # Only optional quest can be repeatable.\n\n # Unless explicitly provided, reward is set to 1 if there is at least\n # one winning events otherwise it is set to 0.\n self.reward = int(len(win_events) > 0) if reward is None else reward\n\n if len(self.win_events) == 0 and len(self.fail_events) == 0:\n raise UnderspecifiedQuestError()\n\n @property\n def win_events(self) -> Iterable[Event]:\n return self._win_events\n\n @win_events.setter\n def win_events(self, events: Iterable[Event]) -> None:\n self._win_events = tuple(events)\n\n @property\n def fail_events(self) -> Iterable[Event]:\n return self._fail_events\n\n @fail_events.setter\n def fail_events(self, events: Iterable[Event]) -> None:\n self._fail_events = tuple(events)\n\n @property\n def commands(self) -> Iterable[str]:\n return self._commands\n\n @commands.setter\n def commands(self, commands: Iterable[str]) -> None:\n self._commands = tuple(commands)\n\n def is_winning(self, state: State) -> bool:\n \"\"\" Check if this quest is winning in that particular state. \"\"\"\n return any(event.is_triggering(state) for event in self.win_events)\n\n def is_failing(self, state: State) -> bool:\n \"\"\" Check if this quest is failing in that particular state. 
\"\"\"\n return any(event.is_triggering(state) for event in self.fail_events)\n\n def __hash__(self) -> int:\n return hash((self.win_events, self.fail_events, self.reward,\n self.desc, self.commands, self.optional, self.repeatable))\n\n def __eq__(self, other: Any) -> bool:\n return (isinstance(other, Quest)\n and self.win_events == other.win_events\n and self.fail_events == other.fail_events\n and self.reward == other.reward\n and self.desc == other.desc\n and self.commands == other.commands\n and self.optional == other.optional\n and self.repeatable == other.repeatable)\n\n @classmethod\n def deserialize(cls, data: Mapping) -> \"Quest\":\n \"\"\" Creates a `Quest` from serialized data.\n\n Args:\n data: Serialized data with the needed information to build a\n `Quest` object.\n \"\"\"\n win_events = [Event.deserialize(d) for d in data[\"win_events\"]]\n fail_events = [Event.deserialize(d) for d in data[\"fail_events\"]]\n commands = data.get(\"commands\", [])\n reward = data[\"reward\"]\n desc = data[\"desc\"]\n optional = data.get(\"optional\", False)\n repeatable = data.get(\"repeatable\", False)\n return cls(win_events, fail_events, reward, desc, commands, optional, repeatable)\n\n def serialize(self) -> Mapping:\n \"\"\" Serialize this quest.\n\n Results:\n Quest's data serialized to be JSON compatible\n \"\"\"\n data = {}\n data[\"desc\"] = self.desc\n data[\"reward\"] = self.reward\n data[\"commands\"] = self.commands\n data[\"win_events\"] = [event.serialize() for event in self.win_events]\n data[\"fail_events\"] = [event.serialize() for event in self.fail_events]\n data[\"optional\"] = self.optional\n data[\"repeatable\"] = self.repeatable\n return data\n\n def copy(self) -> \"Quest\":\n \"\"\" Copy this quest. \"\"\"\n return self.deserialize(self.serialize())\n\n\nclass EntityInfo:\n \"\"\" Additional information about entities in the game. \"\"\"\n __slots__ = ['id', 'type', 'name', 'noun', 'adj', 'desc', 'room_type', 'definite', 'indefinite', 'synonyms']\n\n def __init__(self, id: str, type: str) -> None:\n #: str: Unique name for this entity. It is used when generating\n self.id = id\n #: str: The type of this entity.\n self.type = type\n #: str: The name that will be displayed in-game to identify this entity.\n self.name = None\n #: str: The noun part of the name, if available.\n self.noun = None\n #: str: The adjective (i.e. descriptive) part of the name, if available.\n self.adj = None\n #: str: The definite article to use for this entity.\n self.definite = None\n #: str: The indefinite article to use for this entity.\n self.indefinite = None\n #: List[str]: Alternative names that can be used to refer to this entity.\n self.synonyms = None\n #: str: Text description displayed when examining this entity in the game.\n self.desc = None\n #: str: Type of the room this entity belongs to. 
It used to influence\n #: its `name` during text generation.\n self.room_type = None\n\n def __eq__(self, other: Any) -> bool:\n return (isinstance(other, EntityInfo)\n and all(getattr(self, slot) == getattr(other, slot)\n for slot in self.__slots__))\n\n def __hash__(self) -> int:\n return hash(tuple(getattr(self, slot) for slot in self.__slots__))\n\n def __str__(self) -> str:\n return \"Info({}: {} | {})\".format(self.name, self.adj, self.noun)\n\n @classmethod\n def deserialize(cls, data: Mapping) -> \"EntityInfo\":\n \"\"\" Creates a `EntityInfo` from serialized data.\n\n Args:\n data: Serialized data with the needed information to build a\n `EntityInfo` object.\n \"\"\"\n info = cls(data[\"id\"], data[\"type\"])\n for slot in cls.__slots__:\n setattr(info, slot, data.get(slot))\n\n return info\n\n def serialize(self) -> Mapping:\n \"\"\" Serialize this object.\n\n Results:\n EntityInfo's data serialized to be JSON compatible\n \"\"\"\n return {slot: getattr(self, slot) for slot in self.__slots__}\n\n\nclass Game:\n \"\"\" Game representation in TextWorld.\n\n A `Game` is defined by a world and it can have quest(s) or not.\n Additionally, a grammar can be provided to control the text generation.\n \"\"\"\n\n _SERIAL_VERSION = 1\n\n def __init__(self, world: World, grammar: Optional[Grammar] = None,\n quests: Iterable[Quest] = ()) -> None:\n \"\"\"\n Args:\n world: The world to use for the game.\n quests: The quests to be done in the game.\n grammar: The grammar to control the text generation.\n \"\"\"\n self.world = world\n self.quests = tuple(quests)\n self.metadata = {}\n self._objective = None\n self._infos = self._build_infos()\n self.kb = world.kb\n\n self.change_grammar(grammar)\n\n @property\n def infos(self) -> Dict[str, EntityInfo]:\n \"\"\" Information about the entities in the game. \"\"\"\n return self._infos\n\n def _build_infos(self) -> Dict[str, EntityInfo]:\n mapping = OrderedDict()\n for entity in self.world.entities:\n if entity not in mapping:\n mapping[entity.id] = EntityInfo(entity.id, entity.type)\n\n return mapping\n\n def copy(self) -> \"Game\":\n \"\"\" Make a shallow copy of this game. \"\"\"\n game = Game(self.world, None, self.quests)\n game._infos = dict(self.infos)\n game._objective = self._objective\n game.metadata = dict(self.metadata)\n return game\n\n def change_grammar(self, grammar: Grammar) -> None:\n \"\"\" Changes the grammar used and regenerate all text. \"\"\"\n\n self.grammar = grammar\n _gen_commands = partial(gen_commands_from_actions, kb=self.kb)\n if self.grammar:\n from textworld.generator.inform7 import Inform7Game\n from textworld.generator.text_generation import generate_text_from_grammar\n inform7 = Inform7Game(self)\n _gen_commands = inform7.gen_commands_from_actions\n generate_text_from_grammar(self, self.grammar)\n\n for quest in self.quests:\n for event in quest.win_events:\n event.commands = _gen_commands(event.actions)\n\n if quest.win_events:\n quest.commands = quest.win_events[0].commands\n\n # Check if we can derive a global winning policy from the quests.\n if self.grammar:\n from textworld.generator.text_generation import describe_event\n policy = GameProgression(self).winning_policy\n if policy:\n mapping = {k: info.name for k, info in self._infos.items()}\n commands = [a.format_command(mapping) for a in policy]\n self.metadata[\"walkthrough\"] = commands\n self.objective = describe_event(Event(policy), self, self.grammar)\n\n def save(self, filename: str) -> None:\n \"\"\" Saves the serialized data of this game to a file. 
\"\"\"\n with open(filename, 'w') as f:\n json.dump(self.serialize(), f)\n\n @classmethod\n def load(cls, filename: str) -> \"Game\":\n \"\"\" Creates `Game` from serialized data saved in a file. \"\"\"\n with open(filename, 'r') as f:\n return cls.deserialize(json.load(f))\n\n @classmethod\n def deserialize(cls, data: Mapping) -> \"Game\":\n \"\"\" Creates a `Game` from serialized data.\n\n Args:\n data: Serialized data with the needed information to build a\n `Game` object.\n \"\"\"\n\n version = data.get(\"version\", cls._SERIAL_VERSION)\n if version != cls._SERIAL_VERSION:\n msg = \"Cannot deserialize a TextWorld version {} game, expected version {}\"\n raise ValueError(msg.format(version, cls._SERIAL_VERSION))\n\n kb = KnowledgeBase.deserialize(data[\"KB\"])\n world = World.deserialize(data[\"world\"], kb=kb)\n game = cls(world)\n game.grammar_options = GrammarOptions(data[\"grammar\"])\n game.quests = tuple([Quest.deserialize(d) for d in data[\"quests\"]])\n game._infos = {k: EntityInfo.deserialize(v) for k, v in data[\"infos\"]}\n game.metadata = data.get(\"metadata\", {})\n game._objective = data.get(\"objective\", None)\n\n return game\n\n def serialize(self) -> Mapping:\n \"\"\" Serialize this object.\n\n Results:\n Game's data serialized to be JSON compatible\n \"\"\"\n data = {}\n data[\"version\"] = self._SERIAL_VERSION\n data[\"world\"] = self.world.serialize()\n data[\"grammar\"] = self.grammar.options.serialize() if self.grammar else {}\n data[\"quests\"] = [quest.serialize() for quest in self.quests]\n data[\"infos\"] = [(k, v.serialize()) for k, v in self._infos.items()]\n data[\"KB\"] = self.kb.serialize()\n data[\"metadata\"] = self.metadata\n data[\"objective\"] = self._objective\n\n return data\n\n def __eq__(self, other: Any) -> bool:\n return (isinstance(other, Game)\n and self.world == other.world\n and self.infos == other.infos\n and self.quests == other.quests\n and self.metadata == other.metadata\n and self._objective == other._objective)\n\n def __hash__(self) -> int:\n state = (self.world,\n frozenset(self.quests),\n frozenset(self.infos.items()),\n self._objective)\n\n return hash(state)\n\n @property\n def max_score(self) -> float:\n \"\"\" Sum of the reward of all quests. \"\"\"\n if any(quest.repeatable and quest.reward > 0 for quest in self.quests):\n return np.inf\n\n return sum(quest.reward for quest in self.quests if not quest.optional or quest.reward > 0)\n\n @property\n def command_templates(self) -> List[str]:\n \"\"\" All command templates understood in this game. \"\"\"\n return sorted(set(cmd for cmd in self.kb.inform7_commands.values()))\n\n @property\n def directions_names(self) -> List[str]:\n return DIRECTIONS\n\n @property\n def objects_types(self) -> List[str]:\n \"\"\" All types of objects in this game. \"\"\"\n return sorted(self.kb.types.types)\n\n @property\n def objects_names(self) -> List[str]:\n \"\"\" The names of all relevant objects in this game. \"\"\"\n def _filter_unnamed_and_room_entities(e):\n return e.name and e.type != \"r\"\n\n entities_infos = filter(_filter_unnamed_and_room_entities, self.infos.values())\n return [info.name for info in entities_infos]\n\n @property\n def entity_names(self) -> List[str]:\n return self.objects_names + self.directions_names\n\n @property\n def objects_names_and_types(self) -> List[str]:\n \"\"\" The names of all non-player objects along with their type in this game. 
\"\"\"\n def _filter_unnamed_and_room_entities(e):\n return e.name and e.type != \"r\"\n\n entities_infos = filter(_filter_unnamed_and_room_entities, self.infos.values())\n return [(info.name, info.type) for info in entities_infos]\n\n @property\n def verbs(self) -> List[str]:\n \"\"\" Verbs that should be recognized in this game. \"\"\"\n # Retrieve commands templates for every rule.\n return sorted(set(cmd.split()[0] for cmd in self.command_templates))\n\n @property\n def objective(self) -> str:\n if self._objective is not None:\n return self._objective\n\n # TODO: Find a better way of describing the objective of the game with several quests.\n self._objective = \"\\nAND\\n\".join(quest.desc for quest in self.quests if quest.desc)\n\n return self._objective\n\n @objective.setter\n def objective(self, value: str):\n self._objective = value\n\n @property\n def walkthrough(self) -> Optional[List[str]]:\n walkthrough = self.metadata.get(\"walkthrough\")\n if walkthrough:\n return walkthrough\n\n # Check if we can derive a walkthrough from the quests.\n policy = GameProgression(self).winning_policy\n if policy:\n mapping = {k: info.name for k, info in self._infos.items()}\n walkthrough = [a.format_command(mapping) for a in policy]\n self.metadata[\"walkthrough\"] = walkthrough\n\n return walkthrough\n\n\nclass ActionDependencyTreeElement(DependencyTreeElement):\n \"\"\" Representation of an `Action` in the dependency tree.\n\n The notion of dependency and ordering is defined as follows:\n\n * action1 depends on action2 if action1 needs the propositions\n added by action2;\n * action1 should be performed before action2 if action2 removes\n propositions needed by action1.\n \"\"\"\n\n def depends_on(self, other: \"ActionDependencyTreeElement\") -> bool:\n \"\"\" Check whether this action depends on the `other`.\n\n Action1 depends on action2 when the intersection between\n the propositions added by action2 and the preconditions\n of the action1 is not empty, i.e. action1 needs the\n propositions added by action2.\n \"\"\"\n return len(other.action.added & self.action._pre_set) > 0\n\n @property\n def action(self) -> Action:\n return self.value\n\n def is_distinct_from(self, others: List[\"ActionDependencyTreeElement\"]) -> bool:\n \"\"\"\n Check whether this element is distinct from `others`.\n\n We check if self.action has any additional information\n that `others` actions don't have. This helps us to\n identify whether a group of nodes in the dependency tree\n already contain all the needed information that self.action\n would bring.\n \"\"\"\n new_facts = set(self.action.added)\n for other in others:\n new_facts -= other.action.added\n\n return len(new_facts) > 0\n\n def __lt__(self, other: \"ActionDependencyTreeElement\") -> bool:\n \"\"\" Order ActionDependencyTreeElement elements.\n\n Actions that remove information needed by other actions\n should be sorted further in the list.\n\n Notes:\n This is not a proper ordering, i.e. 
two actions\n can mutually removed information needed by each other.\n \"\"\"\n def _required_facts(node):\n pre_set = set(node.action._pre_set)\n while node.parent is not None:\n pre_set |= node.parent.action._pre_set\n pre_set -= node.action.added\n node = node.parent\n\n return pre_set\n\n return len(other.action.removed & _required_facts(self)) > len(self.action.removed & _required_facts(other))\n\n def __str__(self) -> str:\n params = \", \".join(map(str, self.action.variables))\n return \"{}({})\".format(self.action.name, params)\n\n\nclass ActionDependencyTree(DependencyTree):\n\n def __init__(self, *args, kb: Optional[KnowledgeBase] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self._kb = kb or KnowledgeBase.default()\n\n def remove(self, action: Action) -> Tuple[bool, Optional[Action]]:\n changed = super().remove(action)\n\n # The last action might have impacted one of the subquests.\n reverse_action = self._kb.get_reverse_action(action)\n if self.empty:\n return changed, reverse_action\n\n if reverse_action is not None:\n changed = self.push(reverse_action)\n elif self.push(action.inverse()):\n # The last action did impact one of the subquests\n # but there's no reverse action to recover from it.\n changed = True\n\n return changed, reverse_action\n\n def flatten(self) -> Iterable[Action]:\n \"\"\"\n Generates a flatten representation of this dependency tree.\n\n Actions are greedily yielded by iteratively popping leaves from\n the dependency tree.\n \"\"\"\n tree = self.copy() # Make a copy of the tree to work on.\n last_reverse_action = None\n changed = False\n while len(tree.roots) > 0:\n # Use 'sort' to try leaves that doesn't affect the others first.\n for leaf in sorted(tree.leaves_elements):\n if leaf.action != last_reverse_action or not changed:\n break # Choose an action that avoids cycles.\n\n yield leaf.action\n changed, last_reverse_action = tree.remove(leaf.action)\n\n # Prune empty roots\n for root in list(tree.roots):\n if len(root.children) == 0:\n yield root.element.action\n tree.remove(root.element.action)\n\n def copy(self) -> \"ActionDependencyTree\":\n tree = super().copy()\n tree._kb = self._kb\n return tree\n\n\nclass EventProgression:\n \"\"\" EventProgression monitors a particular event.\n\n Internally, the event is represented as a dependency tree of\n relevant actions to be performed.\n \"\"\"\n\n def __init__(self, event: Event, kb: KnowledgeBase) -> None:\n \"\"\"\n Args:\n quest: The quest to keep track of its completion.\n \"\"\"\n self._kb = kb or KnowledgeBase.default()\n self.event = event\n self._triggered = False\n self._untriggerable = False\n self._policy = ()\n\n # Build a tree representation of the quest.\n self._tree = ActionDependencyTree(kb=self._kb,\n element_type=ActionDependencyTreeElement)\n\n if len(event.actions) > 0:\n self._tree.push(event.condition)\n\n for action in event.actions[::-1]:\n self._tree.push(action)\n\n self._policy = event.actions + (event.condition,)\n\n def copy(self) -> \"EventProgression\":\n \"\"\" Return a soft copy. \"\"\"\n ep = EventProgression(self.event, self._kb)\n ep._triggered = self._triggered\n ep._untriggerable = self._untriggerable\n ep._policy = self._policy\n ep._tree = self._tree.copy()\n return ep\n\n @property\n def triggering_policy(self) -> List[Action]:\n \"\"\" Actions to be performed in order to trigger the event. 
\"\"\"\n if self.done:\n return ()\n\n # Discard all \"trigger\" actions.\n return tuple(a for a in self._policy if a.name != \"trigger\")\n\n @property\n def done(self) -> bool:\n \"\"\" Check if the quest is done (i.e. triggered or untriggerable). \"\"\"\n return self.triggered or self.untriggerable\n\n @property\n def triggered(self) -> bool:\n \"\"\" Check whether the event has been triggered. \"\"\"\n return self._triggered\n\n @property\n def untriggerable(self) -> bool:\n \"\"\" Check whether the event is in an untriggerable state. \"\"\"\n return self._untriggerable\n\n def update(self, action: Optional[Action] = None, state: Optional[State] = None) -> None:\n \"\"\" Update event progression given available information.\n\n Args:\n action: Action potentially affecting the event progression.\n state: Current game state.\n \"\"\"\n if self.done:\n return # Nothing to do, the quest is already done.\n\n if state is not None:\n # Check if event is triggered.\n self._triggered = self.event.is_triggering(state)\n\n # Try compressing the winning policy given the new game state.\n if self.compress_policy(state):\n return # A shorter winning policy has been found.\n\n if action is not None and not self._tree.empty:\n # Determine if we moved away from the goal or closer to it.\n changed, reverse_action = self._tree.remove(action)\n if changed and reverse_action is None: # Irreversible action.\n self._untriggerable = True # Can't track quest anymore.\n\n if changed and reverse_action is not None:\n # Rebuild policy.\n self._policy = tuple(self._tree.flatten())\n\n def compress_policy(self, state: State) -> bool:\n \"\"\" Compress the policy given a game state.\n\n Args:\n state: Current game state.\n\n Returns:\n Whether the policy was compressed or not.\n \"\"\"\n\n def _find_shorter_policy(policy):\n for j in range(0, len(policy)):\n for i in range(j + 1, len(policy))[::-1]:\n shorter_policy = policy[:j] + policy[i:]\n if state.is_sequence_applicable(shorter_policy):\n self._tree = ActionDependencyTree(kb=self._kb,\n element_type=ActionDependencyTreeElement)\n for action in shorter_policy[::-1]:\n self._tree.push(action)\n\n return shorter_policy\n\n return None\n\n compressed = False\n policy = _find_shorter_policy(tuple(a for a in self._tree.flatten()))\n while policy is not None:\n compressed = True\n self._policy = policy\n policy = _find_shorter_policy(policy)\n\n return compressed\n\n\nclass QuestProgression:\n \"\"\" QuestProgression keeps track of the completion of a quest.\n\n Internally, the quest is represented as a dependency tree of\n relevant actions to be performed.\n \"\"\"\n\n def __init__(self, quest: Quest, kb: KnowledgeBase) -> None:\n \"\"\"\n Args:\n quest: The quest to keep track of its completion.\n \"\"\"\n self.quest = quest\n self.kb = kb\n self.nb_completions = 0\n self.win_events = [EventProgression(event, kb) for event in quest.win_events]\n self.fail_events = [EventProgression(event, kb) for event in quest.fail_events]\n\n def copy(self) -> \"QuestProgression\":\n \"\"\" Return a soft copy. 
\"\"\"\n qp = QuestProgression(self.quest, self.kb)\n qp.win_events = [event_progression.copy() for event_progression in self.win_events]\n qp.fail_events = [event_progression.copy() for event_progression in self.fail_events]\n qp.nb_completions = self.nb_completions\n return qp\n\n @property\n def _tree(self) -> Optional[List[ActionDependencyTree]]:\n events = [event for event in self.win_events if len(event.triggering_policy) > 0]\n if len(events) == 0:\n return None\n\n event = min(events, key=lambda event: len(event.triggering_policy))\n return event._tree\n\n @property\n def winning_policy(self) -> Optional[List[Action]]:\n \"\"\" Actions to be performed in order to complete the quest. \"\"\"\n if self.done:\n return None\n\n winning_policies = [event.triggering_policy for event in self.win_events if len(event.triggering_policy) > 0]\n if len(winning_policies) == 0:\n return None\n\n return min(winning_policies, key=lambda policy: len(policy))\n\n @property\n def completable(self) -> bool:\n \"\"\" Check if the quest has winning events. \"\"\"\n return len(self.win_events) > 0\n\n @property\n def done(self) -> bool:\n \"\"\" Check if the quest is done (i.e. completed, failed or unfinishable). \"\"\"\n return self.completed or self.failed or self.unfinishable\n\n @property\n def completed(self) -> bool:\n \"\"\" Check whether the quest is completed. \"\"\"\n return any(event.triggered for event in self.win_events)\n\n @property\n def failed(self) -> bool:\n \"\"\" Check whether the quest has failed. \"\"\"\n return any(event.triggered for event in self.fail_events)\n\n @property\n def unfinishable(self) -> bool:\n \"\"\" Check whether the quest is in an unfinishable state. \"\"\"\n return any(event.untriggerable for event in self.win_events)\n\n def update(self, action: Optional[Action] = None, state: Optional[State] = None) -> None:\n \"\"\" Update quest progression given available information.\n\n Args:\n action: Action potentially affecting the quest progression.\n state: Current game state.\n \"\"\"\n if self.done:\n return # Nothing to do, the quest is already done.\n\n for event in (self.win_events + self.fail_events):\n event.update(action, state)\n\n if self.completed:\n self.nb_completions += 1\n\n # If repeatable quest is completed, reset its win_events' triggered state.\n if self.quest.repeatable:\n for event in self.win_events:\n event._triggered = False\n\n assert not self.completed # TODO make a unit test for this.\n\n\nclass GameProgression:\n \"\"\" GameProgression keeps track of the progression of a game.\n\n If `tracking_quests` is True, then `winning_policy` will be the list\n of Action that need to be applied in order to complete the game.\n \"\"\"\n\n def __init__(self, game: Game, track_quests: bool = True) -> None:\n \"\"\"\n Args:\n game: The game for which to track progression.\n track_quests: whether quest progressions are being tracked.\n \"\"\"\n self.game = game\n self.state = game.world.state.copy()\n self._valid_actions = list(self.state.all_applicable_actions(self.game.kb.rules.values(),\n self.game.kb.types.constants_mapping))\n\n self.quest_progressions = []\n if track_quests:\n self.quest_progressions = [QuestProgression(quest, game.kb) for quest in game.quests]\n for quest_progression in self.quest_progressions:\n quest_progression.update(action=None, state=self.state)\n\n def copy(self) -> \"GameProgression\":\n \"\"\" Return a soft copy. 
\"\"\"\n gp = GameProgression(self.game, track_quests=False)\n gp.state = self.state.copy()\n gp._valid_actions = self._valid_actions\n if self.tracking_quests:\n gp.quest_progressions = [quest_progression.copy() for quest_progression in self.quest_progressions]\n\n return gp\n\n @property\n def done(self) -> bool:\n \"\"\" Whether all non-optional quests are completed or at least one has failed or is unfinishable. \"\"\"\n return self.completed or self.failed\n\n @property\n def completed(self) -> bool:\n \"\"\" Whether all non-optional quests are completed. \"\"\"\n if not self.tracking_quests:\n return False # There is nothing to be \"completed\".\n\n return all(qp.completed for qp in self.quest_progressions if qp.completable and not qp.quest.optional)\n\n @property\n def failed(self) -> bool:\n \"\"\" Whether at least one non-optional quest has failed or is unfinishable. \"\"\"\n if not self.tracking_quests:\n return False # There is nothing to be \"failed\".\n\n return any((qp.failed or qp.unfinishable) for qp in self.quest_progressions if not qp.quest.optional)\n\n @property\n def score(self) -> int:\n \"\"\" Sum of the reward of all completed quests. \"\"\"\n return sum(qp.quest.reward * qp.nb_completions for qp in self.quest_progressions)\n\n @property\n def tracking_quests(self) -> bool:\n \"\"\" Whether quests are being tracked or not. \"\"\"\n return len(self.quest_progressions) > 0\n\n @property\n def valid_actions(self) -> List[Action]:\n \"\"\" Actions that are valid at the current state. \"\"\"\n return self._valid_actions\n\n @property\n def winning_policy(self) -> Optional[List[Action]]:\n \"\"\" Actions to be performed in order to complete the game.\n\n Returns:\n A policy that leads to winning the game. It can be `None`\n if `tracking_quests` is `False` or the quest has failed.\n \"\"\"\n if not self.tracking_quests:\n return None\n\n if self.done:\n return None\n\n # Greedily build a new winning policy by merging all quest trees.\n trees = [quest._tree for quest in self.quest_progressions\n if quest.completable and not quest.done and not quest.quest.optional]\n if None in trees:\n # Some quests don't have triggering policy.\n return None\n\n master_quest_tree = ActionDependencyTree(kb=self.game.kb,\n element_type=ActionDependencyTreeElement,\n trees=trees)\n\n # Discard all \"trigger\" actions.\n return tuple(a for a in master_quest_tree.flatten() if a.name != \"trigger\")\n\n def update(self, action: Action) -> None:\n \"\"\" Update the state of the game given the provided action.\n\n Args:\n action: Action affecting the state of the game.\n \"\"\"\n # Update world facts.\n self.state.apply(action)\n\n # Get valid actions.\n self._valid_actions = list(self.state.all_applicable_actions(self.game.kb.rules.values(),\n self.game.kb.types.constants_mapping))\n\n # Update all quest progressions given the last action and new state.\n for quest_progression in self.quest_progressions:\n quest_progression.update(action, self.state)\n\n\nclass GameOptions:\n \"\"\"\n Options for customizing the game generation.\n\n Attributes:\n nb_rooms (int):\n Number of rooms in the game.\n nb_objects (int):\n Number of objects in the game.\n nb_parallel_quests (int):\n Number of parallel quests, i.e. not sharing a common goal.\n quest_length (int):\n Number of actions that need to be performed to complete the game.\n quest_breadth (int):\n Number of subquests per independent quest. 
It controls how nonlinear\n a quest can be (1: linear).\n quest_depth (int):\n Number of actions that need to be performed to solve a subquest.\n path (str):\n Path of the compiled game (.ulx or .z8). Also, the source (.ni)\n and metadata (.json) files will be saved along with it.\n force_recompile (bool):\n If `True`, recompile game even if it already exists.\n file_ext (str):\n Type of the generated game file. Either .z8 (Z-Machine) or .ulx (Glulx).\n If `path` already has an extension, this is ignored.\n seeds (Optional[Union[int, Dict]]):\n Seeds for the different generation processes.\n\n * If `None`, seeds will be sampled from\n :py:data:`textworld.g_rng <textworld.utils.g_rng>`.\n * If `int`, it acts as a seed for a random generator that will be\n used to sample the other seeds.\n * If dict, the following keys can be set:\n\n * `'map'`: control the map generation;\n * `'objects'`: control the type of objects and their\n location;\n * `'quest'`: control the quest generation;\n * `'grammar'`: control the text generation.\n\n For any key missing, a random number gets assigned (sampled\n from :py:data:`textworld.g_rng <textworld.utils.g_rng>`).\n kb (KnowledgeBase):\n The knowledge base containing the logic and the text grammars (see\n :py:class:`textworld.generator.KnowledgeBase <textworld.generator.data.KnowledgeBase>`\n for more information).\n chaining (ChainingOptions):\n For customizing the quest generation (see\n :py:class:`textworld.generator.ChainingOptions <textworld.generator.chaining.ChainingOptions>`\n for the list of available options).\n grammar (GrammarOptions):\n For customizing the text generation (see\n :py:class:`textworld.generator.GrammarOptions <textworld.generator.text_grammar.GrammarOptions>`\n for the list of available options).\n \"\"\"\n\n def __init__(self):\n self.chaining = ChainingOptions()\n self.grammar = GrammarOptions()\n self._kb = None\n self._seeds = None\n\n self.nb_parallel_quests = 1\n self.nb_rooms = 1\n self.nb_objects = 1\n self.force_recompile = False\n self.file_ext = \".ulx\"\n self.path = \"./tw_games/\"\n\n @property\n def quest_length(self) -> int:\n assert self.chaining.min_length == self.chaining.max_length\n return self.chaining.min_length\n\n @quest_length.setter\n def quest_length(self, value: int) -> None:\n self.chaining.min_length = value\n self.chaining.max_length = value\n self.chaining.max_depth = value\n\n @property\n def quest_breadth(self) -> int:\n assert self.chaining.min_breadth == self.chaining.max_breadth\n return self.chaining.min_breadth\n\n @quest_breadth.setter\n def quest_breadth(self, value: int) -> None:\n self.chaining.min_breadth = value\n self.chaining.max_breadth = value\n\n @property\n def seeds(self):\n if self._seeds is None:\n self.seeds = {} # Generate seeds from g_rng.\n\n return self._seeds\n\n @seeds.setter\n def seeds(self, value: Union[int, Mapping[str, int]]) -> None:\n keys = ['map', 'objects', 'quest', 'grammar']\n\n def _key_missing(seeds):\n return not set(seeds.keys()).issuperset(keys)\n\n seeds = value\n if type(value) is int:\n rng = RandomState(value)\n seeds = {}\n elif _key_missing(value):\n rng = g_rng.next()\n\n # Check if we need to generate missing seeds.\n self._seeds = {}\n for key in keys:\n if key in seeds:\n self._seeds[key] = seeds[key]\n else:\n self._seeds[key] = rng.randint(65635)\n\n @property\n def rngs(self) -> Dict[str, RandomState]:\n rngs = {}\n for key, seed in self.seeds.items():\n rngs[key] = RandomState(seed)\n\n return rngs\n\n @property\n def kb(self) -> 
KnowledgeBase:\n if self._kb is None:\n self.kb = KnowledgeBase.load()\n\n return self._kb\n\n @kb.setter\n def kb(self, value: KnowledgeBase) -> None:\n self._kb = value\n self.chaining.kb = self._kb\n\n def copy(self) -> \"GameOptions\":\n return copy.copy(self)\n\n @property\n def uuid(self) -> str:\n # TODO: generate uuid from chaining options?\n uuid = \"tw-{specs}-{grammar}-{seeds}\"\n uuid = uuid.format(specs=encode_seeds((self.nb_rooms, self.nb_objects, self.nb_parallel_quests,\n self.chaining.min_length, self.chaining.max_length,\n self.chaining.min_depth, self.chaining.max_depth,\n self.chaining.min_breadth, self.chaining.max_breadth)),\n grammar=self.grammar.uuid,\n seeds=encode_seeds([self.seeds[k] for k in sorted(self._seeds)]))\n return uuid\n\n def __str__(self) -> str:\n infos = [\"-= Game options =-\"]\n slots = [\"nb_rooms\", \"nb_objects\", \"nb_parallel_quests\", \"path\", \"force_recompile\", \"file_ext\", \"seeds\"]\n for slot in slots:\n infos.append(\"{}: {}\".format(slot, getattr(self, slot)))\n\n text = \"\\n \".join(infos)\n text += \"\\n chaining options:\\n\"\n text += textwrap.indent(str(self.chaining), \" \")\n\n text += \"\\n grammar options:\\n\"\n text += textwrap.indent(str(self.grammar), \" \")\n\n text += \"\\n KB:\\n\"\n text += textwrap.indent(str(self.kb), \" \")\n return text\n"
] | [
[
"numpy.random.RandomState"
]
] |
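The `apis` column above records the single NumPy call extracted from this row's `code` field: `numpy.random.RandomState`, which the `GameOptions.seeds` setter uses to expand one integer seed into per-component seeds. A minimal standalone sketch of that pattern follows; the helper name and the fixed key list are illustrative, not part of TextWorld's API.

```python
# Minimal sketch (not TextWorld itself): expanding one integer seed into reproducible
# per-component seeds with numpy.random.RandomState, mirroring the GameOptions.seeds
# setter embedded in this row's `code` field.
from numpy.random import RandomState

def expand_seeds(value, keys=('map', 'objects', 'quest', 'grammar')):
    """Return a dict with one reproducible sub-seed per generation component."""
    rng = RandomState(value)                           # master RNG seeded by the single integer
    return {key: rng.randint(65635) for key in keys}   # 65635 is the bound used in the row's code

print(expand_seeds(1234))  # the same input always yields the same four sub-seeds
```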
noshita/image-labelling-tool | [
"59c8eb8dae1ab24b35192fb858ed21596d433fcd"
] | [
"flask_app.py"
] | [
"# The MIT License (MIT)\n#\n# Copyright (c) 2015 University of East Anglia, Norwich, UK\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# Developed by Geoffrey French in collaboration with Dr. M. Fisher and\n# Dr. M. Mackiewicz.\nimport argparse\nimport json\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\nimport yaml\nimport cv2\n\nfrom flask import Flask, render_template, request, make_response, send_from_directory\n\nfrom image_labelling_tool import labelling_tool\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Image labelling tool - Flask app')\n parser.add_argument('--slic', action='store_true', help='Use SLIC segmentation to generate initial labels')\n parser.add_argument('--readonly', action='store_true', help='Don\\'t persist changes to disk')\n parser.add_argument('--image_dir')\n parser.add_argument('--label_names')\n parser.add_argument('--file_ext', type=str, default='png')\n parser.add_argument('--port', type=int, default=5000)\n args = parser.parse_args()\n\n file_ext = '.{}'.format(args.file_ext)\n port = args.port\n\n # `LabelClass` parameters are: symbolic name, human readable name for UI, and RGB colour as list\n with open(args.label_names, 'r') as f:\n label_names = yaml.load(f)\n\n cmap = plt.get_cmap('Spectral')\n colors = [(np.array(cmap(i)[:3]) * 255).astype(np.int32).tolist()\n for i in range(1, len(label_names) + 1)]\n label_classes = [labelling_tool.LabelClass(name, name, color)\n for color, name in zip(colors, label_names)]\n\n img_dir = args.image_dir\n if args.slic:\n import glob\n from skimage.segmentation import slic\n\n for path in glob.glob(os.path.join(img_dir, '*{}'.format(file_ext))):\n name = os.path.splitext(path)[0]\n out_name = name + '__labels.json'\n if os.path.exists(out_name):\n print('Label already exits at {}'.format(out_name))\n # raise ValueError\n continue\n\n print('Segmenting {0}'.format(path))\n img = plt.imread(path)\n # slic_labels = slic(img, 1000, compactness=20.0)\n # slic_labels = slic(img, 1000, slic_zero=True) + 1\n # slic_labels = slic(img, 1500, slic_zero=True) + 1\n lsc = cv2.ximgproc.createSuperpixelLSC(img, region_size = 8, ratio=0.08)\n lsc.iterate(20)\n lsc.enforceLabelConnectivity(10)\n slic_labels = lsc.getLabels()\n\n print('Converting SLIC labels to vector labels...')\n labels = labelling_tool.ImageLabels.from_label_image(slic_labels)\n\n with open(out_name, 'w') as f:\n json.dump(labels.labels_json, f)\n\n readonly = args.readonly\n # Load in .JPG 
images from the 'images' directory.\n labelled_images = labelling_tool.PersistentLabelledImage.for_directory(\n img_dir, image_filename_pattern='*{}'.format(file_ext),\n readonly=readonly)\n print('Loaded {0} images'.format(len(labelled_images)))\n\n # Generate image IDs list\n image_ids = [str(i) for i in range(len(labelled_images))]\n # Generate images table mapping image ID to image so we can get an image by ID\n images_table = {image_id: img for image_id, img in zip(image_ids, labelled_images)}\n # Generate image descriptors list to hand over to the labelling tool\n # Each descriptor provides the image ID, the URL and the size\n image_descriptors = []\n for image_id, img in zip(image_ids, labelled_images):\n data, mimetype, width, height = img.data_and_mime_type_and_size()\n image_descriptors.append(labelling_tool.image_descriptor(\n image_id=image_id, url='/image/{}'.format(image_id),\n width=width, height=height\n ))\n\n app = Flask(__name__, static_folder='image_labelling_tool/static')\n config = {\n 'tools': {\n 'imageSelector': True,\n 'labelClassSelector': True,\n 'drawPolyLabel': True,\n 'compositeLabel': True,\n 'deleteLabel': True,\n }\n }\n\n\n @app.route('/')\n def index():\n label_classes_json = [{'name': cls.name, 'human_name': cls.human_name, 'colour': cls.colour} for cls in label_classes]\n return render_template('labeller_page.jinja2',\n tool_js_urls=labelling_tool.js_file_urls('/static/labelling_tool/'),\n label_classes=json.dumps(label_classes_json),\n image_descriptors=json.dumps(image_descriptors),\n initial_image_index=0,\n config=json.dumps(config))\n\n\n @app.route('/labelling/get_labels/<image_id>')\n def get_labels(image_id):\n image = images_table[image_id]\n\n labels = image.labels_json\n complete = False\n\n\n label_header = {\n 'labels': labels,\n 'image_id': image_id,\n 'complete': complete\n }\n\n r = make_response(json.dumps(label_header))\n r.mimetype = 'application/json'\n return r\n\n\n @app.route('/labelling/set_labels', methods=['POST'])\n def set_labels():\n label_header = json.loads(request.form['labels'])\n image_id = label_header['image_id']\n complete = label_header['complete']\n labels = label_header['labels']\n\n image = images_table[image_id]\n image.labels_json = labels\n\n return make_response('')\n\n\n @app.route('/image/<image_id>')\n def get_image(image_id):\n image = images_table[image_id]\n data, mimetype, width, height = image.data_and_mime_type_and_size()\n r = make_response(data)\n r.mimetype = mimetype\n return r\n\n\n\n @app.route('/ext_static/<path:filename>')\n def base_static(filename):\n return send_from_directory(app.root_path + '/ext_static/', filename)\n\n # app.run(debug=True)\n app.run(debug=False, host='0.0.0.0', port=port)\n"
] | [
[
"matplotlib.pyplot.imread",
"matplotlib.pyplot.get_cmap"
]
] |
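Both matplotlib calls listed in the `apis` column come from the colour-assignment step of `flask_app.py` in the `code` field above. A readable sketch of that step is shown below; the label names are placeholders, since the app actually reads them from the YAML file passed via `--label_names`.

```python
# Readable sketch of the colour-assignment step from the escaped `code` cell above.
# The label names here are placeholder values for illustration only.
import numpy as np
from matplotlib import pyplot as plt

label_names = ['background', 'cell', 'nucleus']   # placeholders; flask_app.py loads these from YAML

cmap = plt.get_cmap('Spectral')                   # the call recorded in the `apis` column
# Integer inputs index the colormap's lookup table directly (so low indices give
# neighbouring colours); this mirrors the behaviour of the row's code as written.
colors = [(np.array(cmap(i)[:3]) * 255).astype(np.int32).tolist()
          for i in range(1, len(label_names) + 1)]
print(colors)   # one [R, G, B] list (0-255 range) per label class
```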
BarakeelFanseu/GRAC_CIKM | [
"3cbdbbb6c4902653f633c6d8f1c80f370b2938cf"
] | [
"others_clean.py"
] | [
"import scipy.io as sio\nimport time\n# import tensorflow as tf\nimport numpy as np\nimport scipy.sparse as sp\nfrom sklearn.cluster import KMeans, SpectralClustering\nfrom metrics import clustering_metrics\nfrom sklearn.metrics.pairwise import euclidean_distances\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.preprocessing import normalize\nfrom data import data\nimport argparse, os,sys,inspect\nfrom Laplacian_HGCN import Laplacian\nfrom sklearn.preprocessing import normalize\nimport random\n\n\np = argparse.ArgumentParser(description='Choose Parameter for Filter interpolation')\np.add_argument('--data', type=str, default='coauthorship', help='data name (coauthorship/cocitation)')\np.add_argument('--dataset', type=str, default='cora', help='dataset name (e.g.: cora/dblp/acm for coauthorship, cora/citeseer/pubmed for cocitation)')\np.add_argument('--num_runs', type=int, default=10, help='number of times to run experiment')\np.add_argument('--gpu', type=int, default=None, help='gpu number to use')\np.add_argument('--cuda', type=bool, default=False, help='cuda for gpu')\np.add_argument('--seeds', type=int, default=0, help='seed for randomness')\np.add_argument('--others', type=str, default='Kmeans', help='Kmeans, cliqueNcut, HyperNcut, HyperA')\n# p.add_argument('--alpha', type=float, default=0.5, help='balance parameter')\n# p.add_argument('-f') # for jupyter default\n\nargs = p.parse_args()\n\ndef preprocess_adj(H, variable_weight=False):\n \"\"\"\n calculate G from hypgraph incidence matrix H\n :param H: hypergraph incidence matrix H\n :param variable_weight: whether the weight of hyperedge is variable\n :return: G\n \"\"\"\n H = np.array(H)\n n_edge = H.shape[1]\n \n # the weight of the hyperedge\n W = np.ones(n_edge)\n \n # the degree of the node\n DV = np.sum(H * W, axis=1)\n \n # the degree of the hyperedge\n DE = np.sum(H, axis=0)\n\n invDE = np.mat(np.diag(np.power(DE, -1)))\n DV2 = np.mat(np.diag(np.power(DV, -0.5)))\n DV2[np.isinf(DV2)] = 0.\n W = np.mat(np.diag(W))\n H = np.mat(H)\n HT = H.T\n\n if variable_weight:\n DV2_H = DV2 * H\n invDE_HT_DV2 = invDE * HT * DV2\n return DV2_H, W, invDE_HT_DV2\n else:\n \n G = DV2.dot(H.dot(W.dot(invDE.dot(HT.dot(DV2))))) \n I = sp.eye(G.shape[0]).toarray()\n L = I - G\n \n return L\n\ndef Hyp_adj(H):\n \"\"\"\n calculate G from hypgraph incidence matrix H\n :param H: hypergraph incidence matrix H\n :param variable_weight: whether the weight of hyperedge is variable\n :return: G\n \"\"\"\n H = np.array(H)\n n_edge = H.shape[1]\n\n # the weight of the hyperedge\n W = np.ones(n_edge)\n\n # the degree of the node\n DV = np.sum(H * W, axis=1)\n DV = np.mat(np.diag(DV))\n\n W = np.mat(np.diag(W))\n H = np.mat(H)\n HT = H.T\n\n adj = H.dot(W.dot(HT))\n adj = adj - DV\n \n return adj \n\ndef clique_adj(H):\n \"\"\"\n calculate G from hypgraph incidence matrix H\n :param H: hypergraph incidence matrix H\n :param variable_weight: whether the weight of hyperedge is variable\n :return: G\n \"\"\"\n H = np.array(H)\n n_edge = H.shape[1]\n \n # the weight of the hyperedge\n W = np.ones(n_edge)\n \n W = np.mat(np.diag(W))\n H = np.mat(H)\n HT = H.T\n return H.dot(W.dot(HT))\n\ndef to_onehot(prelabel):\n k = len(np.unique(prelabel))\n label = np.zeros([prelabel.shape[0], k])\n label[range(prelabel.shape[0]), prelabel] = 1\n label = label.T\n return label\n\ndef square_dist(prelabel, feature):\n if sp.issparse(feature):\n feature = feature.todense()\n feature = np.array(feature)\n\n onehot = to_onehot(prelabel)\n\n m, n = 
onehot.shape\n count = onehot.sum(1).reshape(m, 1)\n count[count == 0] = 1\n\n mean = onehot.dot(feature) / count\n a2 = (onehot.dot(feature * feature) / count).sum(1)\n pdist2 = np.array(a2 + a2.T - 2 * mean.dot(mean.T))\n\n intra_dist = pdist2.trace()\n inter_dist = pdist2.sum() - intra_dist\n intra_dist /= m\n inter_dist /= m * (m - 1)\n return intra_dist, inter_dist\n\ndef dist(prelabel, feature):\n k = len(np.unique(prelabel))\n intra_dist = 0\n\n for i in range(k):\n Data_i = feature[np.where(prelabel == i)]\n Dis = euclidean_distances(Data_i, Data_i)\n n_i = Data_i.shape[0]\n if n_i == 0 or n_i == 1:\n intra_dist = intra_dist\n else:\n intra_dist = intra_dist + 1 / k * 1 / (n_i * (n_i - 1)) * sum(sum(Dis))\n\n return intra_dist\n\ndef Normalized_cut(prelabel, Laplacian, degree):\n label = to_onehot(prelabel)\n label = label.T\n k = len(np.unique(prelabel))\n\n for i in range(k):\n vol = degree[np.where(prelabel == i)]\n vol = vol.T[np.where(prelabel == i)]\n vol = vol.sum(1).sum()\n vol = np.sqrt(vol)\n label[np.where(prelabel == i)] = label[np.where(prelabel == i)] / vol\n\n return np.trace(label.T.dot(Laplacian.dot(label))).item()\n\ndef Incidence_mat(num_nodes, Hypergraph):\n print(\"creating incidence matrix\")\n Incidence = np.zeros(shape=(num_nodes, len(Hypergraph)))\n for edgei, (k, v) in enumerate(Hypergraph.items()):\n for i in v:\n Incidence[i][edgei] = 1\n return Incidence\n\n\n# def running(others='Kmeans', rep=10, seed=0, features=None, Incidence=None, labels=None, k=None):\ndef running():\n\n intra_list = []\n inter_list = []\n acc_list = []\n stdacc_list = []\n f1_list = []\n stdf1_list =[]\n nmi_list = []\n stdnmi_list = []\n ncut_list = []\n precision_list = []\n adj_score_list = []\n recall_macro_list = []\n\n intra_list.append(10000000)\n inter_list.append(10000000)\n \n t = time.time()\n \n \n IntraD = np.zeros(rep)\n InterD = np.zeros(rep)\n # Ncut = np.zeros(rep)\n ac = np.zeros(rep)\n nm = np.zeros(rep)\n f1 = np.zeros(rep)\n pre = np.zeros(rep)\n rec = np.zeros(rep)\n adj_s = np.zeros(rep)\n # mod = np.zeros(rep)\n \n \n for i in range(rep):\n np.random.seed(seed)\n random.seed(seed)\n \n if others=='Kmeans':\n print('+++++++++++++++++Kmeans++++++++++++++')\n \n u = features\n kmeans = KMeans(n_clusters=k, init='k-means++', random_state=seed).fit(u)\n predict_labels = kmeans.predict(u)\n \n elif others=='HyperNcut':\n print('+++++++++++++++++HyperNcut++++++++++++++')\n\n print('creating Laplacian for HyperNcut')\n adj_norm = preprocess_adj(Incidence)\n print('Done Creating Laplacian')\n \n u, s, v = sp.linalg.svds(adj_norm, k=k, which='LM')\n kmeans = KMeans(n_clusters=k, init='k-means++', random_state=seed).fit(u)\n predict_labels = kmeans.predict(u)\n\n elif others=='HyperA':\n print('+++++++++++++++++HyperA++++++++++++++')\n \n print('creating adj for HyperA') \n adj_norm = Hyp_adj(Incidence)\n print('Done Creating adj')\n\n spectral = SpectralClustering(n_clusters=k, affinity='precomputed', assign_labels='kmeans', random_state=seed)\n predict_labels = spectral.fit_predict(adj_norm)\n \n elif others=='cliqueNcut':\n print('+++++++++++++++++Hyperclique++++++++++++++')\n \n print('creating adj for Hyper-clique') \n adj_norm = clique_adj(Incidence)\n print('Done Creating adj')\n\n spectral = SpectralClustering(n_clusters=k, affinity='precomputed', assign_labels='kmeans', random_state=seed)\n predict_labels = spectral.fit_predict(adj_norm)\n\n else:\n print('args.others are in [Kmeans, cliqueNcut, HyperNcut, HyperA] else modify lines 340-342 to include other 
types')\n\n IntraD[i], InterD[i] = square_dist(predict_labels, features)\n #intraD[i] = dist(predict_labels, features)\n cm = clustering_metrics(labels, predict_labels)\n ac[i], nm[i], f1[i], pre[i], adj_s[i], rec[i] = cm.evaluationClusterModelFromLabel()\n # mod[i] = modularity(predict_labels, adj)\n \n intramean = np.mean(IntraD)\n intermean = np.mean(InterD)\n # ncut_mean = np.mean(Ncut)\n acc_means = np.mean(ac)\n acc_stds = np.std(ac)\n nmi_means = np.mean(nm)\n nmi_stds = np.std(nm)\n f1_means = np.mean(f1)\n f1_stds = np.std(f1)\n # mod_means = np.mean(mod)\n pre_mean = np.mean(pre)\n rec_mean = np.mean(rec)\n adj_smean = np.mean(adj_s)\n\n # modularity_list.append(mod_means)\n # ncut_list.append(ncut_mean)\n intra_list.append(intramean)\n inter_list.append(intermean)\n acc_list.append(acc_means)\n stdacc_list.append(acc_stds)\n nmi_list.append(nmi_means)\n stdnmi_list.append(nmi_stds)\n f1_list.append(f1_means)\n stdf1_list.append(f1_stds)\n precision_list.append(pre_mean)\n recall_macro_list.append(rec_mean)\n adj_score_list.append(adj_smean)\n if others=='Kmeans':\n print('=====================FinishedKMEANS================')\n \n elif others=='HyperNcut':\n print('=====================FinishedHYPERNCUT================')\n \n elif others=='HyperA':\n print('=====================FinishedHYPERA================')\n \n elif others=='cliqueNcut':\n print('=====================FinishedHYPERCLIQUE================')\n\n \n print('dataset: {}_{}, ac: {}, f1: {}, nm: {}, intraD: {}, InterD: {}, pre: {}, rec: {}, adj_score: {}'.format(args.dataset, args.data, acc_means, f1_means, nmi_means, intramean, intermean, pre_mean, rec_mean, adj_smean))\n t = time.time() - t\n print('Kmeans time taken: {}'.format(t))\n \n \n\nif __name__ == '__main__':\n '''this is not the file used for the run times in the paper as this file contains too many if statements. \n A file implementing each model separately was used to report the average run times and memory usage. \n But the conclusions even using this file are the same.'''\n\n # Using datasets from HyperGCN: A New Method For Training Graph Convolutional Networks on Hypergraphs NIPS 2019\n # coauthorship: cora, dblp\n # cocitation: citeseer, cora, pubmed\n\n # args = parse()\n dataset = data.load(args.data, args.dataset)\n\n # {'hypergraph': hypergraph, 'features': features, 'labels': labels, 'n': features.shape[0]}\n labels = dataset['labels']\n num_nodes = dataset['n']\n num_hyperedges = dataset['e']\n labels = np.asarray(np.argmax(labels, axis=1))\n # labels = np.squeeze(labels, axis=1)\n k = len(np.unique(labels))\n print('k: {}, labels: {}, labels.shape: {}'.format(k, labels, labels.shape))\n\n\n # elif args.others=='Kmeans': # for storage studies only\n features = dataset['features']\n\n # if args.others in ['cliqueNcut', 'HyperNcut', 'HyperA']: # for storage studies only\n Hypergraph = dataset['hypergraph']\n Incidence = Incidence_mat(num_nodes, Hypergraph)\n\n rep = args.num_runs\n \n others = args.others\n seed = args.seeds\n \n # running(others=others, rep=rep, seed=seed, features=features, Incidence=Incidence, labels=labels, k=k)\n running()\n "
] | [
[
"numpy.ones",
"numpy.sum",
"scipy.sparse.linalg.svds",
"numpy.diag",
"numpy.random.seed",
"sklearn.cluster.KMeans",
"sklearn.metrics.pairwise.euclidean_distances",
"numpy.where",
"numpy.unique",
"numpy.mat",
"numpy.mean",
"sklearn.cluster.SpectralClustering",
"numpy.zeros",
"numpy.argmax",
"scipy.sparse.eye",
"numpy.power",
"numpy.std",
"numpy.array",
"scipy.sparse.issparse",
"numpy.isinf",
"numpy.sqrt"
]
] |
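The `preprocess_adj` function in the `code` field above builds the normalized hypergraph Laplacian L = I - Dv^{-1/2} H W De^{-1} H^T Dv^{-1/2} from an incidence matrix H with unit hyperedge weights. A toy check of that construction, using a made-up 4-node, 2-hyperedge incidence matrix:

```python
# Toy check of the hypergraph Laplacian built by preprocess_adj in the `code` cell above:
#   L = I - Dv^{-1/2} H W De^{-1} H^T Dv^{-1/2}   (unit hyperedge weights)
# The incidence matrix below is a made-up example, not data from the paper.
import numpy as np

H = np.array([[1, 0],
              [1, 0],
              [1, 1],
              [0, 1]], dtype=float)                  # rows = nodes, columns = hyperedges

W = np.eye(H.shape[1])                               # unit hyperedge weights
De_inv = np.diag(1.0 / H.sum(axis=0))                # inverse hyperedge degrees
Dv_inv_sqrt = np.diag(1.0 / np.sqrt((H * np.diag(W)).sum(axis=1)))  # node degrees -> Dv^{-1/2}

Theta = Dv_inv_sqrt @ H @ W @ De_inv @ H.T @ Dv_inv_sqrt
L = np.eye(H.shape[0]) - Theta                       # matches the L returned by preprocess_adj
print(np.round(L, 3))
```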
c-hydro/hyde | [
"b0728397522aceebec3e7ff115aff160a10efede"
] | [
"src/common/plot/lib_plot_point.py"
] | [
"\"\"\"\nLibrary Features:\n\nName: lib_plot_point\nAuthor(s): Fabio Delogu ([email protected])\nDate: '20180918'\nVersion: '1.0.0'\n\"\"\"\n\n#######################################################################################\n# Library\nfrom numpy import arange\n\nimport matplotlib.pylab as plt\nfrom mpl_toolkits.basemap import Basemap\n#######################################################################################\n\n# -------------------------------------------------------------------------------------\n# Method to plot scattered points on a map\ndef plotPoint_Scatter(a1dData, a1dGeoX, a1dGeoY, a1dGeoBox,\n dDataMin=0, dDataMax=10, sColorMap='RdBu_r', sCBarLabel='NA', sMapRes='l'):\n # Define geobox\n dGeoYMin = a1dGeoBox[0]\n dGeoXMin = a1dGeoBox[1]\n dGeoYMax = a1dGeoBox[2]\n dGeoXMax = a1dGeoBox[3]\n\n oFig = plt.figure(figsize=(18, 18))\n oBaseMap = Basemap(projection='cea', resolution=sMapRes,\n llcrnrlat=dGeoYMin, llcrnrlon=dGeoXMin,\n urcrnrlat=dGeoYMax, urcrnrlon=dGeoXMax\n )\n\n oBaseMap.drawlsmask(land_color=\"#ddaa66\", ocean_color=\"#7777ff\", resolution='i')\n\n oBaseMap.drawcoastlines(color='lightgray', linewidth=1.25)\n oBaseMap.fillcontinents()\n oBaseMap.drawmapboundary(fill_color='aqua')\n\n oBaseMap.scatter(a1dGeoX, a1dGeoY, s=a1dData*10, c=a1dData, cmap=sColorMap, zorder=10, latlon=True)\n\n oBaseMap.drawparallels(arange(dGeoYMin, dGeoYMax, 0.4), labels=[True, False, False, False])\n oBaseMap.drawmeridians(arange(dGeoXMin, dGeoXMax, 0.4), labels=[False, False, False, True])\n\n oBaseMap.colorbar(location='bottom', format='%d', label=sCBarLabel)\n\n plt.clim(dDataMin, dDataMax)\n plt.show()\n\n# -------------------------------------------------------------------------------------\n"
] | [
[
"numpy.arange",
"matplotlib.pylab.figure",
"matplotlib.pylab.show",
"matplotlib.pylab.clim"
]
] |
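A hypothetical call to the `plotPoint_Scatter` helper defined in the `code` field above, mainly to document the `a1dGeoBox` ordering it expects ([lat_min, lon_min, lat_max, lon_max]). The import path and all data values are assumptions for illustration, and the helper itself requires `mpl_toolkits.basemap` to be installed.

```python
# Hypothetical usage of the plotPoint_Scatter helper from the `code` cell above.
# All coordinates and values are made-up; the import path is assumed for illustration.
import numpy as np
from lib_plot_point import plotPoint_Scatter   # assumed module name for src/common/plot/lib_plot_point.py

a1dGeoX = np.array([8.9, 9.1, 9.3])            # longitudes of the points
a1dGeoY = np.array([44.3, 44.5, 44.4])         # latitudes of the points
a1dData = np.array([2.0, 7.5, 4.1])            # values: drive both marker size and colour
a1dGeoBox = [44.0, 8.5, 45.0, 9.8]             # [lat_min, lon_min, lat_max, lon_max], per the helper

plotPoint_Scatter(a1dData, a1dGeoX, a1dGeoY, a1dGeoBox,
                  dDataMin=0, dDataMax=10, sCBarLabel='Rain [mm]', sMapRes='l')
```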
Andy51/openvino | [
"61ccde700f1d8a587291547d96b1eaa00955b89c"
] | [
"model-optimizer/mo/front/tf/loader.py"
] | [
"\"\"\"\n Copyright (C) 2018-2020 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\nimport os\nimport re\n\nfrom distutils.version import LooseVersion\nfrom mo.utils.error import Error, FrameworkError\nfrom mo.utils.utils import refer_to_faq_msg\nfrom mo.utils.versions_checker import get_environment_setup\n\ntry:\n import tensorflow.compat.v1 as tf_v1\n # disable eager execution of TensorFlow 2 environment immediately\n tf_v1.disable_eager_execution()\n import tensorflow as tf\n from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2\nexcept ImportError:\n import tensorflow as tf_v1\n\nfrom google.protobuf import text_format\nfrom mo.graph.graph import fill_graph_with_nodes, Graph\nfrom mo.utils.summarize_graph import summarize_graph\n\n\ndef freeze_checkpoints(graph_def: tf_v1.GraphDef, checkpoint_dir: str, output_node_names: list):\n \"\"\"\n Loads all the variables in a graph and stores them in a separate dictionary. Freezes output nodes in the graph\n :param graph_def: GraphDef object holding the network.\n :param checkpoint_dir: path to directory with checkpoint files with values of graph variables.\n :param output_node_names: list of output node names.\n :return: GraphDef containing a simplified version of the original.\n \"\"\"\n log.debug(\"Loading checkpoint files from directory: {}\".format(checkpoint_dir))\n checkpoint_files = []\n for checkpoint_name in sorted(os.listdir(checkpoint_dir)):\n checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name)\n if os.path.isfile(checkpoint_path):\n checkpoint_files.append(checkpoint_path)\n log.debug(\"File {} will be loaded\".format(checkpoint_path))\n else:\n log.debug(\"Path {} is not a file. 
Skipping\")\n\n if len(checkpoint_files) == 0:\n raise Error(\"There are no checkpoint files in directory: {}\".format(checkpoint_dir))\n\n tf_v1.import_graph_def(graph_def, name='')\n\n with tf_v1.Session() as sess:\n uninitialized_variables = [str(v, 'utf-8') for v in set(sess.run(tf_v1.report_uninitialized_variables()))]\n all_variables = [n.name for n in sess.graph.as_graph_def().node if n.op in ['Variable', 'VariableV2']]\n white_list = [v for v in all_variables if v not in uninitialized_variables]\n black_list = [v for v in all_variables if v in uninitialized_variables]\n output_graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, graph_def, output_node_names,\n variable_names_whitelist=white_list,\n variable_names_blacklist=black_list)\n variable_values = {}\n for checkpoint_file in checkpoint_files:\n log.debug(\"Loading {}\".format(checkpoint_file))\n with tf_v1.Session() as sess:\n var_list = {}\n var_to_shape_map = tf_v1.train.load_checkpoint(checkpoint_file).get_variable_to_shape_map()\n for key in var_to_shape_map:\n try:\n tensor = sess.graph.get_operation_by_name(key).outputs[0]\n except KeyError:\n continue\n var_list[key] = tensor\n tf_v1.train.Saver(var_list=var_list).restore(sess, checkpoint_file)\n for name, tensor in var_list.items():\n variable_values[name] = sess.run(tensor)\n return output_graph_def, variable_values\n\n\ndef freeze_checkpoint(graph_def, checkpoint, output_node_names):\n \"\"\"\n Replaces all the variables in a graph with constants of the same values.\n :param graph_def: GraphDef object holding the network.\n :param checkpoint: path to checkpoint file with values of variables.\n :param output_node_names: list of output node names\n :return: GraphDef containing a simplified version of the original.\n \"\"\"\n tf_v1.import_graph_def(graph_def, name=\"\")\n\n with tf_v1.Session() as sess:\n var_list = {}\n var_to_shape_map = tf_v1.train.NewCheckpointReader(checkpoint).get_variable_to_shape_map()\n for key in var_to_shape_map:\n try:\n tensor = sess.graph.get_operation_by_name(key).outputs[0]\n except KeyError:\n continue\n var_list[key] = tensor\n tf_v1.train.Saver(var_list=var_list).restore(sess, checkpoint)\n output_graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, graph_def, output_node_names)\n return output_graph_def\n\n\ndef read_file_to_graph_def(graph_def: [tf_v1.GraphDef, tf_v1.MetaGraphDef], graph_file_name: str = \"\",\n is_binary: bool = True):\n \"\"\"\n Reads file to protobuf\n :param graph_def: GraphDef orr MetaGraphDef object to store the network\n :param graph_file_name: path to file with graph\n :param is_binary: flag to switch between binary and test protobuf format of graph file\n :return: GraphDef or MetaGaphDef containing the network with cleared device info.\n \"\"\"\n try:\n if is_binary:\n with open(graph_file_name, \"rb\") as f:\n graph_def.ParseFromString(f.read())\n else:\n with open(graph_file_name, \"r\") as f:\n text_format.Merge(f.read(), graph_def)\n nodes_to_clear_device = graph_def.node if isinstance(graph_def, tf_v1.GraphDef) else graph_def.graph_def.node\n for node in nodes_to_clear_device:\n node.device = \"\"\n except Exception as e:\n raise FrameworkError(\n 'TensorFlow cannot read the model file: \"{}\" is incorrect TensorFlow model file. '\n '\\nThe file should contain one of the following TensorFlow graphs:'\n '\\n1. frozen graph in text or binary format'\n '\\n2. inference graph for freezing with checkpoint (--input_checkpoint) in text or binary format'\n '\\n3. 
meta graph'\n '\\n\\nMake sure that --input_model_is_text is provided for a model in text format. '\n 'By default, a model is interpreted in binary format. Framework error details: {}. ' +\n refer_to_faq_msg(43),\n graph_file_name,\n str(e)\n ) from e\n return graph_def\n\n\ndef get_output_node_names_list(graph_def, user_defined_output_node_names_list: list):\n return summarize_graph(graph_def)['outputs'] \\\n if user_defined_output_node_names_list is None or len(user_defined_output_node_names_list) == 0 \\\n else user_defined_output_node_names_list\n\n\ndef deducing_metagraph_path(meta_graph_file: str):\n match = re.search('^(.*)\\.(data-\\d*-of-\\d*|index|meta)$', meta_graph_file)\n if match is not None:\n deduced_meta_graph_file = match.group(1) + '.meta'\n if not os.path.isfile(deduced_meta_graph_file):\n raise Error('\\n\\nMetaGraph freezing mechanism was enabled. '\n '\\n{} file does not represent MetaGraph. '\n '\\n{} path to MetaGraph was deduced, but it does not exist'\n '\\n\\nModel with MetaGraph consists of 3-4 files:'\n '\\n1. model_name.meta'\n '\\n2. model_name.index'\n '\\n3. model_name.data-00000-of-00001 (digit part may vary)'\n '\\n4. checkpoint (optional)'.format(meta_graph_file, deduced_meta_graph_file))\n else:\n meta_graph_file = deduced_meta_graph_file\n else:\n raise Error('\\n\\nMetaGraph freezing mechanism was enabled. '\n '\\n{} file does not represent MetaGraph. '\n '\\n\\nModel with MetaGraph consists of 3-4 files:'\n '\\n1. model_name.meta'\n '\\n2. model_name.index'\n '\\n3. model_name.data-00000-of-00001 (digit part may vary)'\n '\\n4. checkpoint (optional)'\n '\\n\\nTo load this model, simply run:'\n '\\npython3 mo_tf.py --input_meta_graph model_name.meta'\n ''.format(meta_graph_file))\n return meta_graph_file\n\n\ndef load_tf_graph_def(graph_file_name: str = \"\", is_binary: bool = True, checkpoint: str = \"\",\n model_dir: str = \"\", saved_model_tags: list = [], meta_graph_file: str = \"\",\n user_output_node_names_list: list = []):\n # As a provisional solution, use a native TF methods to load a model protobuf\n graph_def = tf_v1.GraphDef()\n if isinstance(graph_file_name, str) and (re.match('.*\\.(ckpt|meta)$', graph_file_name)):\n print('[ WARNING ] The value for the --input_model command line parameter ends with \".ckpt\" or \".meta\" '\n 'extension.\\n'\n 'It means that the model is not frozen.\\n'\n 'To load non frozen model to Model Optimizer run:'\n '\\n\\n1. For \"*.ckpt\" file:'\n '\\n- if inference graph is in binary format'\n '\\npython3 mo_tf.py --input_model \"path/to/inference_graph.pb\" --input_checkpoint \"path/to/*.ckpt\"'\n '\\n- if inference graph is in text format'\n '\\npython3 mo_tf.py --input_model \"path/to/inference_graph.pbtxt\" --input_model_is_text '\n '--input_checkpoint \"path/to/*.ckpt\"'\n '\\n\\n2. 
For \"*.meta\" file:'\n '\\npython3 mo_tf.py --input_meta_graph \"path/to/*.meta\"')\n variables_values = {}\n try:\n if graph_file_name and not meta_graph_file and not checkpoint:\n # frozen graph\n return read_file_to_graph_def(graph_def, graph_file_name, is_binary), variables_values\n if graph_file_name and not meta_graph_file and checkpoint:\n # inference graph and checkpoint\n graph_def = read_file_to_graph_def(graph_def, graph_file_name, is_binary)\n outputs = get_output_node_names_list(graph_def, user_output_node_names_list)\n if os.path.isfile(checkpoint):\n graph_def = freeze_checkpoint(graph_def=graph_def, checkpoint=checkpoint, output_node_names=outputs)\n elif os.path.isdir(checkpoint):\n graph_def, variables_values = freeze_checkpoints(graph_def=graph_def, checkpoint_dir=checkpoint,\n output_node_names=outputs)\n # we are sure that checkpoint is existing file or directory due to cli_parser configuration\n return graph_def, variables_values\n if not graph_file_name and meta_graph_file:\n meta_graph_file = deducing_metagraph_path(meta_graph_file)\n input_meta_graph_def = read_file_to_graph_def(tf_v1.MetaGraphDef(), meta_graph_file, is_binary)\n # pylint: disable=no-member\n with tf_v1.Session() as sess:\n restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)\n restorer.restore(sess, re.sub('\\.meta$', '', meta_graph_file))\n outputs = get_output_node_names_list(input_meta_graph_def.graph_def, user_output_node_names_list)\n graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, input_meta_graph_def.graph_def,\n outputs)\n return graph_def, variables_values\n if model_dir:\n # saved model directory\n try:\n env_setup = get_environment_setup()\n # enable eager execution temporarily while TensorFlow 2 model is being loaded\n tf_v1.enable_eager_execution()\n # code to extract GraphDef for TF 2.0 SavedModel format\n # tf.saved_model.load function throws TypeError for TF 1.x SavedModel format in case TF 1.x installed\n imported = tf.saved_model.load(model_dir, saved_model_tags) # pylint: disable=E1120\n # to get a signature by key throws KeyError for TF 1.x SavedModel format in case TF 2.x installed\n concrete_func = imported.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n # the aggressive inlining parameter needs to freeze a table of embeddings for Keras Embedding operation\n # and a model with Embedding operation cannot properly converted to IR without this function parameter\n if \"tensorflow\" in env_setup and env_setup[\"tensorflow\"] >= LooseVersion(\"2.2.0\"):\n frozen_func = convert_variables_to_constants_v2(concrete_func,\n lower_control_flow=False,\n aggressive_inlining=True) # pylint: disable=E1123\n else:\n frozen_func = convert_variables_to_constants_v2(concrete_func,\n lower_control_flow=False) # pylint: disable=E1123\n graph_def = frozen_func.graph.as_graph_def(add_shapes=True)\n # disable eager execution since next steps are executed with a graph in non-eager mode\n tf_v1.disable_eager_execution()\n return graph_def, variables_values\n except (TypeError, KeyError):\n # disable eager execution since TensorFlow 1 model is handled\n tf_v1.disable_eager_execution()\n # code to extract GraphDef for TF 1.0 SavedModel format\n tags = saved_model_tags if saved_model_tags is not None else [tf_v1.saved_model.tag_constants.SERVING]\n with tf_v1.Session() as sess:\n meta_graph_def = tf_v1.saved_model.loader.load(sess, tags, model_dir)\n outputs = get_output_node_names_list(meta_graph_def.graph_def, user_output_node_names_list)\n graph_def = 
tf_v1.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, outputs)\n return graph_def, variables_values\n except Exception as e:\n raise FrameworkError('SavedModel format load failure: {}', e) from e\n except Exception as e:\n raise FrameworkError('Cannot load input model: {}', e) from e\n raise Error(\"Unknown configuration of input model parameters\")\n\n\ndef protobuf_attrs(pb:tf_v1.NodeDef):\n return {'pb': pb}\n\n\ndef protobuf2nx(graph, pb: tf_v1.GraphDef):\n fill_graph_with_nodes(graph, pb.node, get_id=lambda pb: pb.name, get_attrs=protobuf_attrs)\n # initial order of nodes in the GraphDef. It is used to specify order in\n # which merged nodes are added to the generated sub-graph GraphDef for the TensorFlow offload feature.\n graph.graph['initial_nodes_order'] = [node.name for node in pb.node]\n\n # Remove data dependency edges. This is needed for the TF offload case\n for _, attrs in list(graph.nodes(data=True)):\n pb = attrs['pb']\n if '_class' in pb.attr:\n index = 0\n while index < len(pb.attr['_class'].list.s):\n if re.match('^loc:@.*', pb.attr['_class'].list.s[index].decode('utf-8')):\n del pb.attr['_class'].list.s[index]\n else:\n index = index + 1\n\n\ndef variables_to_constants(graph: Graph, variables_values: dict):\n \"\"\"\n Converts `Variable<V2>` operations to FakeConst operations with `value` from `variables_values` dictionary\n :param graph: graph to operate on\n :param variables_values: dictionary with variable names as keys and np.array data as values\n \"\"\"\n for node in graph.get_op_nodes(op='FakeConst'):\n node_name = node.name\n\n if node_name not in variables_values:\n log.debug(\"There is no value for '{}': {} in checkpoint variable values\".format(node.op, node_name))\n continue\n\n node['value'] = variables_values[node_name]\n"
] | [
[
"tensorflow.saved_model.load",
"tensorflow.enable_eager_execution",
"tensorflow.train.load_checkpoint",
"tensorflow.disable_eager_execution",
"tensorflow.report_uninitialized_variables",
"tensorflow.train.NewCheckpointReader",
"tensorflow.train.import_meta_graph",
"tensorflow.python.framework.convert_to_constants.convert_variables_to_constants_v2",
"tensorflow.import_graph_def",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.graph_util.convert_variables_to_constants",
"tensorflow.saved_model.loader.load",
"tensorflow.MetaGraphDef",
"tensorflow.GraphDef"
]
] |
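
The `apis` cell above lists the `tf.compat.v1` calls this Model Optimizer TensorFlow loader relies on. As a reading aid only, here is a condensed sketch of the checkpoint-freezing pattern those calls implement; the graph file, checkpoint prefix, and output node name are placeholders, not values taken from the record.

```python
# Condensed sketch of the freeze pattern in the record above: map checkpoint
# variable names to tensors in an imported inference graph, restore them,
# then fold the restored values into constants. All paths/names are placeholders.
import tensorflow.compat.v1 as tf_v1

tf_v1.disable_eager_execution()

graph_def = tf_v1.GraphDef()
with open("inference_graph.pb", "rb") as f:            # placeholder path
    graph_def.ParseFromString(f.read())

tf_v1.import_graph_def(graph_def, name="")
with tf_v1.Session() as sess:
    reader = tf_v1.train.NewCheckpointReader("model.ckpt")   # placeholder prefix
    var_list = {}
    for name in reader.get_variable_to_shape_map():
        try:
            # get_operation_by_name raises KeyError if the variable has no op in the graph
            var_list[name] = sess.graph.get_operation_by_name(name).outputs[0]
        except KeyError:
            continue
    tf_v1.train.Saver(var_list=var_list).restore(sess, "model.ckpt")
    frozen_graph_def = tf_v1.graph_util.convert_variables_to_constants(
        sess, graph_def, ["output"])                    # placeholder output node name
```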
flyflyinit/GUI-admin-tool | [
"1fa97393ee2a39a65f5b7bbe95eb5b5f04bc6ad6"
] | [
"project/networking/networkingplots.py"
] | [
"try:\n import psutil\nexcept ImportError as e:\n print(f'package not found\\n{e}\\n')\n\ntry:\n from PyQt5 import QtCore, QtWidgets\nexcept ImportError as e:\n print(\n f'package PyQt5 Not Found\\n{e}\\ntry :\\npip3 install --user pyqt5\\nOR\\ndnf install python3-pyqt5, yum install python3-pyqt5\\n')\n\ntry:\n from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n from matplotlib.figure import Figure\n import matplotlib.pyplot as plt\nexcept ImportError as e:\n print(f'package matplotlib Not Found\\n{e}\\ntry :\\npip3 install --user matplotlib\\n')\n\n\nclass MyMplCanvas(FigureCanvas):\n def __init__(self, parent=None, interface='a', width=5, height=5, dpi=50):\n fig = Figure(figsize=(width, height), dpi=dpi)\n plt.style.use('Solarize_Light2')\n self.Axes = fig.add_subplot()\n self.compute_initial_figure()\n\n FigureCanvas.__init__(self, fig)\n self.setParent(parent)\n FigureCanvas.setSizePolicy(self, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n def compute_initial_figure(self):\n pass\n\n\nclass NetSentCanvas(MyMplCanvas):\n def __init__(self, *args, **kwargs):\n for key, value in kwargs.items():\n if key == 'interface':\n self.interface = value\n\n MyMplCanvas.__init__(self, *args, **kwargs)\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self.netsent_update_figure)\n self.timer.start(1000)\n\n def compute_initial_figure(self):\n global netsent\n global netsenttimee\n global netsentcurrenttime\n global Axes\n global netsentval\n\n netsent = []\n netsenttimee = []\n netsentcurrenttime = 0\n\n if self.interface == \"All\":\n netsentval = psutil.net_io_counters(pernic=False, nowrap=False)[0]\n else:\n netsentval = psutil.net_io_counters(pernic=True, nowrap=False)[self.interface][0]\n\n self.Axes.plot(netsenttimee, netsent, label='sent')\n self.Axes.set_xlabel(\"Seconds\")\n self.Axes.set_ylabel(\"Bytes\")\n self.Axes.set_title(\"Network Sent\")\n self.Axes.set_xlim(0, 100)\n self.Axes.ticklabel_format(style='sci', axis='y', scilimits=(0, 0), useMathText=True)\n self.Axes.grid(True)\n self.Axes.get_xaxis().set_visible(False)\n self.Axes.legend(loc='upper left')\n\n def netsent_update_figure(self):\n global netsent\n global netsenttimee\n global netsentcurrenttime\n global Axes\n global netsentval\n\n netsentvalpre = netsentval\n if self.interface == \"All\":\n netsentval = psutil.net_io_counters(pernic=False, nowrap=False)[0]\n else:\n netsentval = psutil.net_io_counters(pernic=True, nowrap=False)[self.interface][0]\n\n netsent.append(netsentval - netsentvalpre)\n\n netsentcurrenttime = netsentcurrenttime + 1\n netsenttimee.append(str(netsentcurrenttime))\n\n if len(netsenttimee) == 100:\n netsent.pop(0)\n netsenttimee.pop(0)\n\n self.Axes.cla()\n self.Axes.plot(netsenttimee, netsent, label='sent')\n self.Axes.set_xlabel(\"Seconds\")\n self.Axes.set_ylabel(\"Bytes\")\n self.Axes.set_title(\"Network Sent\")\n self.Axes.set_xlim(0, 100)\n self.Axes.ticklabel_format(style='sci', axis='y', scilimits=(0, 0), useMathText=True)\n self.Axes.grid(True)\n self.Axes.get_xaxis().set_visible(False)\n self.Axes.legend(loc='upper left')\n self.draw()\n\n\nclass NetRecCanvas(MyMplCanvas):\n def __init__(self, *args, **kwargs):\n for key, value in kwargs.items():\n if key == 'interface':\n self.interface = value\n MyMplCanvas.__init__(self, *args, **kwargs)\n self.timer = QtCore.QTimer(self)\n self.timer.timeout.connect(self.netrec_update_figure)\n self.timer.start(1000)\n\n def 
compute_initial_figure(self):\n global netrec\n global netrectimee\n global netreccurrenttime\n global Axes\n global netrecval\n\n netrec = []\n netrectimee = []\n netreccurrenttime = 0\n\n if self.interface == \"All\":\n netrecval = psutil.net_io_counters(pernic=False, nowrap=False)[1]\n else:\n netrecval = psutil.net_io_counters(pernic=True, nowrap=False)[self.interface][1]\n\n self.Axes.plot(netrectimee, netrec, label='recieved')\n self.Axes.set_xlabel(\"Seconds\")\n self.Axes.set_ylabel(\"Bytes\")\n self.Axes.set_title(\"Network Recieved\")\n self.Axes.set_xlim(0, 100)\n self.Axes.ticklabel_format(style='sci', axis='y', scilimits=(0, 0), useMathText=True)\n self.Axes.grid(True)\n self.Axes.get_xaxis().set_visible(False)\n self.Axes.legend(loc='upper left')\n\n def netrec_update_figure(self):\n global netrec\n global netrectimee\n global netreccurrenttime\n global Axes\n global netrecval\n\n netrecvalpre = netrecval\n if self.interface == \"All\":\n netrecval = psutil.net_io_counters(pernic=False, nowrap=False)[1]\n else:\n netrecval = psutil.net_io_counters(pernic=True, nowrap=False)[self.interface][1]\n\n netrec.append(netrecval - netrecvalpre)\n netreccurrenttime = netreccurrenttime + 1\n netrectimee.append(str(netreccurrenttime))\n\n if len(netrectimee) == 100:\n netrec.pop(0)\n netrectimee.pop(0)\n\n self.Axes.cla()\n self.Axes.plot(netrectimee, netrec, label='recieved')\n self.Axes.set_xlabel(\"Seconds\")\n self.Axes.set_ylabel(\"Bytes\")\n self.Axes.set_title(\"Network Recieved\")\n self.Axes.set_xlim(0, 100)\n self.Axes.ticklabel_format(style='sci', axis='y', scilimits=(0, 0), useMathText=True)\n self.Axes.grid(True)\n self.Axes.get_xaxis().set_visible(False)\n self.Axes.legend(loc='upper left')\n self.draw()\n"
] | [
[
"matplotlib.pyplot.style.use",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy"
]
] |
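
The plotting classes in the second record poll `psutil.net_io_counters` once per second and graph the per-second deltas. A minimal, GUI-free sketch of that sampling loop follows; the five-sample count and one-second interval are illustrative choices, not part of the original code.

```python
# Minimal sketch of the per-second byte-delta sampling used by the canvases
# above, without the Qt timer or matplotlib plumbing.
import time
import psutil

prev = psutil.net_io_counters(pernic=False)
for _ in range(5):                      # illustrative sample count
    time.sleep(1)                       # matches the 1000 ms QTimer interval
    cur = psutil.net_io_counters(pernic=False)
    print(f"sent: {cur.bytes_sent - prev.bytes_sent} B/s, "
          f"recv: {cur.bytes_recv - prev.bytes_recv} B/s")
    prev = cur
```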